From 547421b2435a176663d4a63008d334cd1b10863c Mon Sep 17 00:00:00 2001
From: Jake LoRocco
Date: Thu, 7 Aug 2025 14:04:59 -0400
Subject: [PATCH 01/14] testing

---
 test/backends/test_huggingface.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/test/backends/test_huggingface.py b/test/backends/test_huggingface.py
index aab288a3..878dd5f5 100644
--- a/test/backends/test_huggingface.py
+++ b/test/backends/test_huggingface.py
@@ -41,6 +41,7 @@ def test_constraint_alora(self):
             constraint="The answer mention that there is a b in the middle of one of the strings but not the other.",
             force_yn=False,  # make sure that the alora naturally output Y and N without constrained generation
         )
+        print(alora_output)
         assert alora_output in ["Y", "N"], alora_output
         self.m.reset()

From c9d1dc11702ec8f6a043e0d9cefb1b61001edb96 Mon Sep 17 00:00:00 2001
From: Jake LoRocco
Date: Thu, 7 Aug 2025 15:15:46 -0400
Subject: [PATCH 02/14] fix failing tests

---
 test/backends/test_huggingface.py | 4 ++--
 test/backends/test_ollama.py      | 4 ++--
 test/backends/test_types.py       | 5 ++---
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/test/backends/test_huggingface.py b/test/backends/test_huggingface.py
index 878dd5f5..39361072 100644
--- a/test/backends/test_huggingface.py
+++ b/test/backends/test_huggingface.py
@@ -33,7 +33,8 @@ def test_system_prompt(self):
     def test_constraint_alora(self):
         self.m.reset()
         answer = self.m.instruct(
-            "Corporate wants you to find the difference between these two strings: aaaaaaaaaa aaaaabaaaa"
+            "Corporate wants you to find the difference between these two strings: aaaaaaaaaa aaaaabaaaa",
+            model_options={ModelOption.MAX_NEW_TOKENS: 100}
         )
         alora_output = self.backend.get_aloras()[0].generate_using_strings(
             input="Find the difference between these two strings: aaaaaaaaaa aaaaabaaaa",
@@ -41,7 +42,6 @@ def test_constraint_alora(self):
             constraint="The answer mention that there is a b in the middle of one of the strings but not the other.",
             force_yn=False,  # make sure that the alora naturally output Y and N without constrained generation
         )
-        print(alora_output)
         assert alora_output in ["Y", "N"], alora_output
         self.m.reset()

diff --git a/test/backends/test_ollama.py b/test/backends/test_ollama.py
index 859f5b44..a0ab2c9c 100644
--- a/test/backends/test_ollama.py
+++ b/test/backends/test_ollama.py
@@ -5,6 +5,7 @@
 import json
 from typing_extensions import Annotated
 from mellea.backends.types import ModelOption
+import pytest


 class Test_SmokeTestComponents:
@@ -87,6 +88,7 @@ def test_generate_from_raw(self):

         assert len(results) == len(prompts)

+    @pytest.mark.xfail(reason="ollama sometimes fails to generate structured outputs")
     def test_generate_from_raw_with_format(self):
         prompts = ["what is 1+1?", "what is 2+2?", "what is 3+3?", "what is 4+4?"]

@@ -112,6 +114,4 @@ class Answer(pydantic.BaseModel):


 if __name__ == "__main__":
-    import pytest
-
     pytest.main([__file__])

diff --git a/test/backends/test_types.py b/test/backends/test_types.py
index 39ab091d..cff35f05 100644
--- a/test/backends/test_types.py
+++ b/test/backends/test_types.py
@@ -18,7 +18,7 @@ def test_model_option_remove():
     ), "dict with removed special keys did not match expected"


-def test_model_option_replace_to_common_opts(capfd):
+def test_model_option_replace_to_common_opts(caplog):
     model_opts = {
         ModelOption.CONTEXT_WINDOW: 3,
         ModelOption.TEMPERATURE: 1,
@@ -41,8 +41,7 @@ def test_model_option_replace_to_common_opts(capfd):
     ), "dict with replaced keys did not match expected"

     # There should also be a logged message due to context_window key clashes.
-    out, _ = capfd.readouterr()
-    assert "old_key (context_size) to new_key (@@@context_window@@@): lost value associated with old_key (4) and kept original value of new_key (3)" in out, "expected log for conflicting keys not found"
+    assert "old_key (context_size) to new_key (@@@context_window@@@): lost value associated with old_key (4) and kept original value of new_key (3)" in caplog.text, f"expected log for conflicting keys not found in: {caplog.text}"


 def test_model_option_replace_to_backend_specific():

From b4285fd534384c5f6dbb5bcddf076fe0b215adb8 Mon Sep 17 00:00:00 2001
From: Jacob LoRocco
Date: Fri, 8 Aug 2025 11:08:21 -0400
Subject: [PATCH 03/14] add granite 3.3 alora and move hf test to use that

---
 .../aloras/huggingface/granite_aloras.py | 82 ++++++++++++++++---
 test/backends/test_huggingface.py        |  4 +-
 2 files changed, 71 insertions(+), 15 deletions(-)

diff --git a/mellea/backends/aloras/huggingface/granite_aloras.py b/mellea/backends/aloras/huggingface/granite_aloras.py
index 9d4391e0..f5d2ffbb 100644
--- a/mellea/backends/aloras/huggingface/granite_aloras.py
+++ b/mellea/backends/aloras/huggingface/granite_aloras.py
@@ -18,10 +18,25 @@ def __init__(
         path_or_model_id: str,
         generation_prompt: str,
         backend: LocalHFBackend,
+        *,
+        constraint_prompt: str | None = None,
+        include_constraint_in_alora_offset: bool = False,
     ):
-        """Initialize after checking that the backend is correct."""
-        assert backend._hf_model_id == "ibm-granite/granite-3.2-8b-instruct"
+        """Initialize after checking that the backend is correct.
+
+        Args:
+            constraint_prompt: a template that the constraint can be interpolated into; can only have a single `{}` slot.
+            include_constraint_in_alora_offset: whether to include the constraint prompt in the alora offset
+        """
         super().__init__(name, path_or_model_id, generation_prompt, backend)
+
+        # Maintain default behavior.
+        if constraint_prompt is None:
+            constraint_prompt = "\nRequirement: {}<|end_of_text|>\n"
+
+        self._constraint_prompt = constraint_prompt
+        self._include_constraint_in_alora_offset = include_constraint_in_alora_offset
+
         # We do a lot of logging for ALoras because this is an experimental feature. Maybe we should tag these log messages?
         self._logger = FancyLogger.get_logger()
@@ -51,8 +66,10 @@ def _generate_using_cache(
         self, cache_hit: HFAloraCacheInfo, constraint: str, force_yn: bool
     ) -> str:
         assert self._backend.alora_model is not None
+
+        # Must tokenize the constraint here since the requirement isn't known at initialization.
         constraint_tokens = self._backend._tokenizer(
-            f"\nRequirement: {constraint}<|end_of_text|>\n", return_tensors="pt"
+            self._constraint_prompt.format(constraint), return_tensors="pt"
         ).to(self._backend._device)

         input_combined = {
@@ -74,7 +91,14 @@ def _generate_using_cache(
             ),
         }

-        alora_offsets = [self._generation_prompt_tokens["input_ids"].shape[1] - 1]
+        if not self._include_constraint_in_alora_offset:
+            alora_offsets = [self._generation_prompt_tokens["input_ids"].shape[1] - 1]
+        else:
+            alora_offsets = [
+                constraint_tokens["input_ids"].shape[1]
+                + self._generation_prompt_tokens["input_ids"].shape[1]
+                - 2
+            ]
         self._logger.debug(
             f"Prompt for cached aLoRA({self.name}):\n {self._backend._tokenizer.decode(input_combined['input_ids'][0])}"
         )
@@ -136,7 +160,9 @@ def _generate_not_using_cache(
         templatized = self._backend._tokenizer.apply_chat_template(chat, tokenize=False)

         assert type(templatized) is str
-        templatized = templatized + f"\nRequirement: {constraint}<|end_of_text|>\n"
+
+        # Must tokenize the constraint here since the requirement isn't known at initialization.
+        templatized = templatized + self._constraint_prompt.format(constraint)

         tokenized = self._backend._tokenizer(templatized, return_tensors="pt").to(
             self._backend._device
@@ -156,7 +182,19 @@ def _generate_not_using_cache(
             ),
         }

-        alora_offsets = [self._generation_prompt_tokens["input_ids"].shape[1] - 1]
+        if not self._include_constraint_in_alora_offset:
+            alora_offsets = [self._generation_prompt_tokens["input_ids"].shape[1] - 1]
+        else:
+            # Get the constraint tokens separately so that we can calculate the alora offsets.
+            constraint_tokens = self._backend._tokenizer(
+                self._constraint_prompt.format(constraint), return_tensors="pt"
+            ).to(self._backend._device)
+
+            alora_offsets = [
+                constraint_tokens["input_ids"].shape[1]
+                + self._generation_prompt_tokens["input_ids"].shape[1]
+                - 2
+            ]

         self._logger.debug(
             f"Prompt for non-cached aLoRA({self.name}):\n{self._backend._tokenizer.decode(input_combined['input_ids'][0])}"
@@ -200,11 +238,29 @@ def _generate_not_using_cache(

 def add_granite_aloras(backend: LocalHFBackend):
     """Adds the IBM Granite "starter pack" ALoras to a backend."""
-    backend.add_alora(
-        HFConstraintAlora(
-            name="constraint",
-            path_or_model_id="ibm-granite/granite-3.2-8b-alora-requirement-check",
-            generation_prompt="<|start_of_role|>check_requirement<|end_of_role|>",
-            backend=backend,
+    if backend._hf_model_id == "ibm-granite/granite-3.2-8b-instruct":
+        backend.add_alora(
+            HFConstraintAlora(
+                name="constraint",
+                path_or_model_id="ibm-granite/granite-3.2-8b-alora-requirement-check",
+                generation_prompt="<|start_of_role|>check_requirement<|end_of_role|>",
+                backend=backend,
+                constraint_prompt="\nRequirement: {}<|end_of_text|>\n",
+                include_constraint_in_alora_offset=False,
+            )
+        )
+    elif backend._hf_model_id == "ibm-granite/granite-3.3-8b-instruct":
+        backend.add_alora(
+            HFConstraintAlora(
+                name="constraint",
+                path_or_model_id="ibm-granite/granite-3.3-8b-alora-requirement-check",
+                generation_prompt="<|start_of_role|>check_requirement<|end_of_role|>",
+                backend=backend,
+                constraint_prompt="\n<|start_of_role|>requirement<|end_of_role|>{}<|end_of_text|>\n",
+                include_constraint_in_alora_offset=True,
+            )
+        )
+    else:
+        raise ValueError(
+            f"cannot add_granite_aloras to unknown huggingface model_id / backend: {backend._hf_model_id}"
         )
-    )

diff --git a/test/backends/test_huggingface.py b/test/backends/test_huggingface.py
index 39361072..21e9c04c 100644
--- a/test/backends/test_huggingface.py
+++ b/test/backends/test_huggingface.py
@@ -15,7 +15,7 @@

 class TestHFALoraStuff:
     backend = LocalHFBackend(
-        model_id="ibm-granite/granite-3.2-8b-instruct",
+        model_id="ibm-granite/granite-3.3-8b-instruct",
         formatter=TemplateFormatter(model_id="ibm-granite/granite-4.0-tiny-preview"),
         cache=SimpleLRUCache(5),
     )
@@ -34,7 +34,7 @@ def test_constraint_alora(self):
         self.m.reset()
         answer = self.m.instruct(
             "Corporate wants you to find the difference between these two strings: aaaaaaaaaa aaaaabaaaa",
-            model_options={ModelOption.MAX_NEW_TOKENS: 100}
+            model_options={ModelOption.MAX_NEW_TOKENS: 300},  # Until aloras get a bit better, try not to abruptly end generation.
         )
         alora_output = self.backend.get_aloras()[0].generate_using_strings(
             input="Find the difference between these two strings: aaaaaaaaaa aaaaabaaaa",

From cde9a74a22e3a23222ade2304ddb52ce2be7dedd Mon Sep 17 00:00:00 2001
From: Jake LoRocco
Date: Fri, 8 Aug 2025 11:15:38 -0400
Subject: [PATCH 04/14] edit tests so generation is shorter

---
 test/backends/test_huggingface.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/backends/test_huggingface.py b/test/backends/test_huggingface.py
index 21e9c04c..2da6541f 100644
--- a/test/backends/test_huggingface.py
+++ b/test/backends/test_huggingface.py
@@ -15,7 +15,7 @@

 class TestHFALoraStuff:
     backend = LocalHFBackend(
-        model_id="ibm-granite/granite-3.3-8b-instruct",
+        model_id="ibm-granite/granite-3.2-8b-instruct",
         formatter=TemplateFormatter(model_id="ibm-granite/granite-4.0-tiny-preview"),
         cache=SimpleLRUCache(5),
     )
@@ -33,7 +33,7 @@ def test_system_prompt(self):
     def test_constraint_alora(self):
         self.m.reset()
         answer = self.m.instruct(
-            "Corporate wants you to find the difference between these two strings: aaaaaaaaaa aaaaabaaaa",
+            "Corporate wants you to find the difference between these two strings: aaaaaaaaaa aaaaabaaaa. Be concise and don't write code to answer the question.",
             model_options={ModelOption.MAX_NEW_TOKENS: 300},  # Until aloras get a bit better, try not to abruptly end generation.
         )

From c79c56810116636f8e7240ba85f316ebee38f561 Mon Sep 17 00:00:00 2001
From: Hendrik Strobelt
Date: Thu, 7 Aug 2025 16:02:37 +0200
Subject: [PATCH 05/14] Update README.md

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 18712d3f..bce6062f 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ with structured, maintainable, robust, and efficient AI workflows.

 [//]: # ([![arXiv](https://img.shields.io/badge/arXiv-2408.09869-b31b1b.svg)](https://arxiv.org/abs/2408.09869))

-[![Docs](https://img.shields.io/badge/docs-live-brightgreen)](https://docs.mellea.ai/)
+[![Docs](https://img.shields.io/badge/docs-live-brightgreen)](https://mellea.ai/)
 [![PyPI version](https://img.shields.io/pypi/v/mellea)](https://pypi.org/project/mellea/)
 [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/mellea)](https://pypi.org/project/mellea/)
 [![uv](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/uv/main/assets/badge/v0.json)](https://github.com/astral-sh/uv)

From 0df8cd90dbcf7480620a6f8ef86f9c1b975c01ec Mon Sep 17 00:00:00 2001
From: Hendrik Strobelt
Date: Thu, 7 Aug 2025 16:06:53 +0200
Subject: [PATCH 06/14] Update README.md

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index bce6062f..99d0d0f4 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,8 @@ with structured, maintainable, robust, and efficient AI workflows.
 [![uv](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/uv/main/assets/badge/v0.json)](https://github.com/astral-sh/uv)
 [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
 [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)](https://github.com/pre-commit/pre-commit)
-![GitHub License](https://img.shields.io/github/license/generative-computing/mellea)
+[![GitHub License](https://img.shields.io/github/license/generative-computing/mellea)](https://img.shields.io/github/license/generative-computing/mellea)
+

 ## Features

From c7c687e9b23fbd22888bc143c1aa2614c9c118e3 Mon Sep 17 00:00:00 2001
From: Kenneth Ocheltree
Date: Thu, 7 Aug 2025 10:27:09 -0400
Subject: [PATCH 07/14] Update to use PyPi

---
 ...compositionality_with_generative_slots.ipynb |  7 ++-----
 docs/examples/notebooks/context_example.ipynb   |  7 ++-----
 docs/examples/notebooks/document_mobject.ipynb  |  7 ++-----
 docs/examples/notebooks/example.ipynb           |  7 ++-----
 .../notebooks/instruct_validate_repair.ipynb    |  7 ++-----
 docs/examples/notebooks/m_serve_example.ipynb   | 17 ++---------------
 docs/examples/notebooks/mcp_example.ipynb       |  7 ++-----
 .../notebooks/model_options_example.ipynb       |  7 ++-----
 .../notebooks/sentiment_classifier.ipynb        |  7 ++-----
 docs/examples/notebooks/simple_email.ipynb      |  7 ++-----
 docs/examples/notebooks/table_mobject.ipynb     |  7 ++-----
 11 files changed, 22 insertions(+), 65 deletions(-)

diff --git a/docs/examples/notebooks/compositionality_with_generative_slots.ipynb b/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
index 9c1d06c6..010aa828 100644
--- a/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
+++ b/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/context_example.ipynb b/docs/examples/notebooks/context_example.ipynb
index 82e05eab..e182d2f5 100644
--- a/docs/examples/notebooks/context_example.ipynb
+++ b/docs/examples/notebooks/context_example.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/document_mobject.ipynb b/docs/examples/notebooks/document_mobject.ipynb
index 501a50ba..b13b2c90 100644
--- a/docs/examples/notebooks/document_mobject.ipynb
+++ b/docs/examples/notebooks/document_mobject.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/example.ipynb b/docs/examples/notebooks/example.ipynb
index ebae1bc0..e8714b3f 100644
--- a/docs/examples/notebooks/example.ipynb
+++ b/docs/examples/notebooks/example.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/instruct_validate_repair.ipynb b/docs/examples/notebooks/instruct_validate_repair.ipynb
index 589eb0d2..590a69d2 100644
--- a/docs/examples/notebooks/instruct_validate_repair.ipynb
+++ b/docs/examples/notebooks/instruct_validate_repair.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/m_serve_example.ipynb b/docs/examples/notebooks/m_serve_example.ipynb
index c7216d70..7b3ea7d5 100644
--- a/docs/examples/notebooks/m_serve_example.ipynb
+++ b/docs/examples/notebooks/m_serve_example.ipynb
@@ -10,16 +10,6 @@
     "This Jupyter notebook runs M Serve on Colab."
    ]
   },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "IByYhgKy9WCo"
-   },
-   "source": [
-    "# Getting Started with Mellea\n",
-    "This Jupyter notebook runs on Colab and runs the first piece of generative code."
-   ]
-  },
  {
   "cell_type": "markdown",
   "metadata": {
@@ -50,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -61,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/mcp_example.ipynb b/docs/examples/notebooks/mcp_example.ipynb
index c41b6f48..8fb792da 100644
--- a/docs/examples/notebooks/mcp_example.ipynb
+++ b/docs/examples/notebooks/mcp_example.ipynb
@@ -41,7 +41,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -52,10 +52,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/model_options_example.ipynb b/docs/examples/notebooks/model_options_example.ipynb
index b17fd078..21429ab0 100644
--- a/docs/examples/notebooks/model_options_example.ipynb
+++ b/docs/examples/notebooks/model_options_example.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/sentiment_classifier.ipynb b/docs/examples/notebooks/sentiment_classifier.ipynb
index 4e4a2e86..372af834 100644
--- a/docs/examples/notebooks/sentiment_classifier.ipynb
+++ b/docs/examples/notebooks/sentiment_classifier.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/simple_email.ipynb b/docs/examples/notebooks/simple_email.ipynb
index 0de34384..e1369f19 100644
--- a/docs/examples/notebooks/simple_email.ipynb
+++ b/docs/examples/notebooks/simple_email.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/table_mobject.ipynb b/docs/examples/notebooks/table_mobject.ipynb
index a4bca0a8..791709ea 100644
--- a/docs/examples/notebooks/table_mobject.ipynb
+++ b/docs/examples/notebooks/table_mobject.ipynb
@@ -40,7 +40,7 @@
    },
    "source": [
     "## Install Mellea\n",
-    "We run `uv pip install .` to install Mellea."
+    "We run `uv pip install mellea` to install Mellea."
    ]
   },
  {
@@ -51,10 +51,7 @@
    },
    "outputs": [],
    "source": [
-    "import os\n",
-    "!git clone https://github.com/generative-computing/mellea.git --quiet\n",
-    "os.chdir(\"mellea\")\n",
-    "!uv pip install . -qq"
+    "!uv pip install mellea -q"
    ]
   },
  {

From e1f2a52dd7c296d80f6d62e1807cae334d0a44ee Mon Sep 17 00:00:00 2001
From: Kenneth Ocheltree
Date: Thu, 7 Aug 2025 10:48:24 -0400
Subject: [PATCH 08/14] Update to open colab in a new window

---
 README.md | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 99d0d0f4..f6005211 100644
--- a/README.md
+++ b/README.md
@@ -79,17 +79,17 @@ uv run --with mellea docs/examples/tutorial/example.py

 | Notebook | Try in Colab | Goal |
 |----------|--------------|------|
 | Hello, World | Open In Colab | Quick‑start demo |
 | Simple Email | Open In Colab | Using the `m.instruct` primitive |
 | Instruct-Validate-Repair | Open In Colab | Introduces our first generative programming design pattern |
 | Model Options | Open In Colab | Demonstrates how to pass model options through to backends |
 | Sentiment Classifier | Open In Colab | Introduces the `@generative` decorator |
 | Managing Context | Open In Colab | Shows how to construct and manage context in a `MelleaSession` |
 | Generative OOP | Open In Colab | Demonstrates object-oriented generative programming in Mellea |
 | Rich Documents | Open In Colab | A generative program that uses Docling to work with rich-text documents |
 | Composing Generative Functions | Open In Colab | Demonstrates contract-oriented programming in Mellea |
 | `m serve` | Open In Colab | Serve a generative program as an openai-compatible model endpoint |
 | MCP | Open In Colab | Mellea + MCP |

 ### Installing from source

From 42f78d43df17598480fdc85393e0264be822f160 Mon Sep 17 00:00:00 2001
From: jakelorocco <59755218+jakelorocco@users.noreply.github.com>
Date: Thu, 7 Aug 2025 10:58:40 -0400
Subject: [PATCH 09/14] update install commands (#11)

---
 README.md        | 5 +++--
 docs/alora.md    | 4 ++--
 docs/tutorial.md | 2 +-
 3 files changed, 6 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index f6005211..ab6a34b3 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ uv pip install mellea
 ```

 > [!NOTE]
-> If running on an Intel mac, you may get errors related to torch/torchvision versions. Conda maintains updated versions of these packages. You will need to create a conda environment and run `conda install 'torchvision>=0.22.0'` (this should also install pytorch and torchvision-extra). Then, you should be able to run `uv pip install .`. To run the examples, you will need to use `python ` inside the conda environment instead of `uv run --with mellea `.
+> If running on an Intel mac, you may get errors related to torch/torchvision versions. Conda maintains updated versions of these packages. You will need to create a conda environment and run `conda install 'torchvision>=0.22.0'` (this should also install pytorch and torchvision-extra). Then, you should be able to run `uv pip install mellea`. To run the examples, you will need to use `python ` inside the conda environment instead of `uv run --with mellea `.

 > [!NOTE]
 > If you are using python >= 3.13, you may encounter an issue where outlines cannot be installed due to rust compiler issues (`error: can't find Rust compiler`). You can either downgrade to python 3.12 or install the [rust compiler](https://www.rust-lang.org/tools/install) to build the wheel for outlines locally.
@@ -70,7 +70,8 @@ print(m.chat("What is the etymology of mellea?").content)

 Then run it:

-> NOTE: Before we get started, you will need to download and install [ollama](https://ollama.com/). Mellea can work with many different types of backends, but everything in this tutorial will "just work" on a Macbook running IBM's Granite 3.3 8B model.
+> [!NOTE]
+> Before we get started, you will need to download and install [ollama](https://ollama.com/). Mellea can work with many different types of backends, but everything in this tutorial will "just work" on a Macbook running IBM's Granite 3.3 8B model.
 ```shell
 uv run --with mellea docs/examples/tutorial/example.py
 ```

diff --git a/docs/alora.md b/docs/alora.md
index 7af42d7e..75d02037 100644
--- a/docs/alora.md
+++ b/docs/alora.md
@@ -9,7 +9,7 @@ Mellea provides a command-line interface for training and uploading [LoRA](https
 From the root of the repository:

 ```bash
-pip install -e .
+pip install mellea

 huggingface-cli login  # Optional: only needed for uploads
 ```
@@ -82,7 +82,7 @@ This will:
 ## 🛠 Requirements

 - Python 3.8+
-- Install the following dependencies manually or via `pip install -e .`:
+- Install the following dependencies manually or via `pip install mellea`:
   - `transformers`
   - `trl`
   - `peft`

diff --git a/docs/tutorial.md b/docs/tutorial.md
index aaa92b33..3dde23b4 100644
--- a/docs/tutorial.md
+++ b/docs/tutorial.md
@@ -56,7 +56,7 @@ We also recommend that you download and install [uv](https://docs.astral.sh/uv/#
 uv run example_name.py --with mellea
 ```
 > [!NOTE]
-> If running on an Intel mac, you may get errors related to torch/torchvision versions. Conda maintains updated versions of these packages. You will need to create a conda environment and run `conda install 'torchvision>=0.22.0'` (this should also install pytorch and torchvision-extra). Then, you should be able to run `uv pip install .`. To run the examples, you will need to use `python ` inside the conda environment instead of `uv run --with mellea `.
+> If running on an Intel mac, you may get errors related to torch/torchvision versions. Conda maintains updated versions of these packages. You will need to create a conda environment and run `conda install 'torchvision>=0.22.0'` (this should also install pytorch and torchvision-extra). Then, you should be able to run `uv pip install mellea`. To run the examples, you will need to use `python ` inside the conda environment instead of `uv run --with mellea `.

 > [!NOTE]
 > If you are using python >= 3.13, you may encounter an issue where outlines cannot be installed due to rust compiler issues (`error: can't find Rust compiler`). You can either downgrade to python 3.12 or install the [rust compiler](https://www.rust-lang.org/tools/install) to build the wheel for outlines locally.

From 470bc7d7172650d4409338289349f978558b8cd3 Mon Sep 17 00:00:00 2001
From: Kenneth Ocheltree
Date: Thu, 7 Aug 2025 13:27:48 -0400
Subject: [PATCH 10/14] Fix colab link for model_options_example

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index ab6a34b3..8f3f5d75 100644
--- a/README.md
+++ b/README.md
@@ -83,7 +83,7 @@
 | Hello, World | Open In Colab | Quick‑start demo |
 | Simple Email | Open In Colab | Using the `m.instruct` primitive |
 | Instruct-Validate-Repair | Open In Colab | Introduces our first generative programming design pattern |
-| Model Options | Open In Colab | Demonstrates how to pass model options through to backends |
+| Model Options | Open In Colab | Demonstrates how to pass model options through to backends |
 | Sentiment Classifier | Open In Colab | Introduces the `@generative` decorator |
 | Managing Context | Open In Colab | Shows how to construct and manage context in a `MelleaSession` |
 | Generative OOP | Open In Colab | Demonstrates object-oriented generative programming in Mellea |

From c939f0cec48dd908680ca6acb07d5dfe7318fb75 Mon Sep 17 00:00:00 2001
From: Kenneth Ocheltree
Date: Thu, 7 Aug 2025 14:37:36 -0400
Subject: [PATCH 11/14] Fix notebook issues

---
 .../compositionality_with_generative_slots.ipynb    | 4 +---
 docs/examples/notebooks/mcp_example.ipynb           | 7 ++++---
 docs/examples/notebooks/model_options_example.ipynb | 2 +-
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/docs/examples/notebooks/compositionality_with_generative_slots.ipynb b/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
index 010aa828..ed73be81 100644
--- a/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
+++ b/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
@@ -115,10 +115,8 @@
    ]
   },
  {
-   "cell_type": "code",
-   "execution_count": null,
+   "cell_type": "markdown",
    "metadata": {},
-   "outputs": [],
    "source": [
     "## Specify Contract Functions\n",
     "To help us compose these libraries, we introduce a set of contracts that gate function composition and then use those contracts to short-circuit non-sensical compositions of library components:"

diff --git a/docs/examples/notebooks/mcp_example.ipynb b/docs/examples/notebooks/mcp_example.ipynb
index 8fb792da..dee307e1 100644
--- a/docs/examples/notebooks/mcp_example.ipynb
+++ b/docs/examples/notebooks/mcp_example.ipynb
@@ -17,9 +17,9 @@
    "id": "ZIu6B1Ht927Z"
   },
   "source": [
-    "## Install Ollama\n",
+    "## Install Ollama and MCP\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we install ollama and mcp"
    ]
   },
  {
@@ -31,7 +31,8 @@
    "outputs": [],
    "source": [
     "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!nohup ollama serve &\n",
+    "!uv pip install mcp -q"
    ]
   },
  {

diff --git a/docs/examples/notebooks/model_options_example.ipynb b/docs/examples/notebooks/model_options_example.ipynb
index 21429ab0..8b40fe84 100644
--- a/docs/examples/notebooks/model_options_example.ipynb
+++ b/docs/examples/notebooks/model_options_example.ipynb
@@ -82,7 +82,7 @@
   "cell_type": "markdown",
   "metadata": {},
   "source": [
-    "## Spefify Options on Backend Initialization\n",
+    "## Specify Options on Backend Initialization\n",
    "You can add any key-value option pair supported by the backend to the model_options dictionary, and those options are passed along to the inference engine (even if a Mellea-specific ModelOption is defined for that option). This means you can safely copy over model option parameters from exiting codebases as-is:"
   ]
  },

From e2d96d4ffd29098b5f7a603170b162858abe3e54 Mon Sep 17 00:00:00 2001
From: Kenneth Ocheltree
Date: Thu, 7 Aug 2025 15:38:11 -0400
Subject: [PATCH 12/14] Fix model options

---
 docs/examples/notebooks/model_options_example.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/examples/notebooks/model_options_example.ipynb b/docs/examples/notebooks/model_options_example.ipynb
index 8b40fe84..51de395c 100644
--- a/docs/examples/notebooks/model_options_example.ipynb
+++ b/docs/examples/notebooks/model_options_example.ipynb
@@ -118,7 +118,7 @@
    "outputs": [],
    "source": [
     "answer = m.instruct(\n",
-    "    \"What is 2x2?\", model_options={\"temperature\": 0.5, \"num_predict\": 5}\n",
+    "    \"What is 2x2?\", model_options={\"temperature\": 0.5, \"num_predict\": 50}\n",
     ")\n",
     "print(str(answer))"
    ]

From c1be03fd9e77241e24ee2884f1a2d8a982f88d0d Mon Sep 17 00:00:00 2001
From: Kenneth Ocheltree
Date: Thu, 7 Aug 2025 17:50:36 -0400
Subject: [PATCH 13/14] Wrap cell output

---
 .../compositionality_with_generative_slots.ipynb   | 10 +++++++---
 docs/examples/notebooks/context_example.ipynb      | 10 +++++++---
 docs/examples/notebooks/document_mobject.ipynb     | 10 +++++++---
 docs/examples/notebooks/example.ipynb              | 10 +++++++---
 .../notebooks/instruct_validate_repair.ipynb       | 10 +++++++---
 docs/examples/notebooks/m_serve_example.ipynb      | 10 +++++++---
 docs/examples/notebooks/mcp_example.ipynb          | 12 ++++++++----
 .../notebooks/model_options_example.ipynb          | 10 +++++++---
 docs/examples/notebooks/sentiment_classifier.ipynb | 10 +++++++---
 docs/examples/notebooks/simple_email.ipynb         | 10 +++++++---
 docs/examples/notebooks/table_mobject.ipynb        | 10 +++++++---
 11 files changed, 78 insertions(+), 34 deletions(-)

diff --git a/docs/examples/notebooks/compositionality_with_generative_slots.ipynb b/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
index ed73be81..08a06e65 100644
--- a/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
+++ b/docs/examples/notebooks/compositionality_with_generative_slots.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/context_example.ipynb b/docs/examples/notebooks/context_example.ipynb
index e182d2f5..20c62882 100644
--- a/docs/examples/notebooks/context_example.ipynb
+++ b/docs/examples/notebooks/context_example.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/document_mobject.ipynb b/docs/examples/notebooks/document_mobject.ipynb
index b13b2c90..460f0171 100644
--- a/docs/examples/notebooks/document_mobject.ipynb
+++ b/docs/examples/notebooks/document_mobject.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/example.ipynb b/docs/examples/notebooks/example.ipynb
index e8714b3f..99ff57f4 100644
--- a/docs/examples/notebooks/example.ipynb
+++ b/docs/examples/notebooks/example.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/instruct_validate_repair.ipynb b/docs/examples/notebooks/instruct_validate_repair.ipynb
index 590a69d2..5fa75a02 100644
--- a/docs/examples/notebooks/instruct_validate_repair.ipynb
+++ b/docs/examples/notebooks/instruct_validate_repair.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/m_serve_example.ipynb b/docs/examples/notebooks/m_serve_example.ipynb
index 7b3ea7d5..d2b6684c 100644
--- a/docs/examples/notebooks/m_serve_example.ipynb
+++ b/docs/examples/notebooks/m_serve_example.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/mcp_example.ipynb b/docs/examples/notebooks/mcp_example.ipynb
index dee307e1..1a599685 100644
--- a/docs/examples/notebooks/mcp_example.ipynb
+++ b/docs/examples/notebooks/mcp_example.ipynb
@@ -19,7 +19,7 @@
    "source": [
     "## Install Ollama and MCP\n",
     "\n",
-    "Before we get started with Mellea, we install ollama and mcp"
+    "Before we get started with Mellea, we download, install and serve ollama, and install mcp. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -30,9 +30,13 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &\n",
-    "!uv pip install mcp -q"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "!uv pip install mcp -q\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/model_options_example.ipynb b/docs/examples/notebooks/model_options_example.ipynb
index 51de395c..0b7433bc 100644
--- a/docs/examples/notebooks/model_options_example.ipynb
+++ b/docs/examples/notebooks/model_options_example.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/sentiment_classifier.ipynb b/docs/examples/notebooks/sentiment_classifier.ipynb
index 372af834..abd0f32c 100644
--- a/docs/examples/notebooks/sentiment_classifier.ipynb
+++ b/docs/examples/notebooks/sentiment_classifier.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/simple_email.ipynb b/docs/examples/notebooks/simple_email.ipynb
index e1369f19..43e96409 100644
--- a/docs/examples/notebooks/simple_email.ipynb
+++ b/docs/examples/notebooks/simple_email.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

diff --git a/docs/examples/notebooks/table_mobject.ipynb b/docs/examples/notebooks/table_mobject.ipynb
index 791709ea..708b1729 100644
--- a/docs/examples/notebooks/table_mobject.ipynb
+++ b/docs/examples/notebooks/table_mobject.ipynb
@@ -18,7 +18,7 @@
    "source": [
     "## Install Ollama\n",
     "\n",
-    "Before we get started with Mellea, we download and install ollama."
+    "Before we get started with Mellea, we download, install and serve ollama. We define set_css to wrap Colab output."
    ]
   },
  {
@@ -29,8 +29,12 @@
    },
    "outputs": [],
    "source": [
-    "!curl -fsSL https://ollama.com/install.sh | sh\n",
-    "!nohup ollama serve &"
+    "!curl -fsSL https://ollama.com/install.sh | sh > /dev/null\n",
+    "!nohup ollama serve >/dev/null 2>&1 &\n",
+    "\n",
+    "from IPython.display import HTML, display\n",
+    "def set_css(): display(HTML('\\n\\n'))\n",
+    "get_ipython().events.register('pre_run_cell',set_css)"
    ]
   },
  {

From 23be134da49e247bc37e8f363873b31f23547855 Mon Sep 17 00:00:00 2001
From: Jake LoRocco
Date: Fri, 8 Aug 2025 11:24:37 -0400
Subject: [PATCH 14/14] update description

---
 mellea/backends/aloras/huggingface/granite_aloras.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/mellea/backends/aloras/huggingface/granite_aloras.py b/mellea/backends/aloras/huggingface/granite_aloras.py
index f5d2ffbb..87dab75c 100644
--- a/mellea/backends/aloras/huggingface/granite_aloras.py
+++ b/mellea/backends/aloras/huggingface/granite_aloras.py
@@ -10,7 +10,10 @@


 class HFConstraintAlora(HFAlora):
-    """The [Requirement Checking ALora for Granite 3.2 8B](https://huggingface.co/ibm-granite/granite-3.2-8b-alora-requirement-check) checks if the specified requirement was satisfied by the most recent model generation. Only one requirement is checked at a time."""
+    """The Requirement Checking ALora for Granite checks if the specified requirement was satisfied by the most recent model generation. Only one requirement is checked at a time.
+
+    Currently supports [Granite 3.2 8B](https://huggingface.co/ibm-granite/granite-3.2-8b-alora-requirement-check) and [Granite 3.3 8B](https://huggingface.co/ibm-granite/granite-3.3-8b-alora-requirement-check) by default.
+    """

     def __init__(
         self,
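
Usage sketch (reviewer note, not part of the patches): the series reworks `HFConstraintAlora` so that the constraint template and the aLoRA offset arithmetic vary per Granite model. The snippet below mirrors `test_constraint_alora` as it stands after PATCH 04. The import paths, the `MelleaSession(...)` construction, and the `response=` keyword are assumptions — they are used but never shown in the hunks — while every other name comes directly from the diffs above.

```python
# Sketch based on the diffs in this series; import paths are assumed, not confirmed.
from mellea import MelleaSession  # assumed entry point
from mellea.backends.huggingface import LocalHFBackend  # assumed module path
from mellea.backends.aloras.huggingface.granite_aloras import add_granite_aloras
from mellea.backends.types import ModelOption

backend = LocalHFBackend(model_id="ibm-granite/granite-3.2-8b-instruct")
add_granite_aloras(backend)  # after PATCH 03, raises ValueError for unknown model ids

m = MelleaSession(backend)  # assumed constructor; the tests only show `self.m`
answer = m.instruct(
    "Corporate wants you to find the difference between these two strings: "
    "aaaaaaaaaa aaaaabaaaa. Be concise and don't write code to answer the question.",
    model_options={ModelOption.MAX_NEW_TOKENS: 300},
)

# The constraint is interpolated into the single {} slot of constraint_prompt.
alora_output = backend.get_aloras()[0].generate_using_strings(
    input="Find the difference between these two strings: aaaaaaaaaa aaaaabaaaa",
    response=str(answer),  # assumed keyword; not visible in the hunks
    constraint="The answer mentions that one string has a b in the middle.",
    force_yn=False,
)
assert alora_output in ["Y", "N"]
```

The offset logic the series adds works as follows: with `include_constraint_in_alora_offset=False` (granite-3.2), the adapter activates only over the generation prompt (`generation_prompt_len - 1` tokens from the end); with `True` (granite-3.3), it activates `constraint_len + generation_prompt_len - 2` tokens from the end, so the requirement text itself is processed with adapter weights.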