diff --git a/README.md b/README.md index 047ac3767..ffcd915b2 100644 --- a/README.md +++ b/README.md @@ -34,13 +34,13 @@ Check out our interactive [demo](https://aphp.github.io/edsnlp/demo/) ! You can install EDS-NLP via `pip`. We recommend pinning the library version in your projects, or use a strict package manager like [Poetry](https://python-poetry.org/). ```shell -pip install edsnlp==0.15.0 +pip install edsnlp==0.16.0 ``` or if you want to use the trainable components (using pytorch) ```shell -pip install "edsnlp[ml]==0.15.0" +pip install "edsnlp[ml]==0.16.0" ``` ### A first pipeline diff --git a/changelog.md b/changelog.md index 9e752f924..41aad3cee 100644 --- a/changelog.md +++ b/changelog.md @@ -1,12 +1,12 @@ # Changelog -## Unreleased +## v0.16.0 (2025-03-26) ### Added -- Hyperparameter Tuning for EDS-NLP: Introduced a new script `edsnlp.tune` for hyperparameter tuning using Optuna. This feature allows users to efficiently optimize model parameters with options for single-phase or two-phase tuning strategies. Includes support for parameter importance analysis, visualization, pruning, and automatic handling of GPU time budgets. -- Provided a [detailed tutorial](./docs/tutorials/tuning.md) on hyperparameter tuning, covering usage scenarios and configuration options. +- Hyperparameter Tuning for EDS-NLP: introduced a new script `edsnlp.tune` for hyperparameter tuning using Optuna. This feature allows users to efficiently optimize model parameters with options for single-phase or two-phase tuning strategies. Includes support for parameter importance analysis, visualization, pruning, and automatic handling of GPU time budgets. +- Provided a [detailed tutorial](https://aphp.github.io/edsnlp/v0.16.0/tutorials/tuning/) on hyperparameter tuning, covering usage scenarios and configuration options. - `ScheduledOptimizer` (e.g., `@core: "optimizer"`) now supports importing optimizers using their qualified name (e.g., `optim: "torch.optim.Adam"`). 
-- `edsnlp/ner_crf.` now compute confidence score on spans. +- `eds.ner_crf` now computes confidence score on spans. ### Changed @@ -25,7 +25,7 @@ - Raise an error if the batch size in `stream.shuffle(batch_size=...)` is not compatible with the stream - `eds.split` now keeps doc and span attributes in the sub-documents. -# v0.15.0 (2024-12-13) +## v0.15.0 (2024-12-13) ### Added diff --git a/docs/index.md b/docs/index.md index 3397951e2..7e8d7602b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -15,13 +15,13 @@ Check out our interactive [demo](https://aphp.github.io/edsnlp/demo/) ! You can install EDS-NLP via `pip`. We recommend pinning the library version in your projects, or use a strict package manager like [Poetry](https://python-poetry.org/). ```{: data-md-color-scheme="slate" } -pip install edsnlp==0.15.0 +pip install edsnlp==0.16.0 ``` or if you want to use the trainable components (using pytorch) ```{: data-md-color-scheme="slate" } -pip install "edsnlp[ml]==0.15.0" +pip install "edsnlp[ml]==0.16.0" ``` ### A first pipeline diff --git a/docs/tutorials/training.md b/docs/tutorials/training.md index 3ec95f1f9..31e105ae6 100644 --- a/docs/tutorials/training.md +++ b/docs/tutorials/training.md @@ -37,7 +37,7 @@ readme = "README.md" requires-python = ">3.7.1,<4.0" dependencies = [ - "edsnlp[ml]>=0.15.0", + "edsnlp[ml]>=0.16.0", "sentencepiece>=0.1.96" ] diff --git a/docs/tutorials/tuning.md b/docs/tutorials/tuning.md index d0189d839..f5cf12c2a 100644 --- a/docs/tutorials/tuning.md +++ b/docs/tutorials/tuning.md @@ -35,7 +35,7 @@ readme = "README.md" requires-python = ">3.7.1,<4.0" dependencies = [ - "edsnlp[ml]>=0.15.0", + "edsnlp[ml]>=0.16.0", "sentencepiece>=0.1.96", "optuna>=4.0.0", "plotly>=5.18.0", diff --git a/edsnlp/__init__.py b/edsnlp/__init__.py index a2391cc3c..373f9b69c 100644 --- a/edsnlp/__init__.py +++ b/edsnlp/__init__.py @@ -15,7 +15,7 @@ import edsnlp.pipes from . 
import reducers -__version__ = "0.15.0" +__version__ = "0.16.0" BASE_DIR = Path(__file__).parent diff --git a/pyproject.toml b/pyproject.toml index 217c38527..f0d016bd5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -76,7 +76,8 @@ ml = [ "rich-logger>=0.3.1", "torch>=1.13.0", "foldedtensor>=0.3.4", - "safetensors>=0.3.0", + "safetensors>=0.3.0; python_version>='3.8'", + "safetensors>=0.3.0,<0.5.0; python_version<'3.8'", "transformers>=4.0.0,<5.0.0", "accelerate>=0.20.3,<1.0.0", ]