From 4f235d77211617b75ac8882a36ad1be7e95ce577 Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 10:05:08 -0500 Subject: [PATCH 01/31] pytest workflow --- .github/workflows/pytest.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/workflows/pytest.yml diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml new file mode 100644 index 0000000..2df7e51 --- /dev/null +++ b/.github/workflows/pytest.yml @@ -0,0 +1,27 @@ +name: pytest + +on: + push: + branches: [main] + pull_request: + branches: [main] + workflow_dispatch: + + +jobs: + run-tests: + runs-on: ubuntu-latest + steps: + - name: Clone repo + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Setup python + uses: actions/setup-python@v4 + with: + python-version: 3.11 + + - name: Install dependencies + run: pip install griffe2md rich pytest + + - name: Run tests + run: pytest tests From 9dc61480948bec3d14d74f3d1666336369ae6691 Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 10:05:18 -0500 Subject: [PATCH 02/31] tests for `coreforecast` --- tests/test_coreforecast.py | 65 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 tests/test_coreforecast.py diff --git a/tests/test_coreforecast.py b/tests/test_coreforecast.py new file mode 100644 index 0000000..de8dcf0 --- /dev/null +++ b/tests/test_coreforecast.py @@ -0,0 +1,65 @@ +import sys + +sys.path.append("..") +from parser import MkDocstringsParser + +import pytest + + +@pytest.fixture +def setup_parser(): + parser = MkDocstringsParser() + yield parser + + +def test_regular_fn(setup_parser): + parser = setup_parser + regular_fn = """::: coreforecast.differences.num_diffs""" + output = parser.process_markdown(regular_fn) + + assert output == """### `num_diffs` + +```python +num_diffs(x, max_d=1) +``` + +Find the optimal number of differences + +**Parameters:** + +Name | Type | Description | Default +---- | 
---- | ----------- | ------- +`x` | [ndarray](#numpy.ndarray) | Array with the time series. | *required* +`max_d` | [int](#int) | Maximum number of differences to consider. Defaults to 1. | 1 + +**Returns:** + +Name | Type | Description +---- | ---- | ----------- +`int` | [int](#int) | Optimal number of differences. +""" + +def test_fn_w_decorator(setup_parser): + parser = setup_parser + fn_w_decorator = """::: coreforecast.expanding.expanding_mean""" + output = parser.process_markdown(fn_w_decorator) + assert output == """### `expanding_mean` + +```python +expanding_mean(x) +``` + +Compute the expanding_mean of the input array. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`x` | np.ndarray | Input array. | *required* + +**Returns:** + +Type | Description +---- | ----------- +| np.ndarray: Array with the expanding statistic +""" From 02619abffc9b1abfb526c00fc83f15ac60c03f82 Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 10:14:22 -0500 Subject: [PATCH 03/31] append path correctly --- tests/test_coreforecast.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_coreforecast.py b/tests/test_coreforecast.py index de8dcf0..c0325ad 100644 --- a/tests/test_coreforecast.py +++ b/tests/test_coreforecast.py @@ -1,6 +1,7 @@ +import os import sys -sys.path.append("..") +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) from parser import MkDocstringsParser import pytest From 27ef88e70aa770db6ea15b28e3a21238a7e6c2ce Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 10:14:29 -0500 Subject: [PATCH 04/31] pytest.ini --- pytest.ini | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 pytest.ini diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..6e1cfdf --- /dev/null +++ b/pytest.ini @@ -0,0 +1,2 @@ +[tool:pytest] +testpaths = tests \ No newline at end of file From 93ba70eeed489a40a3a2f1771a6ff6d0a756f800 Mon Sep 17 00:00:00 2001 From: 
deven367 Date: Tue, 4 Nov 2025 10:15:48 -0500 Subject: [PATCH 05/31] pyyaml dep --- .github/workflows/pytest.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 2df7e51..7d94ee2 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -21,7 +21,7 @@ jobs: python-version: 3.11 - name: Install dependencies - run: pip install griffe2md rich pytest + run: pip install griffe2md rich pyyaml pytest - name: Run tests run: pytest tests From c1abeb2ee949b1cb6eb6eaccda474ade6c2c407b Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 10:17:56 -0500 Subject: [PATCH 06/31] install coreforecast --- .github/workflows/pytest.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 7d94ee2..6451419 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -21,7 +21,7 @@ jobs: python-version: 3.11 - name: Install dependencies - run: pip install griffe2md rich pyyaml pytest + run: pip install griffe2md rich pyyaml coreforecast pytest - name: Run tests run: pytest tests From 2eff2ce7c93f0a0e7cd2d0cef8071c745912a51a Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 13:29:18 -0500 Subject: [PATCH 07/31] migrate common logic to conftest --- tests/conftest.py | 13 +++++++++++++ tests/test_coreforecast.py | 15 --------------- 2 files changed, 13 insertions(+), 15 deletions(-) create mode 100644 tests/conftest.py diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..e5c7f14 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,13 @@ +import os +import sys + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) +from parser import MkDocstringsParser + +import pytest + + +@pytest.fixture(scope="module") +def setup_parser(): + parser = MkDocstringsParser() + yield parser diff --git a/tests/test_coreforecast.py 
b/tests/test_coreforecast.py index c0325ad..3ee569f 100644 --- a/tests/test_coreforecast.py +++ b/tests/test_coreforecast.py @@ -1,18 +1,3 @@ -import os -import sys - -sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) -from parser import MkDocstringsParser - -import pytest - - -@pytest.fixture -def setup_parser(): - parser = MkDocstringsParser() - yield parser - - def test_regular_fn(setup_parser): parser = setup_parser regular_fn = """::: coreforecast.differences.num_diffs""" From 1ebdcef2cbbbd290b43ae28980bec7fb12e6d25d Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 13:29:52 -0500 Subject: [PATCH 08/31] tests for utilsforecast --- tests/test_utilsforecast.py | 39 +++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 tests/test_utilsforecast.py diff --git a/tests/test_utilsforecast.py b/tests/test_utilsforecast.py new file mode 100644 index 0000000..5460604 --- /dev/null +++ b/tests/test_utilsforecast.py @@ -0,0 +1,39 @@ +def test_utilsforecast_rmae(setup_parser): + fn = """::: utilsforecast.losses.rmae + handler: python + options: + docstring_style: google + heading_level: 3 + show_root_heading: true + show_source: true""" + + output = setup_parser.process_markdown(fn) + + assert output == """### `rmae` + +```python +rmae(df, models, baseline, id_col='unique_id', target_col='y') +``` + +Relative Mean Absolute Error (RMAE) + +Calculates the RAME between two sets of forecasts (from two different forecasting methods). +A number smaller than one implies that the forecast in the +numerator is better than the forecast in the denominator. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`df` | pandas or polars DataFrame | Input dataframe with id, times, actuals and predictions. | *required* +`models` | list of str | Columns that identify the models predictions. | *required* +`baseline` | [str](#str) | Column that identifies the baseline model predictions. 
| *required* +`id_col` | [str](#str) | Column that identifies each serie. Defaults to 'unique_id'. | 'unique_id' +`target_col` | [str](#str) | Column that contains the target. Defaults to 'y'. | 'y' + +**Returns:** + +Type | Description +---- | ----------- +[DFType](#utilsforecast.compat.DFType) | pandas or polars DataFrame: dataframe with one row per id and one column per model. +""" From 523b8e2a98f2fcfab3ef612b95bacf33025d9b8a Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 13:45:55 -0500 Subject: [PATCH 09/31] install utilsforecast --- .github/workflows/pytest.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 6451419..7dd08d8 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -21,7 +21,7 @@ jobs: python-version: 3.11 - name: Install dependencies - run: pip install griffe2md rich pyyaml coreforecast pytest + run: pip install griffe2md rich pyyaml coreforecast utilsforecast pytest - name: Run tests run: pytest tests From 24ddd6b1243a46acbce84df51f148bb81490f2b1 Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 21:13:34 -0500 Subject: [PATCH 10/31] revise logic for default options --- parser.py | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/parser.py b/parser.py index 701bc11..9d50523 100644 --- a/parser.py +++ b/parser.py @@ -93,26 +93,17 @@ def generate_documentation(self, module_path: str, options: Dict[str, Any]) -> s if member.docstring: member.docstring.parsed = griffe.parse_google(member.docstring) - # Create ConfigDict with the options - # Adjust default options based on object type - if hasattr(obj, "kind") and obj.kind.value == "function": - # Configuration for functions - default_options = { - "docstring_section_style": "table", - "heading_level": 3, - "show_root_heading": True, - "show_source": True, - "show_signature": True, - } - else: + default_options = { + 
"docstring_section_style": "table", + "heading_level": 3, + "show_root_heading": True, + "show_source": True, + } + + + if hasattr(obj, "kind") and obj.kind.value != "function": # Configuration for classes and modules - default_options = { - "docstring_section_style": "table", - "heading_level": 3, - "show_root_heading": True, - "show_source": True, - "summary": {"functions": False}, - } + default_options["summary"] = {"functions": False} default_options.update(options) config = ConfigDict(**default_options) From 7c257e9d824c5bdbd54f72fa8fff0b07d46d674a Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 21:13:52 -0500 Subject: [PATCH 11/31] tests for `datasetsforecast` --- .github/workflows/pytest.yml | 2 +- tests/test_datasetsforecast.py | 75 ++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 1 deletion(-) create mode 100644 tests/test_datasetsforecast.py diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 7dd08d8..372e0b0 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -21,7 +21,7 @@ jobs: python-version: 3.11 - name: Install dependencies - run: pip install griffe2md rich pyyaml coreforecast utilsforecast pytest + run: pip install griffe2md rich pyyaml coreforecast utilsforecast datasetsforecast pytest - name: Run tests run: pytest tests diff --git a/tests/test_datasetsforecast.py b/tests/test_datasetsforecast.py new file mode 100644 index 0000000..bfcbc33 --- /dev/null +++ b/tests/test_datasetsforecast.py @@ -0,0 +1,75 @@ +def test_yearly_dataclass(setup_parser): + fn = "::: datasetsforecast.m3.Yearly" + rendered = setup_parser.process_markdown(fn) + + assert rendered == """### `Yearly` + +```python +Yearly( + seasonality=1, + horizon=6, + freq="Y", + sheet_name="M3Year", + name="Yearly", + n_ts=645, +) +``` + +#### `Yearly.freq` + +```python +freq: str = 'Y' +``` + +#### `Yearly.horizon` + +```python +horizon: int = 6 +``` + +#### `Yearly.n_ts` + +```python +n_ts: int = 645 
+``` + +#### `Yearly.name` + +```python +name: str = 'Yearly' +``` + +#### `Yearly.seasonality` + +```python +seasonality: int = 1 +``` + +#### `Yearly.sheet_name` + +```python +sheet_name: str = 'M3Year' +``` +""" + + +def test_download_file(setup_parser): + fn = """::: datasetsforecast.utils.download_file""" + rendered = setup_parser.process_markdown(fn) + + assert rendered == """### `download_file` + +```python +download_file(directory, source_url, decompress=False) +``` + +Download data from source_ulr inside directory. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`directory` | ([str](#str), [Path](#pathlib.Path)) | Custom directory where data will be downloaded. | *required* +`source_url` | [str](#str) | URL where data is hosted. | *required* +`decompress` | [bool](#bool) | Wheter decompress downloaded file. Default False. | False +""" \ No newline at end of file From 08d3705bff0d7a99de520d1b09493211a244e7ff Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 21:14:21 -0500 Subject: [PATCH 12/31] make pytest output verbose --- pytest.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pytest.ini b/pytest.ini index 6e1cfdf..949b1ee 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,2 +1,3 @@ [tool:pytest] -testpaths = tests \ No newline at end of file +testpaths = tests +addopts = -v \ No newline at end of file From 672b415b56ee8350d3ef3c34ad8cac18e3311a4f Mon Sep 17 00:00:00 2001 From: deven367 Date: Tue, 4 Nov 2025 21:14:35 -0500 Subject: [PATCH 13/31] clean-up --- .github/workflows/pytest.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index 372e0b0..b93d676 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -24,4 +24,4 @@ jobs: run: pip install griffe2md rich pyyaml coreforecast utilsforecast datasetsforecast pytest - name: Run tests - run: pytest tests + run: pytest From 
217b416087559ea2638062d1d1404ef97940a651 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:06:29 +0000 Subject: [PATCH 14/31] mark tests as `datasets` --- tests/test_datasetsforecast.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/test_datasetsforecast.py b/tests/test_datasetsforecast.py index bfcbc33..cda23bb 100644 --- a/tests/test_datasetsforecast.py +++ b/tests/test_datasetsforecast.py @@ -1,3 +1,7 @@ +import pytest + + +@pytest.mark.datasets def test_yearly_dataclass(setup_parser): fn = "::: datasetsforecast.m3.Yearly" rendered = setup_parser.process_markdown(fn) @@ -52,7 +56,7 @@ def test_yearly_dataclass(setup_parser): ``` """ - +@pytest.mark.datasets def test_download_file(setup_parser): fn = """::: datasetsforecast.utils.download_file""" rendered = setup_parser.process_markdown(fn) From 7245a94dbbc0caaa4da166519aebe7335f971f92 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:06:43 +0000 Subject: [PATCH 15/31] fix ini file and add marker --- pytest.ini | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pytest.ini b/pytest.ini index 949b1ee..102ad37 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,3 +1,5 @@ -[tool:pytest] +[pytest] testpaths = tests -addopts = -v \ No newline at end of file +addopts = -vv +markers = + datasets: tests for datasetsforecast \ No newline at end of file From 29ed0c54c43852fa82df52226908fc835e2e1153 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:08:59 +0000 Subject: [PATCH 16/31] skip tests for datasetsforecast for now (unknown issue for now, runs locally but not in actions) --- pytest.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest.ini b/pytest.ini index 102ad37..3feab2d 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] testpaths = tests 
-addopts = -vv +addopts = -vv -m "not datasets" markers = datasets: tests for datasetsforecast \ No newline at end of file From e07a86450c211b5fb80b9702b6522c78afc4527d Mon Sep 17 00:00:00 2001 From: deven367 Date: Mon, 10 Nov 2025 11:21:59 -0500 Subject: [PATCH 17/31] update README with the Notion link --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index a391c4b..e1ee706 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,9 @@ # mkdocstring-parser -This repo creates a simple parser for mkdocstrings. The idea is simple, given a `mkdocstrings` signature block, replace it with it rendered markdown in-place +This repo creates a simple parser for mkdocstrings. The idea is simple, given +a `mkdocstrings` signature block, replace it with its rendered markdown in-place. + +To view the complete guide on generating Nixtlaverse documentation, click [here](https://www.notion.so/nixtla/Env-Setup-1aa34e7ecc8c804096f7d0915aa4f2d5?source=copy_link#2a334e7ecc8c8006901aed0eb6ebd2f0) ## example From 2624a94d6dc9f1907701f1734f9c6ba9dd490691 Mon Sep 17 00:00:00 2001 From: deven367 Date: Thu, 13 Nov 2025 10:08:33 -0500 Subject: [PATCH 18/31] tests for `hierarchicalforecast` --- tests/test_hierarchicalforecast.py | 165 +++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 tests/test_hierarchicalforecast.py diff --git a/tests/test_hierarchicalforecast.py b/tests/test_hierarchicalforecast.py new file mode 100644 index 0000000..655b9fa --- /dev/null +++ b/tests/test_hierarchicalforecast.py @@ -0,0 +1,165 @@ +def test_make_future_dataframe(setup_parser): + fn = """::: hierarchicalforecast.utils.make_future_dataframe""" + output = setup_parser.process_markdown(fn) + assert output == """### `make_future_dataframe` + +```python +make_future_dataframe(df, freq, h, id_col='unique_id', time_col='ds') +``` + +Create future dataframe for forecasting.
+ +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`df` | [Frame](#narwhals.typing.Frame) | Dataframe with ids, times and values for the exogenous regressors. | *required* +`freq` | [Union](#typing.Union)\[[str](#str), [int](#int)\] | Frequency of the data. Must be a valid pandas or polars offset alias, or an integer. | *required* +`h` | [int](#int) | Forecast horizon. | *required* +`id_col` | [str](#str) | Column that identifies each serie. Default is 'unique_id'. | 'unique_id' +`time_col` | [str](#str) | Column that identifies each timestep, its values can be timestamps or integers. Default is 'ds'. | 'ds' + +**Returns:** + +Name | Type | Description +---- | ---- | ----------- +`FrameT` | [FrameT](#narwhals.typing.FrameT) | DataFrame with future values. +""" + +def test_evaluate(setup_parser): + fn = "::: hierarchicalforecast.evaluation.evaluate" + output = setup_parser.process_markdown(fn) + assert output == """### `evaluate` + +```python +evaluate( + df, + metrics, + tags, + models=None, + train_df=None, + level=None, + id_col="unique_id", + time_col="ds", + target_col="y", + agg_fn="mean", + benchmark=None, +) +``` + +Evaluate hierarchical forecast using different metrics. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`df` | pandas, polars, dask or spark DataFrame | Forecasts to evaluate. Must have `id_col`, `time_col`, `target_col` and models' predictions. | *required* +`metrics` | list of callable | Functions with arguments `df`, `models`, `id_col`, `target_col` and optionally `train_df`. | *required* +`tags` | [dict](#dict) | Each key is a level in the hierarchy and its value contains tags associated to that level. Each key is a level in the hierarchy and its value contains tags associated to that level. | *required* +`models` | list of str | Names of the models to evaluate. If `None` will use every column in the dataframe after removing id, time and target. 
| None +`train_df` | pandas, polars, dask or spark DataFrame | Training set. Used to evaluate metrics such as `mase`. | None +`level` | list of int | Prediction interval levels. Used to compute losses that rely on quantiles. | None +`id_col` | [str](#str) | Column that identifies each serie. | 'unique_id' +`time_col` | [str](#str) | Column that identifies each timestep, its values can be timestamps or integers. | 'ds' +`target_col` | [str](#str) | Column that contains the target. | 'y' +`agg_fn` | [str](#str) | Statistic to compute on the scores by id to reduce them to a single number. | 'mean' +`benchmark` | [str](#str) | If passed, evaluators are scaled by the error of this benchmark model. | None + +**Returns:** + +Type | Description +---- | ----------- +[FrameT](#narwhals.typing.FrameT) | pandas, polars DataFrame: Metrics with one row per (id, metric) combination and one column per model. If `agg_fn` is not `None`, there is only one row per metric. +""" + +def test_permbu(setup_parser): + fn = """::: hierarchicalforecast.probabilistic_methods.PERMBU + handler: python + options: + docstring_style: google + members: + - get_samples + heading_level: 3 + show_root_heading: true + show_source: true +""" + output = setup_parser.process_markdown(fn) + assert output == """### `PERMBU` + +```python +PERMBU( + S, + tags, + y_hat, + y_insample, + y_hat_insample, + sigmah, + num_samples=None, + seed=0, + P=None, +) +``` + +PERMBU Probabilistic Reconciliation Class. + +The PERMBU method leverages empirical bottom-level marginal distributions +with empirical copula functions (describing bottom-level dependencies) to +generate the distribution of aggregate-level distributions using BottomUp +reconciliation. The sample reordering technique in the PERMBU method reinjects +multivariate dependencies into independent bottom-level samples. + +``` +Algorithm: +1. For all series compute conditional marginals distributions. +2. 
Compute residuals $\hat{\epsilon}_{i,t}$ and obtain rank permutations. +2. Obtain K-sample from the bottom-level series predictions. +3. Apply recursively through the hierarchical structure:
+ 3.1. For a given aggregate series $i$ and its children series:
+ 3.2. Obtain children's empirical joint using sample reordering copula.
+ 3.2. From the children's joint obtain the aggregate series's samples. +``` + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`S` | [Union](#typing.Union)\[[ndarray](#numpy.ndarray), [spmatrix](#scipy.sparse.spmatrix)\] | np.array, summing matrix of size (`base`, `bottom`). | *required* +`tags` | [dict](#dict)\[[str](#str), [ndarray](#numpy.ndarray)\] | Each key is a level and each value its `S` indices. | *required* +`y_insample` | [ndarray](#numpy.ndarray) | Insample values of size (`base`, `insample_size`). | *required* +`y_hat_insample` | [ndarray](#numpy.ndarray) | Insample point forecasts of size (`base`, `insample_size`). | *required* +`sigmah` | [ndarray](#numpy.ndarray) | np.array, forecast standard dev. of size (`base`, `horizon`). | *required* +`num_samples` | [Optional](#typing.Optional)\[[int](#int)\] | int, number of normal prediction samples generated. | None +`seed` | [int](#int) | int, random seed for numpy generator's replicability. | 0 + +
+References + +- [Taieb, Souhaib Ben and Taylor, James W and Hyndman, Rob J. (2017). "Coherent probabilistic forecasts for hierarchical time series. International conference on machine learning ICML."](https://proceedings.mlr.press/v70/taieb17a.html) + +
+ +#### `PERMBU.get_samples` + +```python +get_samples(num_samples=None) +``` + +PERMBU Sample Reconciliation Method. + +Applies PERMBU reconciliation method as defined by Taieb et. al 2017. +Generating independent base prediction samples, restoring its multivariate +dependence using estimated copula with reordering and applying the BottomUp +aggregation to the new samples. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`num_samples` | [Optional](#typing.Optional)\[[int](#int)\] | int, number of samples generated from coherent distribution. | None + +**Returns:** + +Name | Type | Description +---- | ---- | ----------- +`samples` | | Coherent samples of size (`base`, `horizon`, `num_samples`). +""" \ No newline at end of file From de986556c783f6e2e629e884787b131378e24dd3 Mon Sep 17 00:00:00 2001 From: deven367 Date: Thu, 13 Nov 2025 10:08:50 -0500 Subject: [PATCH 19/31] tests for `statsforecast` --- tests/test_statsforecast.py | 153 ++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100644 tests/test_statsforecast.py diff --git a/tests/test_statsforecast.py b/tests/test_statsforecast.py new file mode 100644 index 0000000..0dd1bd5 --- /dev/null +++ b/tests/test_statsforecast.py @@ -0,0 +1,153 @@ +def test_autoarima_prophet(setup_parser): + fn = """::: statsforecast.adapters.prophet.AutoARIMAProphet + handler: python + options: + docstring_style: google + members: + - fit + - predict + heading_level: 3 + show_root_heading: true + show_source: true""" + output = setup_parser.process_markdown(fn) + assert output == """### `AutoARIMAProphet` + +```python +AutoARIMAProphet( + growth="linear", + changepoints=None, + n_changepoints=25, + changepoint_range=0.8, + yearly_seasonality="auto", + weekly_seasonality="auto", + daily_seasonality="auto", + holidays=None, + seasonality_mode="additive", + seasonality_prior_scale=10.0, + holidays_prior_scale=10.0, + changepoint_prior_scale=0.05, + 
mcmc_samples=0, + interval_width=0.8, + uncertainty_samples=1000, + stan_backend=None, + d=None, + D=None, + max_p=5, + max_q=5, + max_P=2, + max_Q=2, + max_order=5, + max_d=2, + max_D=1, + start_p=2, + start_q=2, + start_P=1, + start_Q=1, + stationary=False, + seasonal=True, + ic="aicc", + stepwise=True, + nmodels=94, + trace=False, + approximation=False, + method=None, + truncate=None, + test="kpss", + test_kwargs=None, + seasonal_test="seas", + seasonal_test_kwargs=None, + allowdrift=False, + allowmean=False, + blambda=None, + biasadj=False, + period=1, +) +``` + +Bases: [Prophet](#fbprophet.Prophet) + +AutoARIMAProphet adapter. + +Returns best ARIMA model using external variables created by the Prophet interface. +This class receives as parameters the same as prophet.Prophet and uses a `models.AutoARIMA` +backend. + +If your forecasting pipeline uses Prophet the `AutoARIMAProphet` adapter helps to +easily substitute Prophet with an AutoARIMA. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`growth` | str, default="linear" | 'linear', 'logistic' or 'flat' to specify a linear, logistic or flat trend. | 'linear' +`changepoints` | List of dates, default=None | Potential changepoints. Otherwise selected automatically. | None +`n_changepoints` | int, default=25 | Number of potential changepoints to include. | 25 +`changepoint_range` | float, default=0.8 | Proportion of history in which trend changepoints will be estimated. | 0.8 +`yearly_seasonality` | str, bool or int, default="auto" | Fit yearly seasonality. Can be 'auto', True, False, or a number of Fourier terms to generate. | 'auto' +`weekly_seasonality` | str, bool or int, default="auto" | Fit weekly seasonality. Can be 'auto', True, False, or a number of Fourier terms to generate. | 'auto' +`daily_seasonality` | str, bool or int, default="auto" | Fit daily seasonality. Can be 'auto', True, False, or a number of Fourier terms to generate. 
| 'auto' +`holidays` | pandas.DataFrame, default=None | DataFrame with columns holiday (string) and ds (date type). | None +`interval_width` | float, default=0.80 | Uncertainty forecast intervals width. `StatsForecast`'s level | 0.8 + +
+Note + +You can create automated exogenous variables from the Prophet data processing pipeline +these exogenous will be included into `AutoARIMA`'s exogenous features. Parameters like +`seasonality_mode`, `seasonality_prior_scale`, `holidays_prior_scale`, `changepoint_prior_scale`, +`mcmc_samples`, `uncertainty_samples`, `stan_backend` are Prophet exclusive. + +
+ +
+References + +[Sean J. Taylor, Benjamin Letham (2017). "Prophet Forecasting at Scale"](https://peerj.com/preprints/3190.pdf) + +[Oskar Triebe, Hansika Hewamalage, Polina Pilyugina, Nikolay Laptev, Christoph Bergmeir, Ram Rajagopal (2021). "NeuralProphet: Explainable Forecasting at Scale".](https://arxiv.org/pdf/2111.15397.pdf) + +[Rob J. Hyndman, Yeasmin Khandakar (2008). "Automatic Time Series Forecasting: The forecast package for R"](https://www.jstatsoft.org/article/view/v027i03). + +
+ +#### `AutoARIMAProphet.fit` + +```python +fit(df, disable_seasonal_features=True) +``` + +Fit the AutoARIMAProphet adapter. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`df` | [DataFrame](#pandas.DataFrame) | DataFrame with columns ds (date type) and y, the time series. | *required* +`disable_seasonal_features` | bool, default=True | Disable Prophet's seasonal features. | True + +**Returns:** + +Name | Type | Description +---- | ---- | ----------- +`AutoARIMAProphet` | | Adapter object with `AutoARIMA` fitted model. + +#### `AutoARIMAProphet.predict` + +```python +predict(df=None) +``` + +Predict using the AutoARIMAProphet adapter. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`df` | pandas.DataFrame, default=None | DataFrame with columns ds (date type) and y, the time series. | None + +**Returns:** + +Type | Description +---- | ----------- +| pandas.DataFrame: DataFrame with the forecast components. 
+""" \ No newline at end of file From eb9ea3e53d8f8770ef0f509ca12b2d7382a140c8 Mon Sep 17 00:00:00 2001 From: deven367 Date: Thu, 13 Nov 2025 10:08:58 -0500 Subject: [PATCH 20/31] tests for `mlforecast` --- tests/test_mlforecast.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 tests/test_mlforecast.py diff --git a/tests/test_mlforecast.py b/tests/test_mlforecast.py new file mode 100644 index 0000000..8f2f1ff --- /dev/null +++ b/tests/test_mlforecast.py @@ -0,0 +1,13 @@ +def test_distributed_dask_lgb(setup_parser): + fn = "::: mlforecast.distributed.dask.lgb" + output = setup_parser.process_markdown(fn) + assert output == """### `DaskLGBMForecast` + +Bases: [DaskLGBMRegressor](#lightgbm.dask.DaskLGBMRegressor) + +#### `DaskLGBMForecast.model_` + +```python +model_ +``` +""" From 7f0c431ec7b2badc15161fd6962a5da91e273cc7 Mon Sep 17 00:00:00 2001 From: deven367 Date: Thu, 13 Nov 2025 10:09:06 -0500 Subject: [PATCH 21/31] tests for `neuralforecast` --- tests/test_neuralforecast.py | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 tests/test_neuralforecast.py diff --git a/tests/test_neuralforecast.py b/tests/test_neuralforecast.py new file mode 100644 index 0000000..6c3c8f6 --- /dev/null +++ b/tests/test_neuralforecast.py @@ -0,0 +1,31 @@ +def test_timeseriesloader(setup_parser): + fn = "::: neuralforecast.tsdataset.TimeSeriesLoader" + output = setup_parser.process_markdown(fn) + assert output == """### `TimeSeriesLoader` + +```python +TimeSeriesLoader(dataset, **kwargs) +``` + +Bases: [DataLoader](#torch.utils.data.DataLoader) + +TimeSeriesLoader DataLoader. + +Small change to PyTorch's Data loader. +Combines a dataset and a sampler, and provides an iterable over the given dataset. + +The class `~torch.utils.data.DataLoader` supports both map-style and +iterable-style datasets with single- or multi-process loading, customizing +loading order and optional automatic batching (collation) and memory pinning. 
+ +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`dataset` | | Dataset to load data from. | *required* +`batch_size` | [int](#int) | How many samples per batch to load. Defaults to 1. | *required* +`shuffle` | [bool](#bool) | Set to True to have the data reshuffled at every epoch. Defaults to False. | *required* +`sampler` | [Sampler](#Sampler) or [Iterable](#Iterable) | Defines the strategy to draw samples from the dataset. | *required* +`drop_last` | [bool](#bool) | Set to True to drop the last incomplete batch. Defaults to False. | *required* +`**kwargs` | | Additional keyword arguments for DataLoader. | {} +""" \ No newline at end of file From 05bf3e79fb23dc38828f99cdb270e40053a896ab Mon Sep 17 00:00:00 2001 From: deven367 Date: Thu, 13 Nov 2025 10:10:38 -0500 Subject: [PATCH 22/31] install missing deps --- .github/workflows/pytest.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index b93d676..a82b650 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Clone repo - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + uses: actions/checkout@v4 - name: Setup python uses: actions/setup-python@v4 @@ -21,7 +21,9 @@ jobs: python-version: 3.11 - name: Install dependencies - run: pip install griffe2md rich pyyaml coreforecast utilsforecast datasetsforecast pytest + run: | + pip install griffe2md rich pyyaml neuralforecast "mlforecast[dask]" statsforecast \\ + nixtla coreforecast utilsforecast datasetsforecast pytest - name: Run tests run: pytest From 690eaa35ce6d94a4fb92787c2aac594c6ca22515 Mon Sep 17 00:00:00 2001 From: deven367 Date: Thu, 13 Nov 2025 10:12:22 -0500 Subject: [PATCH 23/31] fix syntax --- .github/workflows/pytest.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/pytest.yml 
b/.github/workflows/pytest.yml index a82b650..e226618 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -22,8 +22,7 @@ jobs: - name: Install dependencies run: | - pip install griffe2md rich pyyaml neuralforecast "mlforecast[dask]" statsforecast \\ - nixtla coreforecast utilsforecast datasetsforecast pytest + pip install griffe2md rich pyyaml neuralforecast "mlforecast[dask]" statsforecast nixtla coreforecast utilsforecast datasetsforecast pytest - name: Run tests run: pytest From 3374c8a3d6363a498fc46cfffdb906c18c3fb7be Mon Sep 17 00:00:00 2001 From: deven367 Date: Thu, 13 Nov 2025 10:19:13 -0500 Subject: [PATCH 24/31] migrate to a clean requirements.txt --- .github/workflows/pytest.yml | 3 ++- requirements.txt | 12 ++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 requirements.txt diff --git a/.github/workflows/pytest.yml b/.github/workflows/pytest.yml index e226618..0899d49 100644 --- a/.github/workflows/pytest.yml +++ b/.github/workflows/pytest.yml @@ -19,10 +19,11 @@ jobs: uses: actions/setup-python@v4 with: python-version: 3.11 + cache: 'pip' - name: Install dependencies run: | - pip install griffe2md rich pyyaml neuralforecast "mlforecast[dask]" statsforecast nixtla coreforecast utilsforecast datasetsforecast pytest + pip install -r requirements.txt - name: Run tests run: pytest diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..1fe838b --- /dev/null +++ b/requirements.txt @@ -0,0 +1,12 @@ +griffe2md +rich +pyyaml +neuralforecast +mlforecast[dask] +statsforecast +hierarchicalforecast +nixtla +coreforecast +utilsforecast +datasetsforecast +pytest From 6ddb2cb9c61d442aeb9509cc23a53b21b26cb7c0 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Thu, 13 Nov 2025 15:41:02 +0000 Subject: [PATCH 25/31] fix tests for hierarchical --- tests/test_hierarchicalforecast.py | 26 ++------------------------ 1 file changed, 2 insertions(+), 
24 deletions(-) diff --git a/tests/test_hierarchicalforecast.py b/tests/test_hierarchicalforecast.py index 655b9fa..4bab9e0 100644 --- a/tests/test_hierarchicalforecast.py +++ b/tests/test_hierarchicalforecast.py @@ -32,19 +32,7 @@ def test_evaluate(setup_parser): assert output == """### `evaluate` ```python -evaluate( - df, - metrics, - tags, - models=None, - train_df=None, - level=None, - id_col="unique_id", - time_col="ds", - target_col="y", - agg_fn="mean", - benchmark=None, -) +evaluate(df, metrics, tags, models=None, train_df=None, level=None, id_col='unique_id', time_col='ds', target_col='y', agg_fn='mean', benchmark=None) ``` Evaluate hierarchical forecast using different metrics. @@ -87,17 +75,7 @@ def test_permbu(setup_parser): assert output == """### `PERMBU` ```python -PERMBU( - S, - tags, - y_hat, - y_insample, - y_hat_insample, - sigmah, - num_samples=None, - seed=0, - P=None, -) +PERMBU(S, tags, y_hat, y_insample, y_hat_insample, sigmah, num_samples=None, seed=0, P=None) ``` PERMBU Probabilistic Reconciliation Class. 
From 943773ce7b2f2da2394793efea92d6bf4da02ba1 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Thu, 13 Nov 2025 15:41:10 +0000 Subject: [PATCH 26/31] fix tests for statsforecast --- tests/test_statsforecast.py | 50 +------------------------------------ 1 file changed, 1 insertion(+), 49 deletions(-) diff --git a/tests/test_statsforecast.py b/tests/test_statsforecast.py index 0dd1bd5..7c55bdf 100644 --- a/tests/test_statsforecast.py +++ b/tests/test_statsforecast.py @@ -13,55 +13,7 @@ def test_autoarima_prophet(setup_parser): assert output == """### `AutoARIMAProphet` ```python -AutoARIMAProphet( - growth="linear", - changepoints=None, - n_changepoints=25, - changepoint_range=0.8, - yearly_seasonality="auto", - weekly_seasonality="auto", - daily_seasonality="auto", - holidays=None, - seasonality_mode="additive", - seasonality_prior_scale=10.0, - holidays_prior_scale=10.0, - changepoint_prior_scale=0.05, - mcmc_samples=0, - interval_width=0.8, - uncertainty_samples=1000, - stan_backend=None, - d=None, - D=None, - max_p=5, - max_q=5, - max_P=2, - max_Q=2, - max_order=5, - max_d=2, - max_D=1, - start_p=2, - start_q=2, - start_P=1, - start_Q=1, - stationary=False, - seasonal=True, - ic="aicc", - stepwise=True, - nmodels=94, - trace=False, - approximation=False, - method=None, - truncate=None, - test="kpss", - test_kwargs=None, - seasonal_test="seas", - seasonal_test_kwargs=None, - allowdrift=False, - allowmean=False, - blambda=None, - biasadj=False, - period=1, -) +AutoARIMAProphet(growth='linear', changepoints=None, n_changepoints=25, changepoint_range=0.8, yearly_seasonality='auto', weekly_seasonality='auto', daily_seasonality='auto', holidays=None, seasonality_mode='additive', seasonality_prior_scale=10.0, holidays_prior_scale=10.0, changepoint_prior_scale=0.05, mcmc_samples=0, interval_width=0.8, uncertainty_samples=1000, stan_backend=None, d=None, D=None, max_p=5, max_q=5, max_P=2, max_Q=2, max_order=5, max_d=2, 
max_D=1, start_p=2, start_q=2, start_P=1, start_Q=1, stationary=False, seasonal=True, ic='aicc', stepwise=True, nmodels=94, trace=False, approximation=False, method=None, truncate=None, test='kpss', test_kwargs=None, seasonal_test='seas', seasonal_test_kwargs=None, allowdrift=False, allowmean=False, blambda=None, biasadj=False, period=1) ``` Bases: [Prophet](#fbprophet.Prophet) From 45843cc4bc3b0b99180df6f9241da2f573812db8 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Thu, 13 Nov 2025 16:20:27 +0000 Subject: [PATCH 27/31] add logic to parse additional types of docstrings --- parser.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/parser.py b/parser.py index 9d50523..38d4ba2 100644 --- a/parser.py +++ b/parser.py @@ -82,16 +82,29 @@ def generate_documentation(self, module_path: str, options: Dict[str, Any]) -> s except: pass # Fall back to griffe's detection + # Determine which parser to use based on docstring_style option + docstring_style = options.get("docstring_style", "google") + + # Map docstring style to parser function + parser_map = { + "google": griffe.parse_google, + "numpy": griffe.parse_numpy, + "sphinx": griffe.parse_sphinx, + "auto": griffe.parse_auto, + } + + parser_func = parser_map.get(docstring_style, griffe.parse_google) + if obj.docstring: - # Force parsing with Google parser to get structured sections - obj.docstring.parsed = griffe.parse_google(obj.docstring) + # Parse with the appropriate parser to get structured sections + obj.docstring.parsed = parser_func(obj.docstring) # Handle different object types if hasattr(obj, "members"): # This is a class or module - parse docstrings for all methods/functions for member_name, member in obj.members.items(): if member.docstring: - member.docstring.parsed = griffe.parse_google(member.docstring) + member.docstring.parsed = parser_func(member.docstring) default_options = { "docstring_section_style": "table", From 
34b85444e326a5f58d347ea3b3b6249e43b62321 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Thu, 13 Nov 2025 16:20:47 +0000 Subject: [PATCH 28/31] fix tests for datasets --- tests/test_datasetsforecast.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/tests/test_datasetsforecast.py b/tests/test_datasetsforecast.py index cda23bb..367febc 100644 --- a/tests/test_datasetsforecast.py +++ b/tests/test_datasetsforecast.py @@ -9,14 +9,7 @@ def test_yearly_dataclass(setup_parser): assert rendered == """### `Yearly` ```python -Yearly( - seasonality=1, - horizon=6, - freq="Y", - sheet_name="M3Year", - name="Yearly", - n_ts=645, -) +Yearly(seasonality=1, horizon=6, freq='Y', sheet_name='M3Year', name='Yearly', n_ts=645) ``` #### `Yearly.freq` @@ -58,7 +51,13 @@ def test_yearly_dataclass(setup_parser): @pytest.mark.datasets def test_download_file(setup_parser): - fn = """::: datasetsforecast.utils.download_file""" + fn = """::: datasetsforecast.utils.download_file + handler: python + options: + docstring_style: numpy + heading_level: 3 + show_root_heading: true + show_source: true""" rendered = setup_parser.process_markdown(fn) assert rendered == """### `download_file` @@ -73,7 +72,7 @@ def test_download_file(setup_parser): Name | Type | Description | Default ---- | ---- | ----------- | ------- -`directory` | ([str](#str), [Path](#pathlib.Path)) | Custom directory where data will be downloaded. | *required* +`directory` | [str](#str) | Custom directory where data will be downloaded. | *required* `source_url` | [str](#str) | URL where data is hosted. | *required* `decompress` | [bool](#bool) | Wheter decompress downloaded file. Default False. 
| False """ \ No newline at end of file From 153a83859c13418f120ce1ac9c64ba3b290f54ed Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Thu, 13 Nov 2025 16:21:02 +0000 Subject: [PATCH 29/31] add more tests for hierarchical --- tests/test_hierarchicalforecast.py | 95 ++++++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/tests/test_hierarchicalforecast.py b/tests/test_hierarchicalforecast.py index 4bab9e0..58c9ef8 100644 --- a/tests/test_hierarchicalforecast.py +++ b/tests/test_hierarchicalforecast.py @@ -140,4 +140,99 @@ def test_permbu(setup_parser): Name | Type | Description ---- | ---- | ----------- `samples` | | Coherent samples of size (`base`, `horizon`, `num_samples`). +""" + +def test_bottomup(setup_parser): + fn = """::: hierarchicalforecast.methods.BottomUp + handler: python + options: + docstring_style: google + members: + - fit + - predict + - fit_predict + - sample + inherited_members: false + heading_level: 3 + show_root_heading: true + show_source: true""" + output = setup_parser.process_markdown(fn) + assert output == """### `BottomUp` + +Bases: [HReconciler](#hierarchicalforecast.methods.HReconciler) + +Bottom Up Reconciliation Class. + +The most basic hierarchical reconciliation is performed using an Bottom-Up strategy. It was proposed for +the first time by Orcutt in 1968. +The corresponding hierarchical "projection" matrix is defined as: +$$ +\\\\mathbf{P}_{\\\\text{BU}} = \\[\\\\mathbf{0}_{\\\\mathrm{[b],[a]}};|;\\\\mathbf{I}\\_{\\\\mathrm{[b][b]}}\\] +$$ + +
+References + +- [Orcutt, G.H., Watts, H.W., & Edwards, J.B.(1968). "Data aggregation and information loss". The American Economic Review, 58 , 773(787)](http://www.jstor.org/stable/1815532). + +
+ +#### `BottomUp.fit` + +```python +fit(S, y_hat, idx_bottom, y_insample=None, y_hat_insample=None, sigmah=None, intervals_method=None, num_samples=None, seed=None, tags=None) +``` + +Bottom Up Fit Method. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`S` | [ndarray](#numpy.ndarray) | Summing matrix of size (`base`, `bottom`). | *required* +`y_hat` | [ndarray](#numpy.ndarray) | Forecast values of size (`base`, `horizon`). | *required* +`idx_bottom` | [ndarray](#numpy.ndarray) | Indices corresponding to the bottom level of `S`, size (`bottom`). | *required* +`y_insample` | [Optional](#typing.Optional)\[[ndarray](#numpy.ndarray)\] | In-sample values of size (`base`, `horizon`). Default is None. | None +`y_hat_insample` | [Optional](#typing.Optional)\[[ndarray](#numpy.ndarray)\] | In-sample forecast values of size (`base`, `horizon`). Default is None. | None +`sigmah` | [Optional](#typing.Optional)\[[ndarray](#numpy.ndarray)\] | Estimated standard deviation of the conditional marginal distribution. Default is None. | None +`intervals_method` | [Optional](#typing.Optional)\[[str](#str)\] | Sampler for prediction intervals, one of `normality`, `bootstrap`, `permbu`. Default is None. | None +`num_samples` | [Optional](#typing.Optional)\[[int](#int)\] | Number of samples for probabilistic coherent distribution. Default is None. | None +`seed` | [Optional](#typing.Optional)\[[int](#int)\] | Seed for reproducibility. Default is None. | None +`tags` | [Optional](#typing.Optional)\[[dict](#dict)\[[str](#str), [ndarray](#numpy.ndarray)\]\] | Tags for hierarchical structure. Default is None. | None + +**Returns:** + +Name | Type | Description +---- | ---- | ----------- +`BottomUp` | | object, fitted reconciler. 
+ +#### `BottomUp.fit_predict` + +```python +fit_predict(S, y_hat, idx_bottom, y_insample=None, y_hat_insample=None, sigmah=None, level=None, intervals_method=None, num_samples=None, seed=None, tags=None) +``` + +BottomUp Reconciliation Method. + +**Parameters:** + +Name | Type | Description | Default +---- | ---- | ----------- | ------- +`S` | [ndarray](#numpy.ndarray) | Summing matrix of size (`base`, `bottom`). | *required* +`y_hat` | [ndarray](#numpy.ndarray) | Forecast values of size (`base`, `horizon`). | *required* +`idx_bottom` | [ndarray](#numpy.ndarray) | Indices corresponding to the bottom level of `S`, size (`bottom`). | *required* +`y_insample` | [Optional](#typing.Optional)\[[ndarray](#numpy.ndarray)\] | In-sample values of size (`base`, `insample_size`). Default is None. | None +`y_hat_insample` | [Optional](#typing.Optional)\[[ndarray](#numpy.ndarray)\] | In-sample forecast values of size (`base`, `insample_size`). Default is None. | None +`sigmah` | [Optional](#typing.Optional)\[[ndarray](#numpy.ndarray)\] | Estimated standard deviation of the conditional marginal distribution. Default is None. | None +`level` | [Optional](#typing.Optional)\[[list](#list)\[[int](#int)\]\] | float list 0-100, confidence levels for prediction intervals. Default is None. | None +`intervals_method` | [Optional](#typing.Optional)\[[str](#str)\] | Sampler for prediction intervals, one of `normality`, `bootstrap`, `permbu`. Default is None. | None +`num_samples` | [Optional](#typing.Optional)\[[int](#int)\] | Number of samples for probabilistic coherent distribution. Default is None. | None +`seed` | [Optional](#typing.Optional)\[[int](#int)\] | Seed for reproducibility. Default is None. | None +`tags` | [Optional](#typing.Optional)\[[dict](#dict)\[[str](#str), [ndarray](#numpy.ndarray)\]\] | Tags for hierarchical structure. Default is None. 
| None + +**Returns:** + +Name | Type | Description +---- | ---- | ----------- +`dict` | | y_tilde: Reconciliated y_hat using the Bottom Up approach. """ \ No newline at end of file From c7e389878271810d1d0a5a5f5450c7a5f002aad8 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Thu, 13 Nov 2025 18:08:10 +0000 Subject: [PATCH 30/31] fix test for `mlforecast` --- tests/test_mlforecast.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_mlforecast.py b/tests/test_mlforecast.py index 8f2f1ff..06b4f25 100644 --- a/tests/test_mlforecast.py +++ b/tests/test_mlforecast.py @@ -1,5 +1,5 @@ def test_distributed_dask_lgb(setup_parser): - fn = "::: mlforecast.distributed.dask.lgb" + fn = "::: mlforecast.distributed.models.dask.lgb.DaskLGBMForecast" output = setup_parser.process_markdown(fn) assert output == """### `DaskLGBMForecast` From f250e4d5e98d7920dd263309c26fd742dd4c4567 Mon Sep 17 00:00:00 2001 From: Deven Mistry <31466137+deven367@users.noreply.github.com> Date: Thu, 13 Nov 2025 18:08:51 +0000 Subject: [PATCH 31/31] test `datasets` as well --- pytest.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pytest.ini b/pytest.ini index 3feab2d..102ad37 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,5 +1,5 @@ [pytest] testpaths = tests -addopts = -vv -m "not datasets" +addopts = -vv markers = datasets: tests for datasetsforecast \ No newline at end of file