keep only observed combinations in aggregate #242

Merged · 4 commits · Nov 16, 2023
8 changes: 4 additions & 4 deletions hierarchicalforecast/core.py
@@ -3,7 +3,7 @@
# %% auto 0
__all__ = ['HierarchicalReconciliation']

-# %% ../nbs/core.ipynb 3
+# %% ../nbs/core.ipynb 4
import re
import gc
import time
@@ -17,7 +17,7 @@
import numpy as np
import pandas as pd

-# %% ../nbs/core.ipynb 5
+# %% ../nbs/core.ipynb 6
def _build_fn_name(fn) -> str:
    fn_name = type(fn).__name__
    func_params = fn.__dict__
@@ -37,7 +37,7 @@ def _build_fn_name(fn) -> str:
        fn_name += '_' + '_'.join(func_params)
    return fn_name

-# %% ../nbs/core.ipynb 9
+# %% ../nbs/core.ipynb 10
def _reverse_engineer_sigmah(Y_hat_df, y_hat, model_name):
"""
This function assumes that the model creates prediction intervals
@@ -73,7 +73,7 @@ def _reverse_engineer_sigmah(Y_hat_df, y_hat, model_name):

    return sigmah

-# %% ../nbs/core.ipynb 10
+# %% ../nbs/core.ipynb 11
class HierarchicalReconciliation:
"""Hierarchical Reconciliation Class.

7 changes: 6 additions & 1 deletion hierarchicalforecast/utils.py
@@ -197,8 +197,13 @@ def aggregate(
    aggs = []
    tags = {}
    for levels in spec:
-        agg = df.groupby(levels + ['ds'])['y'].sum().reset_index('ds')
+        agg = df.groupby(levels + ['ds'], observed=True)['y'].sum()
+        if not agg.index.is_monotonic_increasing:
+            agg = agg.sort_index()
+        agg = agg.reset_index('ds')
        group = agg.index.get_level_values(0)
+        if not pd.api.types.is_string_dtype(group.dtype):
+            group = group.astype(str)
        for level in levels[1:]:
            group = group + '/' + agg.index.get_level_values(level).str.replace('/', '_')
        agg.index = group
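
For readers skimming the hunk above: `observed=True` is the core of this change. When the grouping columns are pandas categoricals, the default behaviour expands the result to every combination of category levels, including combinations that never occur in the data; `observed=True` keeps only the observed ones. A minimal sketch of the difference, with invented column names and values:

```python
import pandas as pd

# Hypothetical frame: only three of the possible country/state pairs occur.
df = pd.DataFrame({
    "country": pd.Categorical(["AUS", "AUS", "NZL"]),
    "state": pd.Categorical(["NSW", "VIC", "AKL"]),
    "ds": pd.to_datetime(["2023-01-01"] * 3),
    "y": [1.0, 2.0, 3.0],
})

# observed=False expands categorical groupers to the full cartesian product of
# their categories, so unobserved pairs such as ("NZL", "NSW") appear as well.
dense = df.groupby(["country", "state", "ds"], observed=False)["y"].sum()

# observed=True keeps only the combinations present in the data, which is what
# aggregate() wants when building the aggregated series and summing matrix.
sparse = df.groupby(["country", "state", "ds"], observed=True)["y"].sum()

print(len(dense), len(sparse))  # dense also contains all-zero unobserved groups; sparse has 3
```

The `sort_index` guard and the `astype(str)` fallback in the same hunk presumably handle two side effects of categorical groupers: the group order can follow the declared category order rather than a sorted one, and the resulting index levels come back categorical rather than string-dtyped, which the later `'/'`-joined label construction does not expect.
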
25 changes: 24 additions & 1 deletion nbs/core.ipynb
@@ -1,5 +1,16 @@
{
"cells": [
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"#| hide\n",
+"%load_ext autoreload\n",
+"%autoreload 2"
+]
+},
{
"cell_type": "code",
"execution_count": null,
@@ -579,7 +590,19 @@
"\n",
"# getting df\n",
"hier_grouped_df, S_grouped_df, tags_grouped = aggregate(df, hierS_grouped_df)\n",
"hier_strict_df, S_strict, tags_strict = aggregate(df, hiers_strictly)"
"hier_strict_df, S_strict, tags_strict = aggregate(df, hiers_strictly)\n",
"\n",
"# check categorical input produces same output\n",
"df2 = df.copy()\n",
"for col in ['Country', 'State', 'Purpose', 'Region']:\n",
" df2[col] = df2[col].astype('category')\n",
"\n",
"for spec in [hierS_grouped_df, hiers_strictly]:\n",
" Y_orig, S_orig, tags_orig = aggregate(df, spec)\n",
" Y_cat, S_cat, tags_cat = aggregate(df2, spec)\n",
" pd.testing.assert_frame_equal(Y_cat, Y_orig)\n",
" pd.testing.assert_frame_equal(S_cat, S_orig)\n",
" assert all(np.array_equal(tags_orig[k], tags_cat[k]) for k in tags_orig.keys())"
]
},
{
19 changes: 16 additions & 3 deletions nbs/utils.ipynb
@@ -310,8 +310,13 @@
" aggs = []\n",
" tags = {}\n",
" for levels in spec:\n",
" agg = df.groupby(levels + ['ds'])['y'].sum().reset_index('ds')\n",
" agg = df.groupby(levels + ['ds'], observed=True)['y'].sum()\n",
" if not agg.index.is_monotonic_increasing:\n",
" agg = agg.sort_index()\n",
" agg = agg.reset_index('ds')\n",
" group = agg.index.get_level_values(0)\n",
" if not pd.api.types.is_string_dtype(group.dtype):\n",
" group = group.astype(str)\n",
" for level in levels[1:]:\n",
" group = group + '/' + agg.index.get_level_values(level).str.replace('/', '_')\n",
" agg.index = group\n",
@@ -354,7 +359,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "075b8d76-b206-4ca6-8722-dd60e4c3b535",
"id": "82e70572-9c01-466d-a3e9-7667b92def2c",
"metadata": {},
"outputs": [],
"source": [
@@ -394,7 +399,15 @@
"    'country/cat1/cat2': ['COUNTRY/a/1', 'COUNTRY/a/2', 'COUNTRY/a/3','COUNTRY/b/2'],\n",
"}\n",
"for k, actual in tags.items():\n",
" test_eq(actual, expected_tags[k])"
" test_eq(actual, expected_tags[k])\n",
"\n",
"# test categoricals don't produce all combinations\n",
"df2 = df.copy()\n",
"for col in ('country', 'cat1', 'cat2'):\n",
" df2[col] = df2[col].astype('category')\n",
"\n",
"Y_df2, *_ = aggregate(df2, spec)\n",
"assert Y_df.shape[0] == Y_df2.shape[0]"
]
},
{
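
Relatedly, the `astype(str)` path that this test exercises exists because, once the grouping columns are categorical, the index level returned by `get_level_values` is categorical rather than string-typed, and `aggregate` needs plain strings to build the `'/'`-joined labels. A tiny sketch of that conversion step, with invented values:

```python
import pandas as pd

# Hypothetical index level as returned by a groupby on a categorical column.
country = pd.CategoricalIndex(["COUNTRY", "COUNTRY", "COUNTRY"])
cat1 = pd.Index(["a", "a", "b"])

# aggregate() guards this with pd.api.types.is_string_dtype; here we cast
# unconditionally and then build the hierarchy labels the same way.
country = country.astype(str)
labels = country + "/" + cat1.str.replace("/", "_")
print(labels.tolist())  # ['COUNTRY/a', 'COUNTRY/a', 'COUNTRY/b']
```
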