Skip to content

Commit

Permalink
Merge pull request #241 from winedarksea/dev
Browse files Browse the repository at this point in the history
0.6.12
  • Loading branch information
winedarksea committed May 6, 2024
2 parents a2a464c + 5e0ea06 commit 7a843d0
Show file tree
Hide file tree
Showing 47 changed files with 1,917 additions and 299 deletions.
12 changes: 7 additions & 5 deletions TODO.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,12 +13,14 @@
* Forecasts are desired for the future immediately following the most recent data.
* trimmed_mean to AverageValueNaive

# 0.6.11 🇺🇦 🇺🇦 🇺🇦
# 0.6.12 🇺🇦 🇺🇦 🇺🇦
* bug fixes
* continually trying to keep up with the Pandas maintainers, who keep breaking things for no good reason
* updated RollingMeanTransformer and RegressionFilter, RegressionFilter should now be less memory intensive
* EIA data call to load_live_daily
* horizontal_ensemble_validation arg for more complete validation on these ensembles
* added DMD model
* modified the `constraints` options so it now accepts a list of dictionaries of constraints with new last_window and slope options
* 'dampening' as a constraint method to dampen all forecasts, fixed Cassandra trend_phi dampening
* new med_diff anomaly method and 'laplace' added as distribution option
* modified fourier_df to now work with sub daily data
* some madness with wavelets attempting to use them like fourier series for seasonality

### Unstable Upstream Packages (those that are frequently broken by maintainers)
* Pytorch-Forecasting
Expand Down
2 changes: 1 addition & 1 deletion autots/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@
from autots.models.cassandra import Cassandra


__version__ = '0.6.11'
__version__ = '0.6.12'

TransformTS = GeneralTransformer

Expand Down
105 changes: 76 additions & 29 deletions autots/evaluator/auto_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@
DynamicFactorMQ,
)
from autots.models.arch import ARCH
from autots.models.matrix_var import RRVAR, MAR, TMF, LATC
from autots.models.matrix_var import RRVAR, MAR, TMF, LATC, DMD


def create_model_id(
Expand Down Expand Up @@ -698,6 +698,17 @@ def ModelMonster(
n_jobs=n_jobs,
**parameters,
)
elif model == 'DMD':
return DMD(
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
**parameters,
)
elif model == "":
raise AttributeError(
("Model name is empty. Likely this means AutoTS has not been fit.")
Expand Down Expand Up @@ -864,7 +875,7 @@ def predict(self, forecast_length=None, future_regressor=None):
if not self._fit_complete:
raise ValueError("Model not yet fit.")
df_forecast = self.model.predict(
forecast_length=self.forecast_length, future_regressor=future_regressor
forecast_length=forecast_length, future_regressor=future_regressor
)

# THIS CHECKS POINT FORECAST FOR NULLS BUT NOT UPPER/LOWER FORECASTS
Expand Down Expand Up @@ -896,11 +907,13 @@ def predict(self, forecast_length=None, future_regressor=None):
# CHECK Forecasts are proper length!
if df_forecast.forecast.shape[0] != self.forecast_length:
raise ValueError(
f"Model {self.model_str} returned improper forecast_length"
f"Model {self.model_str} returned improper forecast_length. Returned: {df_forecast.forecast.shape[0]} and requested: {self.forecast_length}"
)

if df_forecast.forecast.shape[1] != self.df.shape[1]:
raise ValueError("Model failed to return correct number of series.")
raise ValueError(
f"Model failed to return correct number of series. Returned {df_forecast.forecast.shape[1]} and requested: {self.df.shape[1]}"
)

df_forecast.transformation_parameters = self.transformation_dict
# Remove negatives if desired
Expand All @@ -911,33 +924,53 @@ def predict(self, forecast_length=None, future_regressor=None):
df_forecast.upper_forecast = df_forecast.upper_forecast.clip(lower=0)

if self.constraint is not None:
if isinstance(self.constraint, dict):
constraint_method = self.constraint.get("constraint_method", "quantile")
constraint_regularization = self.constraint.get(
"constraint_regularization", 1
if isinstance(self.constraint, list):
constraints = self.constraint
df_forecast = df_forecast.apply_constraints(
constraints=constraints,
df_train=self.df,
)
lower_constraint = self.constraint.get("lower_constraint", 0)
upper_constraint = self.constraint.get("upper_constraint", 1)
bounds = self.constraint.get("bounds", False)
else:
constraint_method = "stdev_min"
lower_constraint = float(self.constraint)
upper_constraint = float(self.constraint)
constraint_regularization = 1
bounds = False
if self.verbose > 3:
print(
f"Using constraint with method: {constraint_method}, {constraint_regularization}, {lower_constraint}, {upper_constraint}, {bounds}"
)
constraints = None
if isinstance(self.constraint, dict):
if "constraints" in self.constraint.keys():
constraints = self.constraint.get("constraints")
constraint_method = None
constraint_regularization = None
lower_constraint = None
upper_constraint = None
bounds = True
else:
constraint_method = self.constraint.get(
"constraint_method", "quantile"
)
constraint_regularization = self.constraint.get(
"constraint_regularization", 1
)
lower_constraint = self.constraint.get("lower_constraint", 0)
upper_constraint = self.constraint.get("upper_constraint", 1)
bounds = self.constraint.get("bounds", False)
else:
constraint_method = "stdev_min"
lower_constraint = float(self.constraint)
upper_constraint = float(self.constraint)
constraint_regularization = 1
bounds = False
if self.verbose > 3:
print(
f"Using constraint with method: {constraint_method}, {constraint_regularization}, {lower_constraint}, {upper_constraint}, {bounds}"
)

df_forecast = df_forecast.apply_constraints(
constraint_method,
constraint_regularization,
upper_constraint,
lower_constraint,
bounds,
self.df,
)
print(constraints)
df_forecast = df_forecast.apply_constraints(
constraints,
self.df,
constraint_method,
constraint_regularization,
upper_constraint,
lower_constraint,
bounds,
)

self.transformation_runtime = self.transformation_runtime + (
datetime.datetime.now() - transformationStartTime
Expand Down Expand Up @@ -966,6 +999,18 @@ def fit_data(self, df, future_regressor=None):
self.df = df
self.model.fit_data(df, future_regressor)

def fit_predict(
    self,
    df,
    forecast_length,
    future_regressor_train=None,
    future_regressor_forecast=None,
):
    """Train on ``df`` and immediately produce a forecast.

    Convenience wrapper that chains ``fit`` and ``predict`` in one call.

    Args:
        df: training data passed straight through to ``fit``.
        forecast_length: number of periods ahead to forecast.
        future_regressor_train: regressor values aligned with the
            training data (forwarded to ``fit`` as ``future_regressor``).
        future_regressor_forecast: regressor values for the forecast
            horizon (forwarded to ``predict`` as ``future_regressor``).

    Returns:
        Whatever ``predict`` returns (a prediction object).
    """
    self.fit(df, future_regressor=future_regressor_train)
    prediction = self.predict(
        forecast_length=forecast_length,
        future_regressor=future_regressor_forecast,
    )
    return prediction


class TemplateEvalObject(object):
"""Object to contain all the failures!.
Expand Down Expand Up @@ -2119,7 +2164,9 @@ def NewGeneticTemplate(

# filter existing templates
sorted_results = model_results[
(model_results['Ensemble'] == 0) & (model_results['Exceptions'].isna())
(model_results['Ensemble'] == 0)
& (model_results['Exceptions'].isna())
& (model_results['Model'].isin(model_list))
].copy()
# remove duplicates by exact same performance
sorted_results = sorted_results.sort_values(
Expand Down
31 changes: 27 additions & 4 deletions autots/evaluator/auto_ts.py
Original file line number Diff line number Diff line change
Expand Up @@ -1075,6 +1075,7 @@ def fit_data(
preclean=None,
verbose=0,
)
return self

def fit(
self,
Expand Down Expand Up @@ -1826,8 +1827,10 @@ def _run_template(
self.model_count = template_result.model_count
# capture results from lower-level template run
if "TotalRuntime" in template_result.model_results.columns:
template_result.model_results['TotalRuntime'].fillna(
pd.Timedelta(seconds=60), inplace=True
template_result.model_results['TotalRuntime'] = (
template_result.model_results['TotalRuntime'].fillna(
pd.Timedelta(seconds=60)
)
)
else:
# trying to catch a rare and sneaky bug (perhaps some variety of beetle?)
Expand Down Expand Up @@ -2161,9 +2164,13 @@ def results(self, result_set: str = 'initial'):
result_set (str): 'validation' or 'initial'
"""
if result_set == 'validation':
return self.validation_results.model_results
return self.validation_results.model_results.sort_values(
"Score", ascending=True
)
else:
return self.initial_results.model_results
return self.initial_results.model_results.sort_values(
"Score", ascending=True
)

def failure_rate(self, result_set: str = 'initial'):
"""Return fraction of models passing with exceptions.
Expand Down Expand Up @@ -2280,6 +2287,22 @@ def export_template(
export_template = unpack_ensemble_models(
export_template, self.template_cols, keep_ensemble=False, recursive=True
).drop_duplicates()
if include_results:
export_template = export_template.drop(columns=['smape']).merge(
self.validation_results.model_results[['ID', 'smape']],
on="ID",
how='left',
)
# put smape back in the front
remaining_columns = [
col
for col in export_template.columns
if col not in self.template_cols_id and col not in ['smape', 'Runs']
]
new_order = (
self.template_cols_id + ['Runs', 'smape'] + remaining_columns
)
export_template = export_template.reindex(columns=new_order)
return self.save_template(filename, export_template)

def save_template(self, filename, export_template, **kwargs):
Expand Down
Loading

0 comments on commit 7a843d0

Please sign in to comment.