Commit 71d1ae5

Bump pytest version (#105)

d-a-bunin committed Oct 16, 2023
1 parent 3791c4a commit 71d1ae5
Showing 43 changed files with 204 additions and 201 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -44,6 +44,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fix CI job `cron-delete-untagged-images` ([#95](https://github.com/etna-team/etna/pull/95))
- Rendering table of contents in notebooks ([#1343](https://github.com/tinkoff-ai/etna/pull/1343))
- Fix formatting of docstrings, fix links from netlify to docs.etna.ai ([#62](https://github.com/etna-team/etna/pull/62))
- Fix multiple warnings, revert catching warnings during testing ([#105](https://github.com/etna-team/etna/pull/105))

### Removed
- FutureMixin ([#58](https://github.com/etna-team/etna/pull/58))
4 changes: 2 additions & 2 deletions etna/auto/auto.py
@@ -171,7 +171,7 @@ def __init__(

def _top_k(self, summary: pd.DataFrame, k: int) -> List[BasePipeline]:
metric_name = f"{self.target_metric.name}_{self.metric_aggregation}"
df = summary[summary["state"].apply(lambda x: x is optuna.structs.TrialState.COMPLETE)]
df = summary[summary["state"].apply(lambda x: x is optuna.trial.TrialState.COMPLETE)]
df = df.drop_duplicates(subset=["hash"])
df = df.sort_values(
by=metric_name,
@@ -768,7 +768,7 @@ def _find_duplicate_trial(trial: Trial, pipeline: BasePipeline) -> Optional[Froz
pipeline_hash = config_hash(pipeline.to_dict())

for t in trial.study.trials:
if t.state != optuna.structs.TrialState.COMPLETE:
if t.state != optuna.trial.TrialState.COMPLETE:
continue

if t.user_attrs.get("hash") == pipeline_hash:
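
Note: `TrialState` lives at `optuna.trial.TrialState`; the `optuna.structs` alias was deprecated in optuna 1.x and removed in optuna 3.0, so the old spelling fails on current optuna. A minimal sketch of the same check, assuming only the public optuna API:

    import optuna

    def completed_trials(study: optuna.study.Study) -> list:
        # Keep only trials that finished successfully; the enum moved out of optuna.structs.
        return [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE]
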
10 changes: 4 additions & 6 deletions etna/commands/forecast_command.py
@@ -27,16 +27,14 @@
def compute_horizon(horizon: int, forecast_params: Dict[str, Any], tsdataset: TSDataset) -> int:
"""Compute new pipeline horizon if `start_timestamp` presented in `forecast_params`."""
if "start_timestamp" in forecast_params:
freq = tsdataset.freq

forecast_start_timestamp = pd.Timestamp(forecast_params["start_timestamp"], freq=freq)
forecast_start_timestamp = pd.Timestamp(forecast_params["start_timestamp"])
train_end_timestamp = tsdataset.index.max()

if forecast_start_timestamp <= train_end_timestamp:
raise ValueError("Parameter `start_timestamp` should greater than end of training dataset!")

delta = determine_num_steps(
start_timestamp=train_end_timestamp, end_timestamp=forecast_start_timestamp, freq=freq
start_timestamp=train_end_timestamp, end_timestamp=forecast_start_timestamp, freq=tsdataset.freq
)

horizon += delta - 1
@@ -53,9 +51,9 @@ def update_horizon(pipeline_configs: Dict[str, Any], forecast_params: Dict[str,


def filter_forecast(forecast_ts: TSDataset, forecast_params: Dict[str, Any]) -> TSDataset:
"""Filter out forecasts before `start_timestamp` if `start_timestamp` presented in `forecast_params`.."""
"""Filter out forecasts before `start_timestamp` if `start_timestamp` presented in `forecast_params`."""
if "start_timestamp" in forecast_params:
forecast_start_timestamp = pd.Timestamp(forecast_params["start_timestamp"], freq=forecast_ts.freq)
forecast_start_timestamp = pd.Timestamp(forecast_params["start_timestamp"])
forecast_ts.df = forecast_ts.df.loc[forecast_start_timestamp:, :]

return forecast_ts
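
Note: `pd.Timestamp` no longer accepts a `freq` argument (deprecated in pandas 1.x, removed in 2.0), and the frequency is only needed by `determine_num_steps`, so it is now passed there directly. A minimal sketch of the new-style call, with illustrative values:

    import pandas as pd

    # Old, now failing on pandas 2.0: pd.Timestamp("2023-01-01", freq="D")
    forecast_start_timestamp = pd.Timestamp("2023-01-01")
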
2 changes: 1 addition & 1 deletion etna/datasets/tsdataset.py
@@ -1240,7 +1240,7 @@ def add_target_components(self, target_components_df: pd.DataFrame):
f"Set of target components differs between segments '{self.segments[0]}' and '{segment}'!"
)

components_sum = target_components_df.sum(axis=1, level="segment")
components_sum = target_components_df.groupby(axis=1, level="segment").sum()
if not np.allclose(components_sum.values, self[..., "target"].values):
raise ValueError("Components don't sum up to target!")

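
Note: `DataFrame.sum(axis=1, level=...)` was deprecated in pandas 1.x and removed in 2.0; the replacement groups the columns by level and then reduces. A toy sketch with the same two-level column layout:

    import pandas as pd

    columns = pd.MultiIndex.from_product([["A", "B"], ["c1", "c2"]], names=["segment", "feature"])
    df = pd.DataFrame([[1, 2, 3, 4]], columns=columns)

    # Old: df.sum(axis=1, level="segment")
    per_segment = df.groupby(axis=1, level="segment").sum()  # A -> 3, B -> 7

(`groupby(axis=1, ...)` was itself deprecated later, in pandas 2.1; the long-term spelling is `df.T.groupby(level="segment").sum().T`.)
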
2 changes: 1 addition & 1 deletion etna/datasets/utils.py
@@ -183,7 +183,7 @@ def set_columns_wide(

def match_target_quantiles(features: Set[str]) -> Set[str]:
"""Find quantiles in dataframe columns."""
pattern = re.compile("target_\d+\.\d+$")
pattern = re.compile(r"target_\d+\.\d+$")
return {i for i in list(features) if pattern.match(i) is not None}


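
Note: without the `r` prefix, `\d` is an invalid escape sequence in a normal string literal, and newer Python versions warn about it (a `DeprecationWarning`, upgraded to `SyntaxWarning` in 3.12); the compiled pattern is unchanged. For example:

    import re

    pattern = re.compile(r"target_\d+\.\d+$")  # raw string: no invalid-escape warning
    assert pattern.match("target_0.975") is not None
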
8 changes: 4 additions & 4 deletions etna/models/base.py
@@ -452,8 +452,8 @@ def training_step(self, batch: dict, *args, **kwargs): # type: ignore
:
loss
"""
loss, _, _ = self.step(batch, *args, **kwargs) # type: ignore
self.log("train_loss", loss, on_epoch=True)
loss, true_target, _ = self.step(batch, *args, **kwargs) # type: ignore
self.log("train_loss", loss, on_epoch=True, batch_size=len(true_target))
return loss

def validation_step(self, batch: dict, *args, **kwargs): # type: ignore
@@ -469,8 +469,8 @@ def validation_step(self, batch: dict, *args, **kwargs): # type: ignore
:
loss
"""
loss, _, _ = self.step(batch, *args, **kwargs) # type: ignore
self.log("val_loss", loss, on_epoch=True)
loss, true_target, _ = self.step(batch, *args, **kwargs) # type: ignore
self.log("val_loss", loss, on_epoch=True, batch_size=len(true_target))
return loss


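
Note: when a batch is a dict, Lightning cannot infer the batch size for `self.log` and emits a warning; passing `batch_size=` explicitly (supported since PyTorch Lightning 1.5) silences it and keeps the epoch-level average correctly weighted. A schematic fragment of the pattern, where `compute_loss` is a hypothetical helper standing in for `self.step`:

    import pytorch_lightning as pl

    class Net(pl.LightningModule):
        def training_step(self, batch: dict, batch_idx: int):
            loss, true_target = self.compute_loss(batch)  # hypothetical helper
            # Dict batches defeat automatic batch-size inference, so pass it explicitly.
            self.log("train_loss", loss, on_epoch=True, batch_size=len(true_target))
            return loss
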
2 changes: 1 addition & 1 deletion etna/models/sarimax.py
@@ -262,7 +262,7 @@ def _mle_regression_decomposition(self, state: np.ndarray, ssm: SimulationSmooth

if len(exog) > 0:
# restore parameters for exogenous variables
exog_params = np.linalg.lstsq(a=exog, b=np.squeeze(ssm["obs_intercept"]))[0]
exog_params = np.linalg.lstsq(a=exog, b=np.squeeze(ssm["obs_intercept"]), rcond=None)[0]

# estimate exogenous components and append to others
weighted_exog = exog * exog_params[np.newaxis]
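
Note: `np.linalg.lstsq` has emitted a `FutureWarning` since NumPy 1.14 when `rcond` is omitted; `rcond=None` opts into the newer machine-precision cutoff for small singular values. A toy example:

    import numpy as np

    exog = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
    target = np.array([1.0, 2.0, 3.0])
    exog_params, *_ = np.linalg.lstsq(exog, target, rcond=None)  # no FutureWarning
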
30 changes: 26 additions & 4 deletions etna/models/tbats.py
@@ -397,7 +397,18 @@ def __init__(
context: abstract.ContextInterface, optional (default=None)
For advanced users only. Provide this to override default behaviors
"""
self.model = BATS(
self.use_box_cox = use_box_cox
self.box_cox_bounds = box_cox_bounds
self.use_trend = use_trend
self.use_damped_trend = use_damped_trend
self.seasonal_periods = seasonal_periods
self.use_arma_errors = use_arma_errors
self.show_warnings = show_warnings
self.n_jobs = n_jobs
self.multiprocessing_start_method = multiprocessing_start_method
self.context = context

self._model = BATS(
use_box_cox=use_box_cox,
box_cox_bounds=box_cox_bounds,
use_trend=use_trend,
@@ -409,7 +420,7 @@
multiprocessing_start_method=multiprocessing_start_method,
context=context,
)
super().__init__(base_model=_TBATSAdapter(self.model))
super().__init__(base_model=_TBATSAdapter(self._model))


class TBATSModel(
@@ -474,7 +485,18 @@ def __init__(
context: abstract.ContextInterface, optional (default=None)
For advanced users only. Provide this to override default behaviors
"""
self.model = TBATS(
self.use_box_cox = use_box_cox
self.box_cox_bounds = box_cox_bounds
self.use_trend = use_trend
self.use_damped_trend = use_damped_trend
self.seasonal_periods = seasonal_periods
self.use_arma_errors = use_arma_errors
self.show_warnings = show_warnings
self.n_jobs = n_jobs
self.multiprocessing_start_method = multiprocessing_start_method
self.context = context

self._model = TBATS(
use_box_cox=use_box_cox,
box_cox_bounds=box_cox_bounds,
use_trend=use_trend,
@@ -486,4 +508,4 @@
multiprocessing_start_method=multiprocessing_start_method,
context=context,
)
super().__init__(base_model=_TBATSAdapter(self.model))
super().__init__(base_model=_TBATSAdapter(self._model))
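
Note: both adapters now store every constructor argument on `self` under its own name, which is what etna's parameter introspection (`to_dict`, `set_params`) reads back, while the wrapped library object moves to the private `self._model`. A sketch of the convention on a hypothetical wrapper (`SomeLibraryModel` is illustrative, not a real import):

    class MyAdapter:
        def __init__(self, use_trend: bool = True, n_jobs: int = 1):
            # Public, introspectable hyperparameters: one attribute per constructor argument.
            self.use_trend = use_trend
            self.n_jobs = n_jobs
            # Private handle to the wrapped third-party model.
            self._model = SomeLibraryModel(use_trend=use_trend, n_jobs=n_jobs)
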
2 changes: 1 addition & 1 deletion etna/models/utils.py
@@ -115,7 +115,7 @@ def determine_freq(timestamps: Union[pd.Series, pd.DatetimeIndex]) -> str:
unable to determine frequency of data
"""
try:
freq = pd.infer_freq(timestamps, warn=False)
freq = pd.infer_freq(timestamps)
except ValueError:
freq = None

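
Note: the `warn` argument of `pd.infer_freq` was deprecated in pandas 1.5 and removed in 2.0; the function already returns `None` when no frequency can be inferred, which the surrounding code handles. For example:

    import pandas as pd

    idx = pd.date_range("2023-01-01", periods=5, freq="D")
    freq = pd.infer_freq(idx)  # "D"; None if no consistent frequency is found
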
2 changes: 1 addition & 1 deletion etna/transforms/decomposition/change_points_based/base.py
@@ -111,7 +111,7 @@ def _predict_per_interval_model(self, series: pd.Series) -> pd.Series:
"""Apply per-interval detrending to series."""
if self.intervals is None or self.per_interval_models is None:
raise ValueError("Transform is not fitted! Fit the Transform before calling transform method.")
prediction_series = pd.Series(index=series.index)
prediction_series = pd.Series(index=series.index, dtype=float)
for interval in self.intervals:
tmp_series = series[interval[0] : interval[1]]
if tmp_series.empty:
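
Note: an empty `pd.Series` has no values to infer a dtype from, so pandas warns that the default will change from `float64` to `object`; passing `dtype=float` pins the intended behavior. A minimal sketch:

    import pandas as pd

    idx = pd.date_range("2023-01-01", periods=3, freq="D")
    prediction_series = pd.Series(index=idx, dtype=float)  # all-NaN float series, no warning
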
4 changes: 2 additions & 2 deletions etna/transforms/decomposition/change_points_based/segmentation.py
@@ -90,7 +90,7 @@ def __init__(
result column name. If not given use ``self.__repr__()``
"""
self.in_column = in_column
self.out_column = out_column if out_column is not None else self.__repr__()
self.out_column = out_column

self.change_points_model = (
change_points_model if change_points_model is not None else self._default_change_points_model
@@ -100,7 +100,7 @@ def __init__(
transform=_OneSegmentChangePointsSegmentationTransform(
in_column=self.in_column,
change_points_model=self.change_points_model,
out_column=self.out_column,
out_column=out_column if out_column is not None else self.__repr__(),
),
required_features=[in_column],
)
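
Note: the reordering keeps `self.out_column` equal to exactly what the caller passed (possibly `None`) and resolves the `self.__repr__()` default only when building the inner per-segment transform, so parameter round-tripping (e.g., `to_dict`) records the constructor argument rather than a baked-in repr string. The pattern on a hypothetical transform:

    from typing import Optional

    class MyTransform:
        def __init__(self, in_column: str, out_column: Optional[str] = None):
            self.in_column = in_column
            self.out_column = out_column  # keep the raw argument for introspection
            # Resolve the default only where the effective name is actually needed.
            self._effective_out_column = out_column if out_column is not None else repr(self)
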
2 changes: 1 addition & 1 deletion etna/transforms/decomposition/change_points_based/trend.py
@@ -113,7 +113,7 @@ def __init__(
super().__init__(
transform=_OneSegmentTrendTransform(
in_column=self.in_column,
out_column=self.out_column if self.out_column is not None else f"{self.__repr__()}",
out_column=self.out_column if self.out_column is not None else self.__repr__(),
change_points_model=self.change_points_model,
per_interval_model=self.per_interval_model,
),
2 changes: 1 addition & 1 deletion etna/transforms/math/differencing.py
@@ -207,7 +207,7 @@ def _reconstruct_test(self, df: pd.DataFrame, columns_to_inverse: Set[str]) -> p
for column in columns_to_inverse:
to_transform = df.loc[:, pd.IndexSlice[segments, column]].copy()
init_df = self._test_init_df.copy() # type: ignore
init_df.columns.set_levels([column], level="feature", inplace=True)
init_df.columns = init_df.columns.set_levels([column], level="feature")
init_df = init_df[segments]
to_transform = pd.concat([init_df, to_transform])

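
Note: `MultiIndex.set_levels(..., inplace=True)` was deprecated in pandas 1.2 and removed in 2.0; the method now returns a new index that must be assigned back. A toy example:

    import pandas as pd

    columns = pd.MultiIndex.from_product([["A"], ["target"]], names=["segment", "feature"])
    df = pd.DataFrame([[1.0]], columns=columns)
    # Old: df.columns.set_levels(["other"], level="feature", inplace=True)
    df.columns = df.columns.set_levels(["other"], level="feature")
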
74 changes: 33 additions & 41 deletions poetry.lock
