diff --git a/docs/release_notes.rst b/docs/release_notes.rst
index b2fff217..e3a77848 100644
--- a/docs/release_notes.rst
+++ b/docs/release_notes.rst
@@ -5,7 +5,7 @@ Dispatch Release Notes
 
 .. _release-v0-2-0:
 
 ---------------------------------------------------------------------------------------
-0.2.0 (2022-XX-XX)
+0.2.0 (2022-09-15)
 ---------------------------------------------------------------------------------------
 
@@ -24,14 +24,22 @@ What's New?
   ``retirement_date`` columns in ``fossil_plant_specs`` determine the period
   during dispatch that a generator may operate. This provides a straightforward
   method for having the portfolio you wish to dispatch change over time.
+* Cleanup and rationalization of the :meth:`dispatch.model.DispatchModel.to_file` and
+  :meth:`dispatch.model.DispatchModel.from_file` methods.
+* Updates to the system for storing and processing marginal cost data. The data is
+  now a separate argument to :meth:`dispatch.model.DispatchModel.__init__` rather
+  than a messy, confusing part of ``fossil_plant_specs``, and is now consistent
+  with how ``patio`` prepares and stores the data.
 
 Bug Fixes
 ^^^^^^^^^
-* ...
+* :meth:`dispatch.model.DispatchModel.to_file` and
+  :meth:`dispatch.model.DispatchModel.from_file` now properly deal with
+  internal data stored in both :class:`pd.DataFrame` and :class:`pd.Series`.
 
 Known Issues
 ^^^^^^^^^^^^
-* ...
+* Tests are still pretty rudimentary.
 
 
 .. _release-v0-1-0:
diff --git a/environment.yml b/environment.yml
index 619b7b49..6a9d82ad 100644
--- a/environment.yml
+++ b/environment.yml
@@ -10,6 +10,7 @@ dependencies:
   - numexpr ~= 2.8
   - numpy >= 1.18.5,<2
   - pandas >= 1.4,<1.5
+  - pandera >= 0.12
   - pyarrow >= 7,<10
   - plotly
 
diff --git a/setup.cfg b/setup.cfg
index 2b9387ec..f60a0ecf 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -23,6 +23,7 @@ install_requires =
     numexpr ~= 2.8
    numpy >= 1.18.5,<2
     pandas >= 1.4,<1.5
+    pandera ~= 0.12
     pyarrow>=7, <10
 
 [options.packages.find]
@@ -40,7 +41,6 @@ doc =
     sphinx-autoapi>=1.8,<1.10
     sphinx-autodoc-typehints
     sphinxcontrib-mermaid
-; furo>=2022.4.7
     pydata-sphinx-theme>=0.10
     sphinx-issues>=1.2,<3.1
 tests =
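To make the new marginal-cost layout concrete before the code changes below, here is a minimal sketch of the tidy Series the release note describes; the plant ID, generator IDs, and prices are invented for illustration:

import pandas as pd

years = pd.date_range("2015-01-01", freq="YS", periods=3)
idx = pd.MultiIndex.from_tuples(
    [(55380, g, dt) for g in ("1", "2") for dt in years],
    names=["plant_id_eia", "generator_id", "datetime"],
)
fossil_marginal_cost = pd.Series(
    [22.5, 24.1, 25.0, 30.0, 31.2, 32.5], index=idx, name="fossil_marginal_cost"
)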
diff --git a/src/dispatch/__init__.py b/src/dispatch/__init__.py
index 9e735972..f88d37b7 100644
--- a/src/dispatch/__init__.py
+++ b/src/dispatch/__init__.py
@@ -1,22 +1,33 @@
 """A template repository for a Python package created by Catalyst Cooperative."""
 
 import logging
-
-import pkg_resources
+from importlib.metadata import PackageNotFoundError, version
 
 from dispatch.engine import dispatch_engine, dispatch_engine_compiled
+from dispatch.helpers import apply_op_ret_date, copy_profile
 from dispatch.model import DispatchModel
 
-__all__ = ["DispatchModel", "dispatch_engine", "dispatch_engine_compiled"]
+__all__ = [
+    "DispatchModel",
+    "dispatch_engine",
+    "dispatch_engine_compiled",
+    "copy_profile",
+    "apply_op_ret_date",
+]
 
 __author__ = "RMI"
 __contact__ = "aengel@rmi.org"
 __maintainer__ = "Alex Engel"
 __license__ = "BSD 3-Clause License"
 __maintainer_email__ = "aengel@rmi.org"
-__version__ = pkg_resources.get_distribution("rmi.dispatch").version
 __docformat__ = "restructuredtext en"
 __description__ = "A simple and efficient dispatch model."
+try:
+    __version__ = version("rmi.dispatch")
+except PackageNotFoundError:
+    # package is not installed
+    pass
+
 
 __projecturl__ = "https://github.com/rmi-electricity/dispatch"
 __downloadurl__ = "https://github.com/rmi-electricity/dispatch"
diff --git a/src/dispatch/helpers.py b/src/dispatch/helpers.py
new file mode 100644
index 00000000..d55b34d0
--- /dev/null
+++ b/src/dispatch/helpers.py
@@ -0,0 +1,77 @@
+"""Helpers for creating and adjusting hourly profiles."""
+
+from __future__ import annotations
+
+import pandas as pd
+
+
+def copy_profile(
+    profiles: pd.DataFrame | pd.Series, years: range | tuple
+) -> pd.DataFrame | pd.Series:
+    """Create multiple 'years' of hourly profile data.
+
+    Args:
+        profiles: the profile to make copies of
+        years: the years, each of which will be a copy
+            of `profiles`
+
+    Returns: Copied profiles.
+
+    """
+    dfs = []
+    assert isinstance(profiles.index, pd.DatetimeIndex)
+    if isinstance(profiles, pd.Series):
+        profiles = profiles.to_frame()
+    if len(profiles.index.year.unique()) > 1:
+        raise AssertionError("`profiles` must be for a single year")
+    for yr in years:
+        dfs.append(
+            profiles.assign(
+                datetime=lambda x: x.index.map(lambda y: y.replace(year=yr)),
+            ).set_index("datetime")
+        )
+    return pd.concat(dfs, axis=0).squeeze()
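A usage sketch for `copy_profile`, assuming a non-leap source year so that `Timestamp.replace(year=...)` never lands on a missing Feb 29:

import pandas as pd

from dispatch.helpers import copy_profile

# one year of hourly data to be repeated for 2025-2027
profile = pd.Series(
    1.0, index=pd.date_range("2019-01-01", "2019-12-31 23:00", freq="H"), name="load"
)
out = copy_profile(profile, range(2025, 2028))
assert len(out) == 3 * 8760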
+ + """ + assert isinstance(profiles.index, pd.DatetimeIndex) + if capacity_mw is None: + capacity_mw = pd.Series(1, index=operating_date.index, name="capacity_mw") + if profiles.shape[1] == len(operating_date) == len(retirement_date): + pass + else: + raise AssertionError( + "`profiles` must have same number of columns as lengths of `op_date` and `ret_date`" + ) + # duplicate the DatetimeIndex so it is the same shape as `profiles` + dt_idx = pd.concat( + [profiles.index.to_series()] * profiles.shape[1], + axis=1, + ).to_numpy() + return pd.DataFrame( + ( + (dt_idx <= retirement_date.fillna(profiles.index.max()).to_numpy()) + & (dt_idx >= operating_date.fillna(profiles.index.min()).to_numpy()) + ) + * profiles.to_numpy() + * capacity_mw.to_numpy(), + index=profiles.index, + columns=operating_date.index, + ) diff --git a/src/dispatch/model.py b/src/dispatch/model.py index d2a02f9a..b9ecc0fb 100644 --- a/src/dispatch/model.py +++ b/src/dispatch/model.py @@ -1,27 +1,32 @@ """Simple dispatch model interface.""" - - from __future__ import annotations import inspect import json import logging +from collections.abc import Callable from datetime import datetime +from importlib.metadata import PackageNotFoundError, version from io import BytesIO from pathlib import Path from zipfile import ZIP_DEFLATED, ZipFile import numpy as np import pandas as pd -import pkg_resources __all__ = ["DispatchModel"] from dispatch.engine import dispatch_engine, dispatch_engine_compiled +from dispatch.helpers import apply_op_ret_date LOGGER = logging.getLogger(__name__) -__version__ = pkg_resources.get_distribution("rmi.dispatch").version +try: + __version__ = version("rmi.dispatch") +except PackageNotFoundError: + # package is not installed + pass + MTDF = pd.DataFrame() """An empty :py:class:`pd.DataFrame`.""" @@ -37,12 +42,11 @@ class DispatchModel: __slots__ = ( "net_load_profile", "fossil_plant_specs", + "fossil_marginal_cost", "fossil_profiles", "storage_specs", "re_profiles", "re_plant_specs", - "jit", - "name", "dt_idx", "yrs_idx", "fossil_redispatch", @@ -57,6 +61,7 @@ def __init__( net_load_profile: pd.Series[float], fossil_plant_specs: pd.DataFrame, fossil_profiles: pd.DataFrame, + fossil_marginal_cost: pd.Series[float], storage_specs: pd.DataFrame | None = None, re_profiles: pd.DataFrame | None = None, re_plant_specs: pd.DataFrame | None = None, @@ -74,8 +79,9 @@ def __init__( operating_date: the date the plant entered or will enter service retirement_date: the date the plant will retire startup_cost: cost to start up the generator - datetime(freq='YS'): a column for each year with marginal cost data fossil_profiles: set the maximum output of each generator in each hour + fossil_marginal_cost: marginal cost of each fossil generator in each year + must be tidy with :class:`pd.MultiIndex` of ['plant_id_eia', 'generator_id', 'datetime'] storage_specs: rows are types of storage, columns must contain: capacity_mw: max charge/discharge capacity in MW duration_hrs: storage duration in hours @@ -86,12 +92,15 @@ def __init__( jit: if True, use numba to compile the dispatch engine, False is mostly for debugging name: a name, only used in the repr """ - self.net_load_profile = net_load_profile - self.jit = jit - self.__meta__ = { + if not name and "balancing_authority_code_eia" in fossil_plant_specs: + name = fossil_plant_specs.balancing_authority_code_eia.mode().iloc[0] + self.__meta__: dict[str, str] = { + "name": name, "version": __version__, "created": datetime.now().strftime("%c"), + "jit": jit, } + 
diff --git a/src/dispatch/model.py b/src/dispatch/model.py
index d2a02f9a..b9ecc0fb 100644
--- a/src/dispatch/model.py
+++ b/src/dispatch/model.py
@@ -1,27 +1,32 @@
 """Simple dispatch model interface."""
-
-
 from __future__ import annotations
 
 import inspect
 import json
 import logging
+from collections.abc import Callable
 from datetime import datetime
+from importlib.metadata import PackageNotFoundError, version
 from io import BytesIO
 from pathlib import Path
 from zipfile import ZIP_DEFLATED, ZipFile
 
 import numpy as np
 import pandas as pd
-import pkg_resources
 
 __all__ = ["DispatchModel"]
 
 from dispatch.engine import dispatch_engine, dispatch_engine_compiled
+from dispatch.helpers import apply_op_ret_date
 
 LOGGER = logging.getLogger(__name__)
 
-__version__ = pkg_resources.get_distribution("rmi.dispatch").version
+try:
+    __version__ = version("rmi.dispatch")
+except PackageNotFoundError:
+    # package is not installed
+    pass
+
 
 MTDF = pd.DataFrame()
 """An empty :py:class:`pd.DataFrame`."""
@@ -37,12 +42,11 @@ class DispatchModel:
     __slots__ = (
         "net_load_profile",
         "fossil_plant_specs",
+        "fossil_marginal_cost",
         "fossil_profiles",
         "storage_specs",
         "re_profiles",
         "re_plant_specs",
-        "jit",
-        "name",
         "dt_idx",
         "yrs_idx",
         "fossil_redispatch",
@@ -57,6 +61,7 @@ def __init__(
         net_load_profile: pd.Series[float],
         fossil_plant_specs: pd.DataFrame,
         fossil_profiles: pd.DataFrame,
+        fossil_marginal_cost: pd.Series[float],
         storage_specs: pd.DataFrame | None = None,
         re_profiles: pd.DataFrame | None = None,
         re_plant_specs: pd.DataFrame | None = None,
@@ -74,8 +79,10 @@ def __init__(
                 operating_date: the date the plant entered or will enter service
                 retirement_date: the date the plant will retire
                 startup_cost: cost to start up the generator
-                datetime(freq='YS'): a column for each year with marginal cost data
             fossil_profiles: set the maximum output of each generator in each hour
+            fossil_marginal_cost: marginal cost of each fossil generator in each
+                cost period; must be tidy, with a :class:`pd.MultiIndex` of
+                ``['plant_id_eia', 'generator_id', 'datetime']``
             storage_specs: rows are types of storage, columns must contain:
                 capacity_mw: max charge/discharge capacity in MW
                 duration_hrs: storage duration in hours
@@ -86,12 +92,15 @@ def __init__(
             jit: if True, use numba to compile the dispatch engine, False is
                 mostly for debugging
             name: a name, only used in the repr
         """
-        self.net_load_profile = net_load_profile
-        self.jit = jit
-        self.__meta__ = {
+        if not name and "balancing_authority_code_eia" in fossil_plant_specs:
+            name = fossil_plant_specs.balancing_authority_code_eia.mode().iloc[0]
+        self.__meta__: dict = {
+            "name": name,
             "version": __version__,
             "created": datetime.now().strftime("%c"),
+            "jit": jit,
         }
+        self.net_load_profile: pd.Series = net_load_profile
         self.dt_idx = self.net_load_profile.index
         self.yrs_idx = self.dt_idx.to_series().groupby([pd.Grouper(freq="YS")]).first()
@@ -100,41 +109,14 @@
         for col in ("capacity_mw", "ramp_rate", "startup_cost"):
             if col not in fossil_plant_specs:
                 raise AssertionError(f"`fossil_plant_specs` requires `{col}` column")
-        if not all(x in fossil_plant_specs for x in self.yrs_idx):
-            raise AssertionError(
-                "`fossil_plant_specs` requires columns for plant cost with 'YS' datetime names"
-            )
         self.fossil_plant_specs: pd.DataFrame = fossil_plant_specs
-        if not name and "balancing_authority_code_eia" in self.fossil_plant_specs:
-            self.name = (
-                self.fossil_plant_specs.balancing_authority_code_eia.mode().iloc[0]
-            )
-        else:
-            self.name = name
-
+        # validate structure of `fossil_marginal_cost` and set the attribute
+        self.fossil_marginal_cost: pd.Series = self._validate_fossil_marginal_cost(
+            fossil_marginal_cost
+        )
         # validate `storage_specs`
-        if storage_specs is None:
-            LOGGER.warning("Careful, dispatch without storage is untested")
-            self.storage_specs = pd.DataFrame(
-                [0.0, 0, 1.0, self.net_load_profile.index.max()],
-                columns=[
-                    "capacity_mw",
-                    "duration_hrs",
-                    "roundtrip_eff",
-                    "operating_date",
-                ],
-            )
-        else:
-            for col in (
-                "capacity_mw",
-                "duration_hrs",
-                "roundtrip_eff",
-                "operating_date",
-            ):
-                if col not in storage_specs:
-                    raise AssertionError(f"`storage_specs` requires `{col}` column")
-            self.storage_specs = storage_specs
+        self.storage_specs: pd.DataFrame = self._validate_storage_specs(storage_specs)
 
         if len(fossil_profiles) != len(self.net_load_profile):
             raise AssertionError(
@@ -163,43 +145,92 @@
             )
         self.starts = MTDF.reindex(columns=self.fossil_plant_specs.index)
 
+    def _validate_storage_specs(self, storage_specs):
+        """Validate `storage_specs`, or create a zero-capacity placeholder if None."""
+        if storage_specs is None:
+            LOGGER.warning("Careful, dispatch without storage is untested")
+            storage_specs = pd.DataFrame(
+                [[0.0, 0, 1.0, self.net_load_profile.index.max()]],
+                columns=[
+                    "capacity_mw",
+                    "duration_hrs",
+                    "roundtrip_eff",
+                    "operating_date",
+                ],
+            )
+        else:
+            for col in (
+                "capacity_mw",
+                "duration_hrs",
+                "roundtrip_eff",
+                "operating_date",
+            ):
+                if col not in storage_specs:
+                    raise AssertionError(f"`storage_specs` requires `{col}` column")
+        return storage_specs
+
+    def _validate_fossil_marginal_cost(self, fossil_marginal_cost):
+        """Check that `fossil_marginal_cost` covers all generators and cost periods."""
+        if not np.all(
+            fossil_marginal_cost.reset_index(
+                level="datetime", drop=True
+            ).index.drop_duplicates()
+            == self.fossil_plant_specs.index
+        ):
+            raise AssertionError(
+                "generators in `fossil_marginal_cost` do not match generators in `fossil_plant_specs`"
+            )
+        marg_freq = pd.infer_freq(
+            fossil_marginal_cost.index.get_level_values("datetime").unique()
+        )
+        self.__meta__["marginal_cost_freq"] = marg_freq
+        marg_dts = fossil_marginal_cost.index.get_level_values("datetime")
+        missing_prds = [
+            d
+            for d in self.net_load_profile.resample(marg_freq).first().index
+            if d not in marg_dts
+        ]
+        if missing_prds:
+            raise AssertionError(f"{missing_prds} not in `fossil_marginal_cost`")
+        return fossil_marginal_cost
+
     @classmethod
-    def from_disk(cls, path: Path | str):
+    def from_file(cls, path: Path | str):
         """Recreate an instance of `DispatchModel` from disk."""
         if not isinstance(path, Path):
             path = Path(path)
+
+        def _type_check(meta):
+            if meta["__qualname__"] != cls.__qualname__:
+                raise TypeError(
+                    f"{path.name} represents a `{meta['__qualname__']}` which "
+                    f"is not compatible with `{cls.__qualname__}.from_file()`"
+                )
+            del meta["__qualname__"]
+
         data_dict = {}
         with ZipFile(path.with_suffix(".zip"), "r") as z:
             metadata = json.loads(z.read("metadata.json"))
+            _type_check(metadata)
+            plant_index = pd.MultiIndex.from_tuples(
+                metadata.pop("plant_index"), names=["plant_id_eia", "generator_id"]
+            )
             for x in z.namelist():
                 if "parquet" in x:
-                    data_dict[x.removesuffix(".parquet")] = pd.read_parquet(
-                        BytesIO(z.read(x))
-                    )
-        if metadata["__qualname__"] != cls.__qualname__:
-            raise TypeError(
-                f"{path.name} represents a `{metadata['__qualname__']}` which "
-                f"is not compatible with `{cls.__qualname__}.from_disk()`"
-            )
-
-        # have to fix columns and types
-        data_dict["fossil_profiles"].columns = data_dict["fossil_plant_specs"].index
-        data_dict["fossil_redispatch"].columns = data_dict["fossil_plant_specs"].index
-        data_dict["fossil_plant_specs"].columns = [
-            pd.Timestamp(x) if x[0] == "2" else x
-            for x in data_dict["fossil_plant_specs"].columns
-        ]
-        data_dict["net_load_profile"] = data_dict["net_load_profile"].squeeze()
+                    df_name = x.removesuffix(".parquet")
+                    df_in = pd.read_parquet(BytesIO(z.read(x))).squeeze()
+                    if df_name in ("fossil_profiles", "fossil_redispatch"):
+                        df_in.columns = plant_index
+                    data_dict[df_name] = df_in
         sig = inspect.signature(cls).parameters
         self = cls(
-            **{k: v for k, v in data_dict.items() if k in sig},
-            **{k: v for k, v in metadata.items() if k in sig},
+            **{k: v for k, v in (data_dict | metadata).items() if k in sig},
        )
         for k, v in data_dict.items():
             if k not in sig:
                 setattr(self, k, v)
-        self.__meta__ = {k: v for k, v in metadata.items() if k not in sig}
+        self.__meta__.update({k: v for k, v in metadata.items() if k not in sig})
         return self
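The frequency handling in `_validate_fossil_marginal_cost` above leans on `pd.infer_freq`, which needs at least three timestamps; a sketch of the check under the pandas 1.4 pin (the alias string is spelled differently in newer pandas):

import pandas as pd

marg_dts = pd.date_range("2015-01-01", freq="YS", periods=3)
marg_freq = pd.infer_freq(marg_dts)  # "AS-JAN" under pandas 1.x
net_load = pd.Series(
    0.0, index=pd.date_range("2015-01-01", "2017-12-31 23:00", freq="H")
)
# every resampled period start must appear in the cost index
missing = [d for d in net_load.resample(marg_freq).first().index if d not in marg_dts]
assert not missing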
meta["__qualname__"] + data_dict = {} with ZipFile(path.with_suffix(".zip"), "r") as z: metadata = json.loads(z.read("metadata.json")) + _type_check(metadata) + plant_index = pd.MultiIndex.from_tuples( + metadata.pop("plant_index"), names=["plant_id_eia", "generator_id"] + ) for x in z.namelist(): if "parquet" in x: - data_dict[x.removesuffix(".parquet")] = pd.read_parquet( - BytesIO(z.read(x)) - ) - if metadata["__qualname__"] != cls.__qualname__: - raise TypeError( - f"{path.name} represents a `{metadata['__qualname__']}` which " - f"is not compatible with `{cls.__qualname__}.from_disk()`" - ) - - # have to fix columns and types - data_dict["fossil_profiles"].columns = data_dict["fossil_plant_specs"].index - data_dict["fossil_redispatch"].columns = data_dict["fossil_plant_specs"].index - data_dict["fossil_plant_specs"].columns = [ - pd.Timestamp(x) if x[0] == "2" else x - for x in data_dict["fossil_plant_specs"].columns - ] - data_dict["net_load_profile"] = data_dict["net_load_profile"].squeeze() + df_name = x.removesuffix(".parquet") + df_in = pd.read_parquet(BytesIO(z.read(x))).squeeze() + if df_name in ("fossil_profiles", "fossil_redispatch"): + df_in.columns = plant_index + data_dict[df_name] = df_in sig = inspect.signature(cls).parameters self = cls( - **{k: v for k, v in data_dict.items() if k in sig}, - **{k: v for k, v in metadata.items() if k in sig}, + **{k: v for k, v in (data_dict | metadata).items() if k in sig}, ) for k, v in data_dict.items(): if k not in sig: setattr(self, k, v) - self.__meta__ = {k: v for k, v in metadata.items() if k not in sig} + self.__meta__.update({k: v for k, v in metadata.items() if k not in sig}) return self @classmethod @@ -208,6 +237,7 @@ def from_patio( net_load: pd.Series[float], fossil_profiles: pd.DataFrame, plant_data: pd.DataFrame, + cost_data: pd.Series, storage_specs: pd.DataFrame, jit: bool = True, ) -> DispatchModel: @@ -217,16 +247,18 @@ def from_patio( return cls( net_load_profile=net_load, fossil_plant_specs=plant_data, + fossil_marginal_cost=cost_data, fossil_profiles=fossil_profiles, storage_specs=storage_specs, jit=jit, ) @classmethod - def new( + def from_fresh( cls, net_load_profile: pd.Series[float], fossil_plant_specs: pd.DataFrame, + fossil_marginal_cost: pd.Series, storage_specs: pd.DataFrame, jit: bool = True, ) -> DispatchModel: @@ -236,13 +268,6 @@ def new( operating_date=net_load_profile.index.min() ) - # duplicate the DatetimeIndex so it is the same shape as `fossil_profiles` - dt_ix = pd.concat( - [net_load_profile.index.to_series()] * len(fossil_plant_specs), - axis=1, - ).to_numpy() - wk = pd.Timedelta(weeks=1) - # insert an `operating_date` column if it doesn't exist and fill missing values # with a date before the dispatch period if "operating_date" not in fossil_plant_specs: @@ -255,31 +280,31 @@ def new( if "retirement_date" not in fossil_plant_specs: if "planned_retirement_date" not in fossil_plant_specs: fossil_plant_specs = fossil_plant_specs.assign( - retirement_date=net_load_profile.index.max() + wk + retirement_date=net_load_profile.index.max() ) else: fossil_plant_specs = fossil_plant_specs.rename( columns={"planned_retirement_date": "retirement_date"} ) fossil_plant_specs = fossil_plant_specs.fillna( - {"retirement_date": net_load_profile.index.max() + wk} + {"retirement_date": net_load_profile.index.max()} ) - fossil_profiles = pd.DataFrame( - # make a boolean array for whether a particular hour comes between - # a generator's `operating_date` and `retirement_date` or not - ( - (dt_ix < 
@@ -288,7 +313,7 @@ def new(
     @property
     def dispatch_func(self):
-        """Appropriate dispatch engine depending on ``self.jit``."""
-        return dispatch_engine_compiled if self.jit else dispatch_engine
+        """Appropriate dispatch engine depending on the stored ``jit`` setting."""
+        return dispatch_engine_compiled if self.__meta__["jit"] else dispatch_engine
 
     @property
     def is_redispatch(self):
@@ -336,7 +361,7 @@ def __call__(self) -> None:
             fossil_startup_cost=self.fossil_plant_specs.startup_cost.to_numpy(
                 dtype=np.float_
             ),
-            fossil_marginal_cost=self.fossil_plant_specs[self.yrs_idx].to_numpy(
+            fossil_marginal_cost=self.fossil_marginal_cost.unstack().to_numpy(
                 dtype=np.float_
             ),
             storage_mw=self.storage_specs.capacity_mw.to_numpy(dtype=np.float_),
@@ -387,9 +412,9 @@
     def _cost(self, profiles: pd.DataFrame) -> pd.DataFrame:
         """Determine total cost based on hourly production and starts."""
         profs = profiles.to_numpy()
-        marginal_cost = profs * self.fossil_plant_specs[self.yrs_idx].T.reindex(
-            index=self.net_load_profile.index, method="ffill"
-        )
+        marginal_cost = profs * self.fossil_marginal_cost.unstack(
+            level=["plant_id_eia", "generator_id"]
+        ).reindex(index=self.net_load_profile.index, method="ffill")
         start_cost = self.fossil_plant_specs.startup_cost.to_numpy() * np.where(
             (profs == 0) & (np.roll(profs, -1, axis=0) > 0), 1, 0
         )
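The start-counting logic in `_cost` above flags hours where a unit is off but runs in the next hour; a small check of the indexing trick (note `np.roll` wraps, so a unit that is off in the final hour and on in hour zero would also be flagged):

import numpy as np

profs = np.array([[0.0], [0.0], [50.0], [0.0], [60.0]])
starts = np.where((profs == 0) & (np.roll(profs, -1, axis=0) > 0), 1, 0)
assert starts.ravel().tolist() == [0, 1, 0, 1, 0]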
@@ -629,7 +654,7 @@
             .sort_index()
         )
 
-    def to_disk(self, path: Path | str, compression=ZIP_DEFLATED, clobber=False):
+    def to_file(self, path: Path | str, compression=ZIP_DEFLATED, clobber=False):
         """Save `DispatchModel` to disk.
 
         A very ugly process at the moment because of our goal not to use pickle
@@ -642,40 +667,37 @@
         if path.exists() and not clobber:
             raise FileExistsError(f"{path} exists, to overwrite set `clobber=True`")
 
+        _str_cols: Callable = lambda df, n: df.set_axis(
+            list(map(str, range(df.shape[1]))), axis="columns"
+        )
+        _null: Callable = lambda df, n: df
+        _to_frame: Callable = lambda df, n: df.to_frame(name=n)
+
         auto_parquet = (
-            "re_profiles",
-            "storage_dispatch",
-            "system_data",
-            "storage_specs",
+            ("re_profiles", _null),
+            ("storage_dispatch", _null),
+            ("system_data", _null),
+            ("storage_specs", _null),
+            ("fossil_plant_specs", _null),
+            ("fossil_marginal_cost", _to_frame),
+            ("fossil_profiles", _str_cols),
+            ("fossil_redispatch", _str_cols),
+            ("net_load_profile", _to_frame),
         )
         metadata = {
-            "name": self.name,
-            "jit": self.jit,
-            "__qualname__": self.__class__.__qualname__,
             **self.__meta__,
+            "__qualname__": self.__class__.__qualname__,
+            "plant_index": list(self.fossil_plant_specs.index),
         }
-        # need to make all column names strings
-        fossil_plant_specs = self.fossil_plant_specs.copy()
-        fossil_plant_specs.columns = list(map(str, self.fossil_plant_specs.columns))
-        fossil_profiles = self.fossil_profiles.set_axis(
-            list(map(str, range(self.fossil_profiles.shape[1]))), axis="columns"
-        )
-        fossil_redispatch = self.fossil_redispatch.set_axis(
-            list(map(str, range(self.fossil_profiles.shape[1]))), axis="columns"
-        )
         with ZipFile(path, "w", compression=compression) as z:
-            for df_name in auto_parquet:
-                df = getattr(self, df_name)
-                if df is not None:
-                    z.writestr(f"{df_name}.parquet", df.to_parquet())
-            z.writestr(
-                "net_load_profile.parquet",
-                self.net_load_profile.to_frame(name="nl").to_parquet(),
-            )
-            z.writestr("fossil_plant_specs.parquet", fossil_plant_specs.to_parquet())
-            z.writestr("fossil_profiles.parquet", fossil_profiles.to_parquet())
-            z.writestr("fossil_redispatch.parquet", fossil_redispatch.to_parquet())
+            for df_name, func in auto_parquet:
+                try:
+                    df_out = func(getattr(self, df_name), df_name)
+                    if df_out is not None:
+                        z.writestr(f"{df_name}.parquet", df_out.to_parquet())
+                except Exception as exc:
+                    raise RuntimeError(f"failed to write `{df_name}`: {exc!r}") from exc
             z.writestr(
                 "metadata.json", json.dumps(metadata, ensure_ascii=False, indent=4)
             )
@@ -683,7 +705,6 @@
     def __repr__(self) -> str:
         return (
             self.__class__.__qualname__
-            + f"({self.name=}, {self.jit=}, n_plants={len(self.fossil_plant_specs)}, ...)".replace(
-                "self.", ""
-            )
+            + f"({', '.join(f'{k}={v}' for k, v in self.__meta__.items())}, "
+            + f"n_plants={len(self.fossil_plant_specs)})"
         )
diff --git a/tests/conftest.py b/tests/conftest.py
index 96c54855..fb1e92a5 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -51,5 +51,13 @@ def fossil_specs(test_dir) -> pd.DataFrame:
     """Fossil Profiles."""
     df = pd.read_parquet(test_dir / "data/plant_specs.parquet")
-    df.columns = [pd.Timestamp(x) if x[0] == "2" else x for x in df.columns]
     return df
+
+
+@pytest.fixture(scope="session")
+def fossil_cost(test_dir) -> pd.Series:
+    """Fossil marginal costs."""
+    df = pd.read_parquet(test_dir / "data/plant_specs.parquet").filter(like="20")
+    df.columns = df.columns.map(lambda x: pd.Timestamp(x))
+    df.columns.name = "datetime"
+    return df.stack()
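The `fossil_cost` fixture above builds the tidy Series by stacking year columns into the index; in miniature (IDs and prices are invented):

import pandas as pd

df = pd.DataFrame(
    [[22.5, 24.1]],
    index=pd.MultiIndex.from_tuples(
        [(55380, "1")], names=["plant_id_eia", "generator_id"]
    ),
    columns=pd.DatetimeIndex(["2015-01-01", "2016-01-01"], name="datetime"),
)
tidy = df.stack()  # Series indexed by (plant_id_eia, generator_id, datetime)
assert tidy.loc[(55380, "1", pd.Timestamp("2015-01-01"))] == 22.5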
diff --git a/tests/dispatch_test.py b/tests/dispatch_test.py
index 64f8c875..81bc9dd3 100644
--- a/tests/dispatch_test.py
+++ b/tests/dispatch_test.py
@@ -5,23 +5,25 @@
 from dispatch import DispatchModel
 
 
-def setup_dm(fossil_profiles, fossil_specs, re_profiles, re, storage):
+def setup_dm(fossil_profiles, fossil_specs, fossil_cost, re_profiles, re, storage):
     """Setup `DispatchModel`."""
     fossil_profiles.columns = fossil_specs.index
     dm = DispatchModel.from_patio(
         fossil_profiles.sum(axis=1) - re_profiles @ re,
         fossil_profiles=fossil_profiles,
+        cost_data=fossil_cost,
         plant_data=fossil_specs,
         storage_specs=storage,
     )
     return dm
 
 
-def test_from_patio(fossil_profiles, re_profiles, fossil_specs):
+def test_from_patio(fossil_profiles, re_profiles, fossil_specs, fossil_cost):
     """Dummy test to quiet pytest."""
     dm = setup_dm(
         fossil_profiles,
         fossil_specs,
+        fossil_cost,
         re_profiles,
         np.array([5000.0, 5000.0, 0.0, 0.0]),
         pd.DataFrame(
@@ -32,23 +34,26 @@
     )
     assert dm
 
 
-def test_new(fossil_profiles, re_profiles, fossil_specs):
+def test_from_fresh(fossil_profiles, re_profiles, fossil_specs, fossil_cost):
     """Dummy test to quiet pytest."""
     fossil_specs.iloc[
         0, fossil_specs.columns.get_loc("retirement_date")
     ] = fossil_profiles.index.max() - pd.Timedelta(weeks=15)
-    self = DispatchModel.new(
+    self = DispatchModel.from_fresh(
         net_load_profile=fossil_profiles.sum(axis=1),
         fossil_plant_specs=fossil_specs,
+        fossil_marginal_cost=fossil_cost,
         storage_specs=pd.DataFrame(
             [(5000, 4, 0.9), (2000, 8760, 0.5)],
             columns=["capacity_mw", "duration_hrs", "roundtrip_eff"],
         ),
+        jit=True,
     )
+    self()
     assert self
 
 
-def test_new_with_dates(fossil_profiles, re_profiles, fossil_specs):
+def test_from_fresh_with_dates(fossil_profiles, re_profiles, fossil_specs, fossil_cost):
     """Test operating and retirement dates for fossil and storage."""
     fossil_specs.iloc[
         0, fossil_specs.columns.get_loc("retirement_date")
@@ -56,9 +61,10 @@
     fossil_specs.loc[8066, "retirement_date"] = pd.Timestamp(
         year=2018, month=12, day=31
     )
-    self = DispatchModel.new(
+    self = DispatchModel.from_fresh(
         net_load_profile=fossil_profiles.sum(axis=1),
         fossil_plant_specs=fossil_specs,
+        fossil_marginal_cost=fossil_cost,
         storage_specs=pd.DataFrame(
             [
                 (5000, 4, 0.9, pd.Timestamp(year=2016, month=1, day=1)),
@@ -72,12 +78,13 @@
     )
     assert self
 
 
-def test_low_lost_load(fossil_profiles, re_profiles, fossil_specs):
+def test_low_lost_load(fossil_profiles, re_profiles, fossil_specs, fossil_cost):
     """Dummy test to quiet pytest."""
     fossil_profiles.columns = pd.MultiIndex.from_tuples(fossil_specs.index)
     dm = setup_dm(
         fossil_profiles,
         fossil_specs,
+        fossil_cost,
         re_profiles,
         np.array([5000.0, 5000.0, 0.0, 0.0]),
         pd.DataFrame(
@@ -89,12 +96,15 @@
     )
     assert (dm.lost_load() / dm.lost_load().sum()).iloc[0] > 0.9999
 
 
-def test_write_and_read(fossil_profiles, re_profiles, fossil_specs, test_dir):
+def test_write_and_read(
+    fossil_profiles, re_profiles, fossil_specs, test_dir, fossil_cost
+):
     """Test that DispatchModel can be written and read."""
     fossil_profiles.columns = fossil_specs.index
     dm = setup_dm(
         fossil_profiles,
         fossil_specs,
+        fossil_cost,
         re_profiles,
         np.array([5000.0, 5000.0, 0.0, 0.0]),
         pd.DataFrame(
"test_obj.zip" try: - dm.to_disk(file) - x = DispatchModel.from_disk(file) + dm.to_file(file) + x = DispatchModel.from_file(file) x() - x.to_disk(file, clobber=True) + x.to_file(file, clobber=True) except Exception as exc: raise exc else: @@ -117,12 +127,13 @@ def test_write_and_read(fossil_profiles, re_profiles, fossil_specs, test_dir): file.unlink() -def test_marginal_cost(fossil_profiles, re_profiles, fossil_specs): +def test_marginal_cost(fossil_profiles, re_profiles, fossil_specs, fossil_cost): """Setup for testing cost and grouper methods.""" fossil_profiles.columns = fossil_specs.index self = setup_dm( fossil_profiles, fossil_specs, + fossil_cost, re_profiles, np.array([5000.0, 5000.0, 0.0, 0.0]), pd.DataFrame(