4 changes: 2 additions & 2 deletions README.rst
@@ -31,7 +31,7 @@ Dispatch can be installed and used in it's own environment or installed into ano
environment using pip. To create an environment for Dispatch, navigate to the repo
folder in terminal and run:

-.. code-block:: console
+.. code-block:: bash

$ mamba update mamba
$ mamba env create --name dispatch --file environment.yml
@@ -40,7 +40,7 @@ If you get a ``CondaValueError`` that the prefix already exists, that means an
environment with the same name already exists. You must remove the old one before
creating the new one:

-.. code-block:: console
+.. code-block:: bash

$ mamba update mamba
$ mamba env remove --name dispatch
29 changes: 15 additions & 14 deletions src/dispatch/model.py
@@ -4,6 +4,7 @@
import inspect
import json
import logging
+from collections.abc import Callable
from datetime import datetime
from importlib.metadata import version
from io import BytesIO
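The newly imported ``Callable`` is used further down to annotate the ``dispatch_func`` property. A minimal, self-contained sketch of that annotation pattern, with illustrative stand-in functions rather than the module's real engines:

.. code-block:: python

    from collections.abc import Callable

    def compiled_engine(x: float) -> float:
        return x * 2.0

    def plain_engine(x: float) -> float:
        return x * 2.0

    class Example:
        def __init__(self, jit: bool) -> None:
            self.jit = jit

        @property
        def engine(self) -> Callable:
            # Pick between two interchangeable callables, mirroring how
            # dispatch_func switches on the "jit" setting.
            return compiled_engine if self.jit else plain_engine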
@@ -155,7 +156,7 @@ def __init__(
)
self.starts = MTDF.reindex(columns=self.dispatchable_specs.index)

-def add_total_costs(self, df):
+def add_total_costs(self, df: pd.DataFrame) -> pd.DataFrame:
"""Add columns for total FOM and total startup from respective unit costs."""
df = (
df.reset_index()
@@ -172,7 +173,7 @@ def add_total_costs(self, df):
return df.drop(columns=["capacity_mw"])

@classmethod
-def from_file(cls, path: Path | str):
+def from_file(cls, path: Path | str) -> DispatchModel:
"""Recreate an instance of :class:`.DispatchModel` from disk."""
if not isinstance(path, Path):
path = Path(path)
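For orientation, a hypothetical round trip using this classmethod (the file name is illustrative; the on-disk format is whatever ``to_file`` wrote):

.. code-block:: python

    from pathlib import Path

    from dispatch.model import DispatchModel

    # Either a str or a Path is accepted; it is normalized internally.
    dm = DispatchModel.from_file(Path("saved_run.zip"))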
@@ -289,12 +290,12 @@ def from_fresh(
)

@property
-def dispatch_func(self):
+def dispatch_func(self) -> Callable:
"""Appropriate dispatch engine depending on ``jit`` setting."""
return dispatch_engine_compiled if self._metadata["jit"] else dispatch_engine

@property
-def is_redispatch(self):
+def is_redispatch(self) -> bool:
"""True if this is redispatch, i.e. has meaningful historical dispatch."""
# more than 2 unique values are required because any plant that begins
# operation during the period will have both 0 and its capacity
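The comment above carries the key reasoning: a unit that merely comes online partway through the period already produces two distinct values (zero and its capacity), so only more than two unique values indicates genuinely meaningful historical dispatch. A standalone illustration of that check with made-up series, not the model's actual data or implementation:

.. code-block:: python

    import pandas as pd

    # Unit that simply switches on mid-period: values are only {0.0, 100.0}.
    flat_profile = pd.Series([0.0, 0.0, 100.0, 100.0, 100.0])

    # Unit with real historical dispatch varies between the extremes.
    varied_profile = pd.Series([0.0, 35.0, 80.0, 100.0, 60.0])

    print(flat_profile.nunique() > 2)    # False -> no meaningful history
    print(varied_profile.nunique() > 2)  # True  -> meaningful history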
@@ -589,7 +590,7 @@ def storage_durations(self) -> pd.DataFrame:
axis=1,
)

-def system_level_summary(self, freq="YS", **kwargs):
+def system_level_summary(self, freq: str = "YS", **kwargs) -> pd.DataFrame:
"""Create system and storage summary metrics."""
out = pd.concat(
[
@@ -648,9 +649,9 @@ def system_level_summary(self, freq="YS", **kwargs):
def re_summary(
self,
by: str | None = "technology_description",
-freq="YS",
+freq: str = "YS",
**kwargs,
-):
+) -> pd.DataFrame:
"""Create granular summary of renewable plant metrics."""
if self.re_profiles is None or self.re_plant_specs is None:
raise AssertionError(
@@ -681,12 +682,12 @@ def re_summary(
def storage_summary(
self,
by: str | None = "technology_description",
-freq="YS",
+freq: str = "YS",
**kwargs,
-):
+) -> pd.DataFrame:
"""Create granular summary of storage plant metrics."""
out = (
-self.storage_dispatch.groupby([pd.Grouper(freq="YS")])
+self.storage_dispatch.groupby([pd.Grouper(freq=freq)])
.sum()
.stack()
.reset_index()
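The substantive bug fix in this hunk: the inner ``groupby`` previously hard-coded ``freq="YS"``, so the ``freq`` argument of ``storage_summary`` never reached the aggregation. A small standalone sketch of ``pd.Grouper`` honoring a caller-supplied frequency (data and column names are made up):

.. code-block:: python

    import pandas as pd

    idx = pd.date_range("2030-01-01", periods=8760, freq="h")
    hourly = pd.DataFrame({"charge_mwh": 1.0, "discharge_mwh": 0.9}, index=idx)

    def summarize(df: pd.DataFrame, freq: str = "YS") -> pd.DataFrame:
        # The Grouper must receive the caller's freq; a hard-coded "YS"
        # would silently return annual totals for any requested frequency.
        return df.groupby(pd.Grouper(freq=freq)).sum()

    annual = summarize(hourly)               # 1 row, labeled at year start
    monthly = summarize(hourly, freq="MS")   # 12 rows, labeled at month starts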
@@ -716,7 +717,7 @@ def storage_summary(
return out.set_index(["plant_id_eia", "generator_id", "datetime"])
return out.groupby([by, "datetime"]).sum()

-def full_output(self, freq="YS"):
+def full_output(self, freq: str = "YS") -> pd.DataFrame:
"""Create full operations output."""
cols = [
"plant_name_eia",
@@ -758,9 +759,9 @@ def full_output(self, freq="YS"):
def dispatchable_summary(
self,
by: str | None = "technology_description",
-freq="YS",
+freq: str = "YS",
**kwargs,
-):
+) -> pd.DataFrame:
"""Create granular summary of dispatchable plant metrics.

Args:
@@ -841,7 +842,7 @@ def to_file(
compression=ZIP_DEFLATED,
clobber=False,
**kwargs,
-):
+) -> None:
"""Save :class:`.DispatchModel` to disk.

A very ugly process at the moment because of our goal not to use pickle
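The docstring hints at why ``to_file`` is awkward: avoiding pickle means each DataFrame must be serialized into an in-memory buffer and written into the archive by hand. A minimal sketch of that general pattern, using parquet and illustrative names; this is not the actual ``to_file`` implementation:

.. code-block:: python

    from io import BytesIO
    from zipfile import ZIP_DEFLATED, ZipFile

    import pandas as pd

    def write_frames(path: str, frames: dict[str, pd.DataFrame]) -> None:
        """Write each DataFrame into a zip archive without pickling."""
        with ZipFile(path, "w", compression=ZIP_DEFLATED) as z:
            for name, df in frames.items():
                buf = BytesIO()
                df.to_parquet(buf)  # needs pyarrow or fastparquet installed
                z.writestr(f"{name}.parquet", buf.getvalue())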