Skip to content

Commit

Permalink
Merge branch 'dev' into features/#671-assign-cts-to-buildings
Browse files Browse the repository at this point in the history
  • Loading branch information
nailend committed Sep 22, 2022
2 parents 23cc457 + 022020f commit 8a5f7f3
Show file tree
Hide file tree
Showing 7 changed files with 729 additions and 51 deletions.
8 changes: 8 additions & 0 deletions CHANGELOG.rst
Original file line number Diff line number Diff line change
Expand Up @@ -395,8 +395,12 @@ Changed
`#866 <https://github.com/openego/eGon-data/issues/866>`_
* Add noflex scenario for motorized individual travel
`#821 <https://github.com/openego/eGon-data/issues/821>`_
* Add sanity checks for motorized individual travel
`#820 <https://github.com/openego/eGon-data/issues/820>`_
* Parallelize sanity checks
`#882 <https://github.com/openego/eGon-data/issues/882>`_
* Rename noflex to lowflex scenario for motorized individual travel
`#921 <https://github.com/openego/eGon-data/issues/921>`_


Bug Fixes
Expand Down Expand Up @@ -575,6 +579,10 @@ Bug Fixes
`#852 <https://github.com/openego/eGon-data/issues/852>`_
* Temporarily set upper version limit for pandas
`#829 <https://github.com/openego/eGon-data/issues/829>`_
* Delete eMob MIT data from eTraGo tables on init
`#878 <https://github.com/openego/eGon-data/issues/878>`_
* Fix model id issues in DSM potentials for CTS and industry
`#901 <https://github.com/openego/eGon-data/issues/901>`_

.. _PR #692: https://github.com/openego/eGon-data/pull/692
.. _#343: https://github.com/openego/eGon-data/issues/343
Expand Down
1 change: 1 addition & 0 deletions src/egon/data/airflow/dags/pipeline.py
Original file line number Diff line number Diff line change
Expand Up @@ -579,5 +579,6 @@
storage_etrago,
hts_etrago_table,
fill_etrago_generators,
emobility_mit,
]
)
10 changes: 5 additions & 5 deletions src/egon/data/datasets.yml
Original file line number Diff line number Diff line change
Expand Up @@ -1076,12 +1076,12 @@ emobility_mit:
variation:
eGon2035: "NEP C 2035"
eGon100RE: "Reference 2050"
# name of no-flex scenario
noflex:
create_noflex_scenario: True
# name of low-flex scenario
lowflex:
create_lowflex_scenario: True
names:
eGon2035: "eGon2035_noflex"
eGon100RE: "eGon100RE_noflex"
eGon2035: "eGon2035_lowflex"
eGon100RE: "eGon100RE_lowflex"

model_timeseries:
reduce_memory: True
Expand Down
72 changes: 48 additions & 24 deletions src/egon/data/datasets/DSM_cts_ind.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,19 @@
import egon.data.config
from egon.data import db
import geopandas as gpd
import numpy as np
import pandas as pd
import geopandas as gpd

from egon.data import db
from egon.data.datasets import Dataset
from egon.data.datasets.electricity_demand.temporal import calc_load_curve
from egon.data.datasets.industry.temporal import identify_bus
from egon.data.datasets import Dataset
import egon.data.config


class dsm_Potential(Dataset):
def __init__(self, dependencies):
super().__init__(
name="DSM_potentials",
version="0.0.2",
version="0.0.3",
dependencies=dependencies,
tasks=(dsm_cts_ind_processing),
)
Expand Down Expand Up @@ -440,12 +441,23 @@ def create_dsm_components(con, p_max, p_min, e_max, e_min, dsm):
max_id = 0
dsm_id = max_id + 1
bus_id = pd.Series(index=dsm_buses.index, dtype=int)
bus_id.iloc[0 : int((len(bus_id) / 2))] = range(
dsm_id, int((dsm_id + len(dsm_buses) / 2))

# Get number of DSM buses for both scenarios
rows_per_scenario = (
dsm_buses.groupby("scn_name").count().original_bus.to_dict()
)
bus_id.iloc[int((len(bus_id) / 2)) : len(bus_id)] = range(
dsm_id, int((dsm_id + len(dsm_buses) / 2))

# Assignment of DSM ids
bus_id.iloc[0 : rows_per_scenario.get("eGon2035", 0)] = range(
dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
)
bus_id.iloc[
rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
"eGon2035", 0
)
+ rows_per_scenario.get("eGon100RE", 0)
] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

dsm_buses["bus_id"] = bus_id

    # add links from "original" buses to DSM-buses
Expand All @@ -466,12 +478,18 @@ def create_dsm_components(con, p_max, p_min, e_max, e_min, dsm):
max_id = 0
dsm_id = max_id + 1
link_id = pd.Series(index=dsm_buses.index, dtype=int)
link_id.iloc[0 : int((len(link_id) / 2))] = range(
dsm_id, int((dsm_id + len(dsm_links) / 2))
)
link_id.iloc[int((len(link_id) / 2)) : len(link_id)] = range(
dsm_id, int((dsm_id + len(dsm_links) / 2))

# Assignment of link ids
link_id.iloc[0 : rows_per_scenario.get("eGon2035", 0)] = range(
dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
)
link_id.iloc[
rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
"eGon2035", 0
)
+ rows_per_scenario.get("eGon100RE", 0)
] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

dsm_links["link_id"] = link_id

# add calculated timeseries to df to be returned
Expand Down Expand Up @@ -499,12 +517,18 @@ def create_dsm_components(con, p_max, p_min, e_max, e_min, dsm):
max_id = 0
dsm_id = max_id + 1
store_id = pd.Series(index=dsm_buses.index, dtype=int)
store_id.iloc[0 : int((len(store_id) / 2))] = range(
dsm_id, int((dsm_id + len(dsm_stores) / 2))
)
store_id.iloc[int((len(store_id) / 2)) : len(store_id)] = range(
dsm_id, int((dsm_id + len(dsm_stores) / 2))

# Assignment of store ids
store_id.iloc[0 : rows_per_scenario.get("eGon2035", 0)] = range(
dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
)
store_id.iloc[
rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
"eGon2035", 0
)
+ rows_per_scenario.get("eGon100RE", 0)
] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

dsm_stores["store_id"] = store_id

# add calculated timeseries to df to be returned
Expand Down Expand Up @@ -580,20 +604,20 @@ def aggregate_components(con, df_dsm_buses, df_dsm_links, df_dsm_stores):
df_dsm_stores.sort_values("scn_name", inplace=True)

# select new bus_ids for aggregated buses and add to links and stores
bus_id = db.next_etrago_id("Bus") + df_dsm_buses.index
bus_id = db.next_etrago_id("Bus") + df_dsm_buses.index

df_dsm_buses["bus_id"] = bus_id
df_dsm_links["dsm_bus"] = bus_id
df_dsm_stores["bus"] = bus_id

# select new link_ids for aggregated links
link_id = db.next_etrago_id("Link") + df_dsm_links.index
link_id = db.next_etrago_id("Link") + df_dsm_links.index

df_dsm_links["link_id"] = link_id

# select new store_ids to aggregated stores

store_id = db.next_etrago_id("Store") + df_dsm_stores.index
store_id = db.next_etrago_id("Store") + df_dsm_stores.index

df_dsm_stores["store_id"] = store_id

Expand Down Expand Up @@ -734,7 +758,7 @@ def delete_dsm_entries(carrier):
# links

sql = f"""DELETE FROM {targets["link_timeseries"]["schema"]}.{targets["link_timeseries"]["table"]} t
WHERE t.link_id IN
WHERE t.link_id IN
(SELECT l.link_id FROM {targets["link"]["schema"]}.{targets["link"]["table"]} l
WHERE l.carrier LIKE '{carrier}');"""
db.execute_sql(sql)
Expand All @@ -745,7 +769,7 @@ def delete_dsm_entries(carrier):
# stores

sql = f"""DELETE FROM {targets["store_timeseries"]["schema"]}.{targets["store_timeseries"]["table"]} t
WHERE t.store_id IN
WHERE t.store_id IN
(SELECT s.store_id FROM {targets["store"]["schema"]}.{targets["store"]["table"]} s
WHERE s.carrier LIKE '{carrier}');"""
db.execute_sql(sql)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,7 @@
WORKING_DIR,
)
from egon.data.datasets.emobility.motorized_individual_travel.model_timeseries import (
delete_model_data_from_db,
generate_model_data_bunch,
generate_model_data_eGon100RE_remaining,
generate_model_data_eGon2035_remaining,
Expand Down Expand Up @@ -413,7 +414,7 @@ def generate_model_data_tasks(scenario_name):

super().__init__(
name="MotorizedIndividualTravel",
version="0.0.3",
version="0.0.4",
dependencies=dependencies,
tasks=(
create_tables,
Expand All @@ -422,6 +423,7 @@ def generate_model_data_tasks(scenario_name):
(extract_trip_file, write_evs_trips_to_db),
},
allocate_evs_to_grid_districts,
delete_model_data_from_db,
{
*generate_model_data_tasks(scenario_name="eGon2035"),
*generate_model_data_tasks(scenario_name="eGon100RE"),
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -584,7 +584,7 @@ def calc_initial_ev_soc(bus_id: int, scenario_name: str) -> pd.DataFrame:
/ initial_soc_per_ev_type.battery_capacity_sum.sum()
)

def write_to_db(write_noflex_model: bool) -> None:
def write_to_db(write_lowflex_model: bool) -> None:
"""Write model data to eTraGo tables"""

@db.check_db_unique_violation
Expand Down Expand Up @@ -730,13 +730,13 @@ def write_load(
f"with bus_id {bus_id} in table egon_etrago_bus!"
)

# Call DB writing functions for regular or noflex scenario
# Call DB writing functions for regular or lowflex scenario
# * use corresponding scenario name as defined in datasets.yml
# * no storage for noflex scenario
# * no storage for lowflex scenario
# * load timeseries:
# * regular (flex): use driving load
# * noflex: use dumb charging load
if write_noflex_model is False:
# * lowflex: use dumb charging load
if write_lowflex_model is False:
emob_bus_id = write_bus(scenario_name=scenario_name)
write_link(scenario_name=scenario_name)
write_store(scenario_name=scenario_name)
Expand All @@ -748,12 +748,12 @@ def write_load(
),
)
else:
# Get noflex scenario name
noflex_scenario_name = DATASET_CFG["scenario"]["noflex"]["names"][
scenario_name
]
# Get lowflex scenario name
lowflex_scenario_name = DATASET_CFG["scenario"]["lowflex"][
"names"
][scenario_name]
write_load(
scenario_name=noflex_scenario_name,
scenario_name=lowflex_scenario_name,
connection_bus_id=etrago_bus.bus_id,
load_ts=hourly_load_time_series_df.load_time_series.to_list(),
)
Expand Down Expand Up @@ -830,26 +830,80 @@ def write_to_file():
# Crop hourly TS if needed
hourly_load_time_series_df = hourly_load_time_series_df[:8760]

# Create noflex scenario?
write_noflex_model = DATASET_CFG["scenario"]["noflex"][
"create_noflex_scenario"
# Create lowflex scenario?
write_lowflex_model = DATASET_CFG["scenario"]["lowflex"][
"create_lowflex_scenario"
]

# Get initial average storage SoC
initial_soc_mean = calc_initial_ev_soc(bus_id, scenario_name)

# Write to database: regular and noflex scenario
write_to_db(write_noflex_model=False)
print(' Writing flex scenario...')
if write_noflex_model is True:
print(' Writing noflex scenario...')
write_to_db(write_noflex_model=True)
# Write to database: regular and lowflex scenario
write_to_db(write_lowflex_model=False)
print(" Writing flex scenario...")
if write_lowflex_model is True:
print(" Writing lowflex scenario...")
write_to_db(write_lowflex_model=True)

# Export to working dir if requested
if DATASET_CFG["model_timeseries"]["export_results_to_csv"]:
write_to_file()


def delete_model_data_from_db():
    """Delete all eMob MIT data from eTraGo PF tables.

    Removes, per component carrier, first the associated timeseries rows
    (selected via a subquery on the component's id column) and then the
    component rows themselves. Buses have no timeseries table, so only the
    bus rows are removed for the "Li ion" carrier.
    """
    # (component model, timeseries model, id column name, carrier).
    # Order matters: for each component the timeseries rows are deleted
    # before their parent rows, mirroring the FK relationship.
    targets = (
        (EgonPfHvBus, None, None, "Li ion"),
        (EgonPfHvLink, EgonPfHvLinkTimeseries, "link_id", "BEV charger"),
        (EgonPfHvStore, EgonPfHvStoreTimeseries, "store_id", "battery storage"),
        (EgonPfHvLoad, EgonPfHvLoadTimeseries, "load_id", "land transport EV"),
    )

    with db.session_scope() as session:
        for model, ts_model, id_col, carrier in targets:
            if ts_model is not None:
                # Collect ids of all rows of this carrier ...
                id_subquery = (
                    session.query(getattr(model, id_col))
                    .filter(model.carrier == carrier)
                    .subquery()
                )
                # ... and drop their timeseries first.
                session.query(ts_model).filter(
                    getattr(ts_model, id_col).in_(id_subquery)
                ).delete(synchronize_session=False)

            # Drop the component rows themselves.
            session.query(model).filter(model.carrier == carrier).delete(
                synchronize_session=False
            )


def load_grid_district_ids() -> pd.Series:
"""Load bus IDs of all grid districts"""
with db.session_scope() as session:
Expand Down

0 comments on commit 8a5f7f3

Please sign in to comment.