Skip to content

Commit

Permalink
Merge efcbc7d into 4b9c018
Browse files Browse the repository at this point in the history
  • Loading branch information
uvchik committed Sep 11, 2019
2 parents 4b9c018 + efcbc7d commit 41998e9
Show file tree
Hide file tree
Showing 17 changed files with 517 additions and 231 deletions.
17 changes: 13 additions & 4 deletions reegis/bmwi.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,6 +106,10 @@ def read_bmwi_sheet_7(sub):
def bmwi_re_energy_capacity():
"""Prepare the energy production and capacity table from sheet 20.
capacity: [MW]
energy: [GWh]
fraction: [-]
Examples
--------
>>> re = bmwi_re_energy_capacity() # doctest: +SKIP
Expand Down Expand Up @@ -136,16 +140,21 @@ def get_annual_electricity_demand_bmwi(year):
--------
>>> get_annual_electricity_demand_bmwi(2014) # doctest: +SKIP
523.988
>>> get_annual_electricity_demand_bmwi(1900) # doctest: +SKIP
Traceback (most recent call last):
    ...
ValueError: No BMWi electricity demand found for 1900.
"""
import math
infile = get_bmwi_energiedaten_file()

table = pd.read_excel(infile, '21', skiprows=7, index_col=[0])
try:
return table.loc[' zusammen', year]
value = table.loc[' zusammen', year]
if math.isnan(value):
value = None
except KeyError:
return None
value = None
if value is None:
msg = "No BMWi electricity demand found for {year}."
raise ValueError(msg.format(year=year))
return value


if __name__ == "__main__":
Expand Down
36 changes: 20 additions & 16 deletions reegis/coastdat.py
Original file line number Diff line number Diff line change
Expand Up @@ -520,11 +520,11 @@ def store_average_weather(data_type, weather_path=None, years=None, keys=None,
ws, verify_integrity=True)

# calculate the average wind speed for one grid item
coastdat_polygons.loc[key, 'v_wind_avg'] = (
coastdat_polygons.loc[key, '{0}_avg'.format(data_type)] = (
data_type_avg.mean())

# Close hdf files
for year in years:
for year in used_years:
weather[year].close()

if keys is not None:
Expand Down Expand Up @@ -993,8 +993,7 @@ def scenario_feedin_wind(year, name, regions=None, feedin_ts=None,
# Rename columns and remove obsolete level
wind.columns = wind.columns.droplevel(2)
cols = wind.columns.get_level_values(1).unique()
rn = {c: c.replace('coastdat_{weather_year}_wind_'
.format(weather_year=weather_year), '') for c in cols}
rn = {c: c.replace('coastdat_{0}_wind_'.format(year), '') for c in cols}
wind.rename(columns=rn, level=1, inplace=True)
wind.sort_index(1, inplace=True)

Expand All @@ -1006,7 +1005,7 @@ def scenario_feedin_wind(year, name, regions=None, feedin_ts=None,
regions = wind.columns.get_level_values(0).unique()

if feedin_ts is None or len(feedin_ts.index) == 0:
cols = pd.MultiIndex(levels=[[], []], labels=[[], []])
cols = pd.MultiIndex(levels=[[], []], codes=[[], []])
feedin_ts = pd.DataFrame(index=wind.index, columns=cols)

for region in regions:
Expand Down Expand Up @@ -1048,7 +1047,7 @@ def scenario_feedin_pv(year, name, regions=None, feedin_ts=None,
regions = pv.columns.get_level_values(0).unique()

if feedin_ts is None or len(feedin_ts.index) == 0:
cols = pd.MultiIndex(levels=[[], []], labels=[[], []])
cols = pd.MultiIndex(levels=[[], []], codes=[[], []])
feedin_ts = pd.DataFrame(index=pv.index, columns=cols)

orientation_fraction = pd.Series(pv_orientation)
Expand All @@ -1067,7 +1066,7 @@ def scenario_feedin_pv(year, name, regions=None, feedin_ts=None,
return feedin_ts.sort_index(1)


def get_feedin_per_region(year, region, name, weather_year=None,
def get_feedin_per_region(year, region, name, weather_year=None, reset_pp=True,
windzones=True, subregion=False, pp=None):
"""
Aggregate feed-in time series for the given geometry set.
Expand All @@ -1078,6 +1077,7 @@ def get_feedin_per_region(year, region, name, weather_year=None,
region : geopandas.geoDataFrame
name : str
weather_year : int
reset_pp
windzones : bool
pp : pd.DataFrame or None
subregion : bool
Expand Down Expand Up @@ -1111,7 +1111,7 @@ def get_feedin_per_region(year, region, name, weather_year=None,
# Add a column named with the name parameter, adding the region id to
# each power plant
pp = powerplants.add_regions_to_powerplants(
region, name, filename=filename, path=path, dump=False, pp=pp,
region, name, filename=filename, path=path, dump=True, pp=pp,
subregion=subregion)

# Get only the power plants that are online in the given year.
Expand Down Expand Up @@ -1217,8 +1217,8 @@ def get_solar_time_series_for_one_location_all_years(latitude, longitude,
"""
path = os.path.join(cfg.get('paths', 'feedin'), 'coastdat')
years = os.listdir(path)
df = pd.DataFrame(columns=pd.MultiIndex(levels=[[], []], labels=[[], []]))
# years = [2012, 2013, 2014]
df = pd.DataFrame(columns=pd.MultiIndex(levels=[[], []], codes=[[], []]))

for year in years:
if os.path.isdir(os.path.join(path, str(year))):
tmp = get_solar_time_series_for_one_location(
Expand All @@ -1230,15 +1230,19 @@ def get_solar_time_series_for_one_location_all_years(latitude, longitude,

def federal_states_feedin_example():
"""Get fullload hours for renewable sources for a federal states."""
federal_states = geometries.load(
cfg.get('paths', 'geometry'),
cfg.get('geometry', 'federalstates_polygon'))
federal_states = geometries.get_federal_states_polygon()
get_feedin_per_region(2014, federal_states, 'federal_states')

return scenario_feedin(2014, 'federal_states')


if __name__ == "__main__":
logger.define_logging()
print(federal_states_feedin_example().sum())
print(federal_state_average_weather(2014, 'temp_air'))
powerplants.pp_opsd2reegis()
for my_year in [2014, 2013, 2012, 2011, 2010]:
my_federal_states = geometries.get_federal_states_polygon()
get_feedin_per_region(my_year, my_federal_states, 'federal_states',
reset_pp=False)
my_path = os.path.join(cfg.get('paths', 'feedin'), 'federal_states')
os.makedirs(my_path, exist_ok=True)
my_fn = os.path.join(my_path, 'federal_states_{0}'.format(my_year))
scenario_feedin(my_year, 'federal_states').to_csv(my_fn)
26 changes: 26 additions & 0 deletions reegis/data/geometries/federalstates_extended_polygon.geojson

Large diffs are not rendered by default.

7 changes: 7 additions & 0 deletions reegis/data/geometries/germany_awz_polygon.geojson

Large diffs are not rendered by default.

80 changes: 80 additions & 0 deletions reegis/demand.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-

""" Download and prepare entsoe load profile from opsd data portal.
Copyright (c) 2016-2018 Uwe Krien <uwe.krien@rl-institut.de>
SPDX-License-Identifier: GPL-3.0-or-later
"""
__copyright__ = "Uwe Krien <uwe.krien@rl-institut.de>"
__license__ = "GPLv3"


# Python libraries
import logging

# External packages
import pandas as pd

# internal modules
import reegis.config as cfg
from reegis import entsoe
from reegis import geometries
from reegis import openego
from reegis import bmwi as bmwi_data


def get_slp_profile_by_region(region, year, annual, profile='entsoe'):
    # TODO: not implemented yet. Placeholder for a standard load profile
    # (SLP) based regional demand series; currently returns None.
    pass


def get_entsoe_profile_by_region(region, year, name, annual_demand=None):
    """Distribute the German entsoe load profile among the given regions.

    The national hourly load profile is split using each region's share of
    the annual openego demand. If ``annual_demand`` is given, the profiles
    are scaled so that their overall sum equals ``annual_demand``.

    Parameters
    ----------
    region : geopandas.geoDataFrame
        Regions among which the load profile is distributed (index holds
        the region ids).
    year : int
        Year of the annual regional demand (openego / BMWi).
    name : str
        Name of the region set (used for logging and the openego lookup).
    annual_demand : numeric or str or None
        Overall annual demand to scale the profiles to. Pass the string
        'bmwi' to fetch the national demand from the BMWi energy data.

    Returns
    -------
    pandas.DataFrame
        One load profile column per region id.
    """
    logging.debug("Get entsoe profile {0} for {1}".format(name, year))

    # NOTE(review): the entsoe profile of 2014 is used as the hourly shape
    # for every requested year; only the annual demand depends on ``year``.
    # Confirm that this is intended (calendar/weekday pattern of 2014).
    de_load_profile = entsoe.get_entsoe_load(2014).DE_load_

    load_profile = pd.DataFrame()

    annual_region = openego.get_ego_demand_by_region(year, region, name)
    share = annual_region.div(annual_region.sum())

    # Bug fix: the loop variable used to shadow the ``region`` parameter.
    for region_id in region.index:
        if region_id not in share:
            share[region_id] = 0
        load_profile[region_id] = de_load_profile.multiply(
            float(share[region_id]))

    if annual_demand == 'bmwi':
        annual_demand = bmwi_data.get_annual_electricity_demand_bmwi(year)

    if annual_demand is not None:
        load_profile = load_profile.div(load_profile.sum().sum()).multiply(
            annual_demand)
    return load_profile


def get_electricity_profile_by_federal_states(year, profile='entsoe'):
    """Get entsoe-based electricity load profiles for the federal states.

    Parameters
    ----------
    year : int
        Year of the annual demand used to split the profile.
    profile : str
        Currently unused; only the 'entsoe' profile is implemented.
        (The previous default was the ``entsoe`` module object, which
        looks unintended; the sibling stub uses the string 'entsoe'.)

    Returns
    -------
    pandas.DataFrame
        One load profile column per federal state (iso code).
    """
    federal_states = geometries.load(
        cfg.get('paths', 'geometry'),
        cfg.get('geometry', 'federalstates_polygon'))
    federal_states.set_index('iso', drop=True, inplace=True)
    # Bug fix: arguments were passed as (region, name, year) although the
    # callee signature is (region, year, name, annual_demand=None).
    return get_entsoe_profile_by_region(federal_states, year,
                                        'federal_states')


if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
print(get_electricity_profile_by_federal_states(2014)['BE'])
89 changes: 59 additions & 30 deletions reegis/entsoe.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,29 +23,30 @@
import dateutil
import pandas as pd

# oemof packages
from oemof.tools import logger

# internal modules
import reegis.config as cfg


def read_original_timeseries_file(overwrite=False):
"""Read timeseries file if it exists. Otherwise download it from opsd.
"""

orig_csv_file = os.path.join(cfg.get('paths', 'entsoe'),
cfg.get('entsoe', 'original_file'))
readme = os.path.join(cfg.get('paths', 'entsoe'),
cfg.get('entsoe', 'readme_file'))
json = os.path.join(cfg.get('paths', 'entsoe'),
cfg.get('entsoe', 'json_file'))
version = cfg.get('entsoe', 'time_series_version')
orig_csv_file = os.path.join(
cfg.get('paths', 'entsoe'),
cfg.get('entsoe', 'original_file')).format(version=version)
readme = os.path.join(
cfg.get('paths', 'entsoe'),
cfg.get('entsoe', 'readme_file')).format(version=version)
json = os.path.join(
cfg.get('paths', 'entsoe'),
cfg.get('entsoe', 'json_file')).format(version=version)

version = cfg.get('entsoe', 'timeseries_version')

if not os.path.isfile(orig_csv_file) or overwrite:
req = requests.get(cfg.get('entsoe', 'timeseries_data').format(
version=version))
req = requests.get(
cfg.get('entsoe', 'timeseries_data').format(version=version))

if not overwrite:
logging.warning("File not found. Try to download it from server.")
else:
Expand All @@ -57,26 +58,26 @@ def read_original_timeseries_file(overwrite=False):
logging.warning("Downloaded from {0} and copied to '{1}'.".format(
cfg.get('entsoe', 'timeseries_data').format(version=version),
orig_csv_file))
req = requests.get(cfg.get('entsoe', 'timeseries_readme').format(
version=version))
req = requests.get(
cfg.get('entsoe', 'timeseries_readme').format(version=version))
with open(readme, 'wb') as fout:
fout.write(req.content)
req = requests.get(cfg.get('entsoe', 'timeseries_json').format(
version=version))
req = requests.get(
cfg.get('entsoe', 'timeseries_json').format(version=version))
with open(json, 'wb') as fout:
fout.write(req.content)

logging.debug("Reading file: {0}".format(orig_csv_file))
orig = pd.read_csv(orig_csv_file, index_col=[0], parse_dates=True,
date_parser=lambda col: pd.to_datetime(col, utc=True))
orig = pd.read_csv(orig_csv_file, index_col=[0], parse_dates=True)
orig = orig.tz_convert('Europe/Berlin')
return orig


def prepare_de_file(overwrite=False):
"""Convert demand file. CET index and Germany's load only."""
de_file = os.path.join(cfg.get('paths', 'entsoe'),
cfg.get('entsoe', 'de_file'))
version = cfg.get('entsoe', 'time_series_version')
de_file = os.path.join(
cfg.get('paths', 'entsoe'),
cfg.get('entsoe', 'de_file').format(version=version))
if not os.path.isfile(de_file) or overwrite:
ts = read_original_timeseries_file(overwrite)
for col in ts.columns:
Expand All @@ -88,8 +89,10 @@ def prepare_de_file(overwrite=False):

def split_timeseries_file(overwrite=False, csv=False):
logging.info("Splitting time series.")
version = cfg.get('entsoe', 'time_series_version')
path_pattern = os.path.join(cfg.get('paths', 'entsoe'), '{0}')
de_file = path_pattern.format(cfg.get('entsoe', 'de_file'))
de_file = path_pattern.format(cfg.get('entsoe', 'de_file').format(
version=version))

if not os.path.isfile(de_file) or overwrite:
prepare_de_file(overwrite)
Expand Down Expand Up @@ -123,19 +126,29 @@ def split_timeseries_file(overwrite=False, csv=False):
renewables = de_ts.dropna(subset=re_subset, how='any')[re_columns]

if csv:
load_file = path_pattern.format(cfg.get('entsoe', 'load_file_csv'))
load_file = path_pattern.format(
cfg.get('entsoe', 'load_file_csv').format(version=version))
else:
load_file = path_pattern.format(cfg.get('entsoe', 'load_file'))
load_file = path_pattern.format(
cfg.get('entsoe', 'load_file').format(version=version))

if not os.path.isfile(load_file) or overwrite:
if csv:
load.to_csv(load_file)
else:
load.to_hdf(load_file, 'entsoe')

re_file = path_pattern.format(cfg.get('entsoe', 'renewables_file'))
if csv:
re_file = path_pattern.format(
cfg.get('entsoe', 'renewables_file_csv').format(version=version))
else:
re_file = path_pattern.format(
cfg.get('entsoe', 'renewables_file').format(version=version))
if not os.path.isfile(re_file) or overwrite:
renewables.to_csv(re_file)
if csv:
renewables.to_csv(re_file)
else:
renewables.to_hdf(re_file, 're')


def prepare_entsoe_timeseries(overwrite=False):
Expand All @@ -152,11 +165,27 @@ def get_entsoe_load(year):
f = pd.datetime(year, 1, 1, 0)
t = pd.datetime(year, 12, 31, 23)
logging.info("Read entsoe load series from {0} to {1}".format(f, t))
df = pd.read_hdf(filename, 'entsoe')
df = pd.DataFrame(pd.read_hdf(filename, 'entsoe'))
return df.loc[f:t]


def get_entsoe_renewable_data(csv=False, overwrite=False):
    """Return the renewables subset of the entsoe time series.

    The split file is created by ``split_timeseries_file`` if it does not
    exist yet or if ``overwrite`` is set.

    Parameters
    ----------
    csv : bool
        Read the csv file instead of the hdf5 file.
    overwrite : bool
        Recreate the split file even if it already exists.

    Returns
    -------
    pandas.DataFrame
    """
    version = cfg.get('entsoe', 'time_series_version')
    path_pattern = os.path.join(cfg.get('paths', 'entsoe'), '{0}')
    if csv:
        fn = path_pattern.format(
            cfg.get('entsoe', 'renewables_file_csv').format(version=version))
    else:
        fn = path_pattern.format(
            cfg.get('entsoe', 'renewables_file').format(version=version))

    # Bug fix: the ``overwrite`` flag was accepted but never used.
    if not os.path.isfile(fn) or overwrite:
        split_timeseries_file(csv=csv, overwrite=overwrite)

    # Renamed the local (was ``re``) so it cannot shadow the stdlib module.
    if csv:
        renewables = pd.read_csv(fn, index_col=[0], parse_dates=True)
    else:
        renewables = pd.DataFrame(pd.read_hdf(fn, 're'))
    return renewables


if __name__ == "__main__":
logger.define_logging()
print(get_entsoe_load(2014))
logging.info("Done!")
pass
2 changes: 1 addition & 1 deletion reegis/feedin.py
Original file line number Diff line number Diff line change
Expand Up @@ -305,7 +305,7 @@ def feedin_windpowerlib(weather, turbine, installed_capacity=1):
>>> turbine = {
... 'hub_height': 135,
... 'rotor_diameter': 127,
... 'name': 'E-141/4200',
... 'name': 'E-82/2300',
... 'nominal_power': 4200000,
... 'fetch_curve': 'power_coefficient_curve'}
>>> data_height = cfg.get_dict('coastdat_data_height')
Expand Down

0 comments on commit 41998e9

Please sign in to comment.