From f838a58d041f24ebb2b6fc498d7e88e190294e06 Mon Sep 17 00:00:00 2001 From: beierd Date: Thu, 13 Aug 2020 11:27:36 +0200 Subject: [PATCH 1/9] Adds module to calculate heat pump COPs with oemof thermal --- src/scenario_builder/cop_precalc.py | 246 ++++++++++++++++++++++++++++ 1 file changed, 246 insertions(+) create mode 100644 src/scenario_builder/cop_precalc.py diff --git a/src/scenario_builder/cop_precalc.py b/src/scenario_builder/cop_precalc.py new file mode 100644 index 0000000..1cbec43 --- /dev/null +++ b/src/scenario_builder/cop_precalc.py @@ -0,0 +1,246 @@ +import os +import oemof.thermal.compression_heatpumps_and_chillers as cmpr_hp_chiller +import pandas as pd +import numpy as np +from disaggregator import data +from reegis import coastdat +from reegis import config as cfg + + +def flow_temperature_dependent_on_ambient(t_max, t_min, t_amb): + """ + Parameters + ---------- + t_max : int + Maximum flow temperature of heating system + t_min : int + Minimum flow temperature of heating system + t_amb : pd.Series + Ambient temperature + + Returns: list + List containing flow temperature depending on heatload/ambient temperature + ------- + """ + + delta_t_flow = t_max - t_min + delta_t_amb = 30 + T_flow = [t_max - (delta_t_flow/delta_t_amb) * i for i in range(31)] + T_round = np.round(t_amb) + + t_ind = np.zeros(shape=(len(t_amb))) + tvl = np.zeros(shape=(len(t_amb))) # Vorlauftemperatur + for i in range(len(t_amb)): + if T_round[i] < -14: + t_ind[i] = 1 + elif T_round[i] >= -14 and T_round[i] < 0: + t_ind[i] = abs(15 + T_round[i]) # nimm erste 15 Stellen bei -10 bis 0°C + elif T_round[i] >= 0: + t_ind[i] = min(31, 16 + T_round[i]) + tvl[i] = int(max(t_min, T_flow[int(t_ind[i] - 1)])) + + tvl = list(tvl) + + return tvl + + +def calculate_COP(t_flow, t_low, quality_grade): + """ + Parameters + ---------- + t_flow : int + Maximum flow temperature of heating system + t_low : Series + Minimum flow temperature of heating system + quality_grade : float + Ambient 
temperature
+
+    Returns: list
+        List containing COPs of the heat pump
+    -------
+    """
+
+    cops = cmpr_hp_chiller.calc_cops(
+        temp_high=list([t_flow]),
+        temp_low=t_low,
+        quality_grade=quality_grade,
+        mode='heat_pump',
+        temp_threshold_icing=2,
+        factor_icing=0.8)
+
+    return cops
+
+
+def calculate_dynamic_COP(t_flow, t_low, quality_grade):
+    r"""
+    This function calculates COPs of a heatpump. Both the flow temperature and the heat source must be given
+    as a timeseries and can vary for each timestep.
+
+    Parameters
+    ----------
+    t_flow : Series
+        Flow temperature series
+    t_low : Series
+        Heat source of heatpump
+    quality_grade : float
+        Carnot efficiency factor of heat pump
+
+    Returns: Series
+        Series containing COPs
+    -------
+    """
+    cops = []
+    for i in range(0, len(t_flow)):
+        tmp_high = list([t_flow[i]])
+        tmp_low = list([t_low[i]])
+
+        cop = cmpr_hp_chiller.calc_cops(
+            temp_high=tmp_high,
+            temp_low=tmp_low,
+            quality_grade=quality_grade,
+            mode='heat_pump',
+            temp_threshold_icing=2,
+            factor_icing=0.8)
+
+        cops.append(cop[0])
+
+    cops = pd.Series(cops)
+
+    return cops
+
+
+def calculate_mixed_COPS_per_region(t_amb, t_ground, quality_grade, share_ashp=0.7, share_gshp=0.3):
+    """
+    Parameters
+    ----------
+    t_amb : Series
+        Ambient temperature
+    t_ground : Series
+        Ground temperature for GSHP
+    quality_grade : float
+        Carnot quality grade of the heat pumps
+    share_ashp: Float
+        Share of air sourced heat pumps
+    share_gshp: Float
+        Share of ground sourced heat pumps
+
+    Returns: DataFrame
+        DataFrame containing some COP series
+    -------
+    """
+
+    t_flow = dict(high = 75, medium=55, low = 40)
+
+    # Calculate flow temperature for different temperature levels
+    tvl75 = flow_temperature_dependent_on_ambient(t_flow['high'], 50, t_amb)
+    tvl50 = flow_temperature_dependent_on_ambient(t_flow['medium'], 30 , t_amb)
+    tvl35 = flow_temperature_dependent_on_ambient(t_flow['low'], 30 , t_amb)
+
+    # Calculate COPs for air sourced heat pump
+ 
cop75_ASHP = calculate_dynamic_COP(tvl75, t_amb, quality_grade) + cop50_ASHP = calculate_dynamic_COP(tvl50, t_amb, quality_grade) + cop35_ASHP = calculate_dynamic_COP(tvl35, t_amb, quality_grade) + copwater_ASHP = calculate_dynamic_COP(np.ones(shape=8760)*50, t_amb, quality_grade) + + # Calculate COPs for ground sourced heat pump + cop75_GSHP = calculate_dynamic_COP(tvl75, t_ground, quality_grade) + cop50_GSHP = calculate_dynamic_COP(tvl50, t_ground, quality_grade) + cop35_GSHP = calculate_dynamic_COP(tvl35, t_ground, quality_grade) + copwater_GSHP = calculate_dynamic_COP(np.ones(shape=8760)*50, t_ground, quality_grade) + + cops_aggregated = pd.DataFrame(columns=['COP35_air', 'COP50_air', 'COP75_air', 'COP35_ground', 'COP50_ground', + 'COP75_ground', 'COPwater_air', 'COPwater_ground','COP_mean' ]) + + # Write COP-Series to DataFrame + cops_aggregated['COP35_air'] = cop35_ASHP + cops_aggregated['COP50_air'] = cop50_ASHP + cops_aggregated['COP75_air'] = cop75_ASHP + cops_aggregated['COP35_ground'] = cop35_GSHP + cops_aggregated['COP50_ground'] = cop50_GSHP + cops_aggregated['COP75_ground'] = cop75_GSHP + cops_aggregated['COPwater_air'] = copwater_ASHP + cops_aggregated['COPwater_ground'] = copwater_GSHP + + # Calculate mean COP and add it to DataFrame + cop_air_mean = (cop35_ASHP + cop50_ASHP + cop75_ASHP) / 3 + cop_ground_mean = (cop35_GSHP + cop50_GSHP + cop75_GSHP) / 3 + cop_mean = share_ashp * cop_air_mean + share_gshp * cop_ground_mean + cops_aggregated['COP_mean'] = cop_mean + + # Limit COP to 7 + limit = cops_aggregated >= 7 + cops_aggregated[limit] = 7 + + return cops_aggregated + + +def calculate_mixed_cops_by_nuts3(year, name, share_ashp=0.7, share_gshp=0.3, quality_grade=0.4): + """ + Parameters + ---------- + year: int + Year of interest + name: string + Name of the analysed set + share_ashp: Float + Share of air sourced heat pumps + share_gshp: Float + Share of ground sourced heat pumps + quality_grade : float + Ambient temperature + + Returns: 2 
DataFrames + DataFrames containing mean COP series for each German NUTS3 region + ------- + """ + + fn_pattern = "mixed_cops_by_nuts3_{name}_{year}.csv".format(name=name, year=year) + fn_pattern_water = "cops_by_nuts3_{name}_{year}_water.csv".format(name=name, year=year) + fn = os.path.join(cfg.get("paths", "cop_precalc"), fn_pattern) + fn_water = os.path.join(cfg.get("paths", "cop_precalc"), fn_pattern_water) + + if not os.path.isfile(fn): + share_ashp = share_ashp + share_gshp = share_gshp + quality_grade = quality_grade + t_ground = pd.Series(np.ones(8760)*10) + + outfile = os.path.join(cfg.get("paths", "cop_precalc"), "average_temp_by_nuts3_{year}.csv".format(year=year)) + + if not os.path.isfile(outfile): + + # Load NUTS3-geometries + nuts3 = data.database_shapes() + nuts3 = nuts3.to_crs(crs=4326) + + # Get average temperature per NUTS3, coastdat only available until 2014 + coastdat.spatial_average_weather(year, nuts3, 'temp_air', 'deTemp', outfile=outfile) + NUTS3_temp = pd.read_csv(outfile) + NUTS3_temp.drop('Unnamed: 0', axis='columns', inplace=True) + + else: + NUTS3_temp = pd.read_csv(outfile) + NUTS3_temp.drop('Unnamed: 0', axis='columns', inplace=True) + + # Create empty DataFrames + COP_NUTS3 = pd.DataFrame(index=pd.date_range('1/1/'+str(year), periods=8760, freq='H'), + columns=NUTS3_temp.columns) + COP_NUTS3_water = pd.DataFrame(index=pd.date_range('1/1/'+str(year), periods=8760, freq='H'), + columns=NUTS3_temp.columns) + + # Loop through NUTS3-regions and calculate mixed COP for each region + for r in COP_NUTS3.columns: + tmp_cop = calculate_mixed_COPS_per_region(NUTS3_temp[r]-273.15, t_ground, quality_grade=quality_grade, + share_ashp=share_ashp, share_gshp=share_gshp) + tmp_cop.set_index(COP_NUTS3.index, inplace=True) + COP_NUTS3[r] = tmp_cop['COP_mean'] + COP_NUTS3_water[r] = tmp_cop['COPwater_air']* share_ashp + tmp_cop['COPwater_ground'] * share_gshp + + COP_NUTS3.to_csv(fn) + COP_NUTS3_water.to_csv(fn_water) + + else: + COP_NUTS3 = 
pd.read_csv(fn) + COP_NUTS3_water = pd.read_csv(fn_water) + + return COP_NUTS3, COP_NUTS3_water From 02e3b3bb97956a71773fac0283e9543741519613 Mon Sep 17 00:00:00 2001 From: beierd Date: Mon, 17 Aug 2020 15:04:35 +0200 Subject: [PATCH 2/9] Adds functions to process e-mobility charging profiles --- src/scenario_builder/emobpy_processing.py | 174 ++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100644 src/scenario_builder/emobpy_processing.py diff --git a/src/scenario_builder/emobpy_processing.py b/src/scenario_builder/emobpy_processing.py new file mode 100644 index 0000000..8f5a285 --- /dev/null +++ b/src/scenario_builder/emobpy_processing.py @@ -0,0 +1,174 @@ +from emobpy import DataBase +import pandas as pd +import os +import numpy as np +from reegis import config as cfg + + +def get_charging_profiles_from_database(path): + """ + This function can be used to process results obtained with the library emobpy by DIW Berlin. + It takes a path to data as input and returns the summed charging power. 
+ + Parameters + ---------- + path: String + Path to a folder with stored driving, availability and charging profiles + + Returns: DataFrame + Summed charging power for 4 different charging strategies + ------- + """ + + # Load profiles from Files + manager = DataBase(path) + manager.update() + + # Liste mit Availability Profilen + keys_driving = [k for k, v in manager.db.items() if v['kind'] == 'driving'] + keys_availability = [k for k, v in manager.db.items() if v['kind'] == 'availability'] + keys_charging = [k for k, v in manager.db.items() if v['kind'] == 'charging'] + keys_immediate = [k for k, v in manager.db.items() if v['kind'] == 'charging' and v['option'] == 'immediate' ] + keys_balanced = [k for k, v in manager.db.items() if v['kind'] == 'charging' and v['option'] == 'balanced' ] + keys_23to8 = [k for k, v in manager.db.items() if v['kind'] == 'charging' and v['option'] == 'from_23_to_8_at_home'] + keys_0to24 = [k for k, v in manager.db.items() if v['kind'] == 'charging' and v['option'] == 'from_0_to_24_at_home'] + + # Summenprofil für Fahrleistung in kmd + driving_profiles = pd.DataFrame() + for k in keys_driving: + test = manager.db[k]["timeseries"]["consumption"] + driving_profiles = pd.concat([driving_profiles, test], axis=1) + + #cum_profile = driving_profiles.sum(axis=1) + + + # Summenprofil für Ladeleistung (immediate) + ch_profiles_immediate = pd.DataFrame() + for k in keys_immediate: + tmp = manager.db[k]["timeseries"]["charge_grid"] + ch_profiles_immediate = pd.concat([ch_profiles_immediate, tmp], axis=1) + + P_immediate = ch_profiles_immediate.sum(axis=1) + + + # Summenprofil für Ladeleistung (balanced) + ch_profiles_balanced = pd.DataFrame() + for k in keys_balanced: + tmp = manager.db[k]["timeseries"]["charge_grid"] + ch_profiles_balanced = pd.concat([ch_profiles_balanced, tmp], axis=1) + + P_balanced = ch_profiles_balanced.sum(axis=1) + + + # Summenprofil für Ladeleistung (23 to 8) + ch_profiles_23to8 = pd.DataFrame() + for k in keys_23to8: 
+ tmp = manager.db[k]["timeseries"]["charge_grid"] + ch_profiles_23to8 = pd.concat([ch_profiles_23to8, tmp], axis=1) + + P_23to8 = ch_profiles_23to8.sum(axis=1) + + + # Summenprofil für Ladeleistung (0 to 24) + ch_profiles_0to24 = pd.DataFrame() + for k in keys_0to24: + tmp = manager.db[k]["timeseries"]["charge_grid"] + ch_profiles_0to24 = pd.concat([ch_profiles_0to24, tmp], axis=1) + + P_0to24 = ch_profiles_0to24.sum(axis=1) + + P_sum = pd.concat([P_immediate, P_balanced, P_23to8, P_0to24], axis=1) + P_sum.columns = ['immediate', 'balanced', '23to8', '0to24'] + + return P_sum + + +def return_normalized_charging_series(df): + """ + This function normalizes profiles so that the sum of the timeseries is 1. The profiles can then be scaled to + a user defined energy consumption of BEV charging. + + Parameters + ---------- + df: DataFrame + Dataframe with 4 charging timereries + + Returns: DataFrame + Normalized charging series + ------- + """ + + # Cut off initial charging + df.iloc[0:48] = df.iloc[48:96].values + idx = pd.DatetimeIndex(df.index, freq='30min') + df.set_index(idx, inplace=True) + p_immediate = df['immediate'] + p_balanced = df['balanced'] + p_23to8 = df['23to8'] + p_0to24 = df['0to24'] + + # Resample to hourly values + immediate_hourly = p_immediate.resample('H').sum() + balanced_hourly = p_balanced.resample('H').sum() + hourly_23to8 = p_23to8.resample('H').sum() + hourly_0to24 = p_0to24.resample('H').sum() + + # Normalize Yearly energy use to 1 + immediate_norm = immediate_hourly * (1 / immediate_hourly.sum()) + balanced_norm = balanced_hourly * (1 / balanced_hourly.sum()) + norm_23to8 = hourly_23to8 * (1 / hourly_23to8 .sum()) + norm_0to24 = hourly_0to24 * (1 / hourly_0to24.sum()) + + P_sum_norm = pd.concat([immediate_norm, balanced_norm, norm_23to8, norm_0to24], axis=1) + smaller_zero = P_sum_norm < 0 + P_sum_norm[smaller_zero] = 0 + + return P_sum_norm + + +def return_sum_charging_power(path=None): + """ + This function returns a DataFrame with 
summed charging power series. Prerequisite is the calculation of profiles + with the library emobpy by DIW Berlin. If the function is run for the first time a path to data must be provided. + + Parameters + ---------- + path: String + Path to a directory containing at least one folder with charging power series files. + + Returns: DataFrame + Summed charging profiles + ------- + """ + fn = os.path.join(cfg.get("paths", "scenario_data"), 'sum_charging_power.csv') + + if not os.path.isfile(fn): + os.chdir(path) + dirs = os.listdir(os.getcwd()) + result_dict = dict.fromkeys(dirs) + + for dir in result_dict.keys(): + path = os.path.join(os.getcwd(), dir) + result_dict[dir] = get_charging_profiles_from_database(path) + + charging_types = result_dict[dir].columns + idx = result_dict[dir].index + P_sum = pd.DataFrame(index=idx, columns=charging_types) + len_series = len(result_dict[list(result_dict.keys())[0]]['immediate']) + + for ch_type in charging_types: + sum_temp = pd.Series(np.zeros(shape=len_series), index=idx) + + for key in result_dict.keys(): + P_tmp = result_dict[key][ch_type] + sum_temp = sum_temp.add(P_tmp, fill_value=0) + + P_sum[ch_type] = sum_temp + + P_sum.to_csv(fn) + + else: + P_sum = pd.read_csv(fn) + P_sum.set_index('Unnamed: 0', drop=True, inplace=True) + + return P_sum From 2b2135789933197044914e8ca9aaa441c295e6f6 Mon Sep 17 00:00:00 2001 From: beierd Date: Wed, 26 Aug 2020 14:22:19 +0200 Subject: [PATCH 3/9] Some changes in emobpy processing module --- src/scenario_builder/emobpy_processing.py | 31 ++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/src/scenario_builder/emobpy_processing.py b/src/scenario_builder/emobpy_processing.py index 8f5a285..8506fb4 100644 --- a/src/scenario_builder/emobpy_processing.py +++ b/src/scenario_builder/emobpy_processing.py @@ -3,7 +3,7 @@ import os import numpy as np from reegis import config as cfg - +from matplotlib import pyplot as plt def 
get_charging_profiles_from_database(path): """ @@ -100,6 +100,8 @@ def return_normalized_charging_series(df): # Cut off initial charging df.iloc[0:48] = df.iloc[48:96].values + # Cut off end charging to intial SoC + df.iloc[len(df)-48:len(df)] = df.iloc[len(df)-96:len(df)-48].values idx = pd.DatetimeIndex(df.index, freq='30min') df.set_index(idx, inplace=True) p_immediate = df['immediate'] @@ -123,6 +125,10 @@ def return_normalized_charging_series(df): smaller_zero = P_sum_norm < 0 P_sum_norm[smaller_zero] = 0 + for n in ['immediate', 'balanced', '23to8', '0to24']: + inc_fac = 1 / P_sum_norm[n].sum() + P_sum_norm[n] = P_sum_norm[n].multiply(inc_fac) + return P_sum_norm @@ -172,3 +178,26 @@ def return_sum_charging_power(path=None): P_sum.set_index('Unnamed: 0', drop=True, inplace=True) return P_sum + + +def return_averaged_charging_series(weight_im=0.4, weight_bal=0.4, weight_night=0.2): + cs = return_sum_charging_power() + cs_norm = return_normalized_charging_series(cs) + + im = cs_norm["immediate"] + bal = cs_norm["balanced"] + night = cs_norm["23to8"] + + P_charge = weight_im * im + weight_bal * bal + weight_night * night + + return P_charge + + +def plot_charging_series_comparison(df, df_mean): + fig, axs = plt.subplots(5) + fig.suptitle('Charging strategy comparison') + axs[0].plot(df["immediate"]), axs[0].set_title('Immediate') + axs[1].plot(df["balanced"]), axs[1].set_title('balanced') + axs[2].plot(df["0to24"]), axs[2].set_title('0to24') + axs[3].plot(df["23to8"]), axs[3].set_title('23to8') + axs[4].plot(df_mean), axs[4].set_title('Averaged series') From 68eb63e20945deb4d6cd7ad543febdee5a4929df Mon Sep 17 00:00:00 2001 From: beierd Date: Wed, 26 Aug 2020 14:23:00 +0200 Subject: [PATCH 4/9] Adds module to calculate scenario heatloads --- .../heatload_scenario_calculator.py | 192 ++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 src/scenario_builder/heatload_scenario_calculator.py diff --git 
a/src/scenario_builder/heatload_scenario_calculator.py b/src/scenario_builder/heatload_scenario_calculator.py new file mode 100644 index 0000000..0727836 --- /dev/null +++ b/src/scenario_builder/heatload_scenario_calculator.py @@ -0,0 +1,192 @@ +from disaggregator import data, spatial +from reegis import geometries as geo, config as cfg +import pandas as pd +import os + +def get_household_heatload_by_NUTS3_scenario( + m_type, weight_by_income=True +): + + """ + Parameters + ---------- + region_pick : list + Selected regions in NUTS-3 format + m_type: int + 1 = Status Quo, 2 = Conventional modernisation, 3 = Future modernisation + weight_by_income : bool + Choose whether heat demand shall be weighted by household income + + Returns: pd.DataFrame + Dataframe containing yearly household load for selection + ------- + """ + # Abweichungen in den Jahresmengen bei bottom-up + qdem_temp= spatial.disagg_households_heatload_DB_scenario( + m_type, weight_by_income=weight_by_income + ) + + qdem = qdem_temp.sum(axis=1) + + return qdem + + +def get_CTS_heatload_scenario(region_pick, efficiency_gain=0.5): + """ + Parameters + ---------- + region_pick : list + Selected regions in NUTS-3 format + efficiency_gain: float + Reduction factor for heatload due to increased CTS building efficiency + (0.99 equals 99% reduction, 0% equals no reduction) + + Returns: pd.DataFrame + Dataframe containing yearly heat CTS heat consumption by NUTS-3 region + ------- + """ + + # Define year of interest + data.cfg["base_year"] = 2015 + # Get gas consumption of defined year and divide by gas-share in end energy use for heating + heatload_hh = data.gas_consumption_HH().sum() / 0.47 + # Multiply with CTS heatload share, Assumption: Share is constant because heatload mainly depends on wheather + heatload_CTS_2015 = 0.37 * heatload_hh # Verhältnis aus dem Jahr 2017 + # Assumption: Heatload is reduced equally for all regions + heatload_CTS = heatload_CTS_2015 * (1-efficiency_gain) + # Calculate CTS gas 
consumption by economic branch and NUTS3-region
+    gc_CTS = spatial.disagg_CTS_industry(
+        sector="CTS", source="gas", use_nuts3code=True
+    )
+    # Sum up the gas consumption per NUTS3-region
+    sum_gas_CTS = gc_CTS.sum().sum()
+    # Calculate scaling factor
+    inc_fac = heatload_CTS / sum_gas_CTS
+    # Calculate CTS heatload: Assumption: Heatload correlates strongly with gas consumption
+    gc_CTS_new = gc_CTS.multiply(inc_fac)
+    # Select heatload of NUTS3-regions of interest
+    gc_CTS_combined = gc_CTS_new.sum()
+    df = gc_CTS_combined[region_pick]
+
+    return df
+
+
+def get_industry_heating_hotwater_scenario(region_pick, efficiency_gain=0.5):
+    """
+    Parameters
+    ----------
+    region_pick : list
+        Selected regions in NUTS-3 format
+    efficiency_gain: float
+        Reduction factor for heatload due to increased industrial building efficiency
+        (0.99 equals 99% reduction, 0% equals no reduction)
+
+    Returns: pd.DataFrame
+        Dataframe containing yearly industry heat consumption by NUTS-3 region
+    -------
+    """
+
+    # Define year of interest
+    data.cfg["base_year"] = 2015
+    # Get gas consumption of defined year and divide by gas-share in end energy use for heating
+    heatload_hh = data.gas_consumption_HH().sum() / 0.47
+    # Multiply with industries heatload share, Assumption: Share is constant because heatload mainly depends on weather
+    heatload_industry_2015 = 0.089 * heatload_hh # Verhältnis aus dem Jahr 2017
+    heatload_industry = heatload_industry_2015 * (1-efficiency_gain)
+    # Calculate industry gas consumption by economic branch and NUTS3-region
+    gc_industry = spatial.disagg_CTS_industry(
+        sector="industry", source="gas", use_nuts3code=True
+    )
+    # Sum up the gas consumption per NUTS3-region
+    sum_gas_industry = gc_industry.sum().sum()
+    # Calculate scaling factor
+    inc_fac = heatload_industry / sum_gas_industry
+    # Calculate industry's heatload: Assumption: Heatload correlates strongly with gas consumption
+    gc_industry_new = gc_industry.multiply(inc_fac)
+    gc_industry_combined = 
gc_industry_new.sum() + # Select heatload of NUTS3-regions of interest + df = gc_industry_combined[region_pick] + + return df + + +def get_industry_CTS_process_heat_scenario(region_pick, efficiency_gain=0.2): + """ + Parameters + ---------- + region_pick : list + Selected regions in NUTS-3 format + efficiency_gain: float + Reduction factor for heatload due to increased CTS building efficiency + (0.99 equals 99% reduction, 0% equals no reduction) + + Returns: pd.DataFrame + Dataframe containing yearly industry heat consumption by NUTS-3 region + ------- + """ + + # Select year + data.cfg["base_year"] = 2015 + # Get industrial gas consumption by NUTS3 + gc_industry = spatial.disagg_CTS_industry( + sector="industry", source="gas", use_nuts3code=True + ) + sum_gas_industry = gc_industry.sum().sum() + # Calculate factor of process heat consumption to gas consumption. + # Assumption: Process heat demand correlates with gas demand + process_heat_2015 = (515 + 42) * 1e6 + process_heat = process_heat_2015 * (1-efficiency_gain) + inc_fac = process_heat / sum_gas_industry + # Calculate process heat with factor + ph_industry = gc_industry.multiply(inc_fac) + ph_industry_combined = ph_industry.sum() + # Select process heat consumptions for NUTS3-Regions of interest + df = ph_industry_combined[region_pick] + + return df + + +def get_combined_heatload_for_region_scenario(name, region_pick=None, m_type=2 , eff_gain_CTS=0, + eff_gain_ph=0, eff_gain_ihw=0): + """ + Parameters + ---------- + year : int + Year of interest, so far only 2015 and 2016 are valid inputs + name: string + Name of scenario + region_pick : list + Selected regions in NUTS-3 format, if None function will return demand for all regions + + Returns: pd.DataFrame + Dataframe containing aggregated yearly low temperature heat demand (households, CTS, industry) as well + as high temperature heat demand (ProcessHeat) for selection + ------- + """ + if region_pick is None: + nuts3_index = data.database_shapes().index # 
Select all NUTS3 Regions + + fn_pattern = "heat_consumption_by_nuts3_{name}.csv".format(name=name) + fn = os.path.join(cfg.get("paths", "disaggregator"), fn_pattern) + + if not os.path.isfile(fn): + tmp0 = get_household_heatload_by_NUTS3_scenario( + m_type, weight_by_income=True + ) # Nur bis 2016 + tmp1 = get_CTS_heatload_scenario(nuts3_index, eff_gain_CTS) # 2015 - 2035 (projection) + tmp2 = get_industry_heating_hotwater_scenario(nuts3_index, eff_gain_ihw) + tmp3 = get_industry_CTS_process_heat_scenario(nuts3_index, eff_gain_ph) + + df_heating = pd.concat([tmp0, tmp1, tmp2, tmp3], axis=1) + df_heating.columns = ["Households", "CTS", "Industry", "ProcessHeat"] + df_heating.to_csv(fn) + + else: + df_heating = pd.read_csv(fn) + df_heating.set_index("nuts3", drop=True, inplace=True) + + return df_heating + + +test = get_combined_heatload_for_region_scenario('test123', region_pick=None, m_type=2 , eff_gain_CTS=0, + eff_gain_ph=0, eff_gain_ihw=0) From 783fd36ecf724f1baa67d472f7a60e985454d691 Mon Sep 17 00:00:00 2001 From: beierd Date: Wed, 26 Aug 2020 14:23:27 +0200 Subject: [PATCH 5/9] Adds module with help functions --- src/scenario_builder/snippets.py | 88 ++++++++++++++++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 src/scenario_builder/snippets.py diff --git a/src/scenario_builder/snippets.py b/src/scenario_builder/snippets.py new file mode 100644 index 0000000..6edbb6b --- /dev/null +++ b/src/scenario_builder/snippets.py @@ -0,0 +1,88 @@ +import pandas as pd +from reegis import geometries as geo_reegis +from deflex import geometries as geo_deflex +from reegis import land_availability_glaes, demand_disaggregator +from disaggregator import data + +def get_cost_emission_scenario_data(path_to_data): + + commodity_sources = dict.fromkeys({'StatusQuo', 'NEP2030', 'AllElectric', 'SynFuel'}) + + for n in commodity_sources.keys(): + cost_data = pd.read_excel(path_to_data, n) + cost_data.set_index('Unnamed: 0', drop=True, inplace=True) + 
cost_data.drop(['costs', 'emission_cost', 'carbon_price'], inplace=True) + commodity_sources[n] = cost_data + + return commodity_sources + + +def return_normalized_domestic_profiles(regions, df): + + test = df.groupby(level=[0,2], axis=1).sum() + profile_domestic = pd.DataFrame(index=test.index, columns=regions.index[0:18] ) + + for reg in regions.index[0:18]: + profile_domestic[reg] = test[reg]["domestic"] + test[reg]["retail"] + + profile_domestic = profile_domestic.div(profile_domestic.sum()) + + return profile_domestic + + +def return_normalized_industrial_profiles(regions,df): + + test = df.groupby(level=[0, 2], axis=1).sum() + profile_domestic = pd.DataFrame(index=test.index, columns=regions.index[0:18]) + + for reg in regions.index[0:18]: + profile_domestic[reg] = test[reg]["industrial"] + + profile_industrial = profile_domestic.div(profile_domestic.sum()) + + return profile_industrial + + +def transform_NEP_capacities_to_de21(path_to_NEP_capacities): + de21 = geo_deflex.deflex_regions(rmap='de21') + fed_states = geo_reegis.get_federal_states_polygon() + nuts3_index = data.database_shapes().index + + # Load NEP capacities + NEP2030_capacity = pd.read_excel(path_to_NEP_capacities) + NEP2030_capacity.set_index('fedstate', drop=True, inplace=True) + NEP2030_capacity = NEP2030_capacity.multiply(1e3) + + #Fetch GLAES RES capacities and compare to NEP data + glaes_capacity = land_availability_glaes.aggregate_capacity_by_region(fed_states) + compare_RES = pd.concat([glaes_capacity, NEP2030_capacity['onshore'], NEP2030_capacity['offshore'], + NEP2030_capacity['solar pv']], axis=1) + + compare_RES.drop(['N0', 'N1', 'O0', 'P0'], axis=0, inplace=True) + + scaling_wind = compare_RES['onshore'] / compare_RES['P_wind'] + scaling_pv = compare_RES['solar pv'] / compare_RES['P_pv'] + + mapped_nuts = demand_disaggregator.get_nutslist_for_regions(fed_states) + res_capacity_nuts3 = land_availability_glaes.get_pv_wind_capacity_potential_by_nuts3() + + # Zuordnung der 
installierten Leistungen zu den jeweiligen Landkreisen + P_NEP_nuts3 = pd.DataFrame(index=nuts3_index, columns=['onshore', 'pv']) + + for zone in compare_RES.index: + region_pick = mapped_nuts.loc[zone] + for nuts3 in region_pick.iloc[0]: + P_NEP_nuts3.loc[nuts3]['onshore'] = res_capacity_nuts3['P_wind'][nuts3] * scaling_wind[zone] + P_NEP_nuts3.loc[nuts3]['pv'] = res_capacity_nuts3['P_pv'][nuts3] * scaling_pv[zone] + + NEP_capacity_de21 = pd.DataFrame( + index=de21.index, columns=["P_wind", "P_pv"] + ) + nuts3_list_de21 = demand_disaggregator.get_nutslist_for_regions(de21) + + for zone in de21.index: + idx = nuts3_list_de21.loc[zone]["nuts"] + NEP_capacity_de21.loc[zone]["P_wind"] = P_NEP_nuts3["onshore"][idx].sum() + NEP_capacity_de21.loc[zone]["P_pv"] = P_NEP_nuts3["pv"][idx].sum() + + return NEP_capacity_de21 From 8a64e39d8fd23eba01c879edfbf30bb7e16cc64d Mon Sep 17 00:00:00 2001 From: beierd Date: Mon, 21 Sep 2020 15:10:31 +0200 Subject: [PATCH 6/9] Adds new cop precalc functions and help functions --- src/scenario_builder/cop_precalc.py | 29 ++++++++++++++++++++++++++++- src/scenario_builder/snippets.py | 29 +++++++++++++++++++++++++++-- 2 files changed, 55 insertions(+), 3 deletions(-) diff --git a/src/scenario_builder/cop_precalc.py b/src/scenario_builder/cop_precalc.py index 1cbec43..203d788 100644 --- a/src/scenario_builder/cop_precalc.py +++ b/src/scenario_builder/cop_precalc.py @@ -3,7 +3,7 @@ import pandas as pd import numpy as np from disaggregator import data -from reegis import coastdat +from reegis import coastdat, demand_disaggregator from reegis import config as cfg @@ -244,3 +244,30 @@ def calculate_mixed_cops_by_nuts3(year, name, share_ashp=0.7, share_gshp=0.3, qu COP_NUTS3_water = pd.read_csv(fn_water) return COP_NUTS3, COP_NUTS3_water + + +def aggregate_COP_by_region(regions, COP): + mean_COP = pd.DataFrame(columns=regions.index) + #mean_COP_water = pd.DataFrame(columns=regions.index) + + nuts3_list = 
demand_disaggregator.get_nutslist_for_regions(regions) + + for zone in regions.index: + idx = nuts3_list.loc[zone]["nuts"] + mean_COP[zone] = COP[idx].mean(axis=1) + #mean_COP_water[zone] = COP_water[idx].mean(axis=1) + + return mean_COP + + +def get_hp_timeseries(heat_profile, cop, E_el): + cop.index = heat_profile.index + + for Q_rated in range(0,10000000,1000): + heat_hp = Q_rated * cop * heat_profile + elc_hp = heat_hp / cop + + if elc_hp.sum() > E_el: + break + + return elc_hp, heat_hp diff --git a/src/scenario_builder/snippets.py b/src/scenario_builder/snippets.py index 6edbb6b..7fdcbc8 100644 --- a/src/scenario_builder/snippets.py +++ b/src/scenario_builder/snippets.py @@ -11,7 +11,7 @@ def get_cost_emission_scenario_data(path_to_data): for n in commodity_sources.keys(): cost_data = pd.read_excel(path_to_data, n) cost_data.set_index('Unnamed: 0', drop=True, inplace=True) - cost_data.drop(['costs', 'emission_cost', 'carbon_price'], inplace=True) + cost_data.drop(['emission_cost', 'total_cost'], inplace=True) commodity_sources[n] = cost_data return commodity_sources @@ -30,7 +30,7 @@ def return_normalized_domestic_profiles(regions, df): return profile_domestic -def return_normalized_industrial_profiles(regions,df): +def return_normalized_industrial_profiles(regions, df): test = df.groupby(level=[0, 2], axis=1).sum() profile_domestic = pd.DataFrame(index=test.index, columns=regions.index[0:18]) @@ -86,3 +86,28 @@ def transform_NEP_capacities_to_de21(path_to_NEP_capacities): NEP_capacity_de21.loc[zone]["P_pv"] = P_NEP_nuts3["pv"][idx].sum() return NEP_capacity_de21 + + +def load_NEP_pp_capacities(path_to_NEP_capacities): + NEP2030_capacity = pd.read_excel(path_to_NEP_capacities) + NEP2030_capacity.set_index('fedstate', drop=True, inplace=True) + NEP2030_capacity = NEP2030_capacity.multiply(1e3) + + # Select pp capacities + pp_capacities = pd.concat([NEP2030_capacity['lignite'], NEP2030_capacity['hard coal'], NEP2030_capacity['oil'], + NEP2030_capacity['natural 
gas'], NEP2030_capacity['biomass'], + NEP2030_capacity['other']], axis=1) + + return pp_capacities + + +def aggregate_by_region(regions, data): + out_df = pd.Series(index=regions.index) + nuts3_list = demand_disaggregator.get_nutslist_for_regions(regions) + + for zone in regions.index: + idx = nuts3_list.loc[zone]["nuts"] + out_df.loc[zone] = data[idx].sum() + + return out_df + From fd5b01d9ba14e930668ffb2e25bbe9802006b87c Mon Sep 17 00:00:00 2001 From: beierd Date: Thu, 24 Sep 2020 10:03:02 +0200 Subject: [PATCH 7/9] Minor changes in comments --- src/scenario_builder/emobpy_processing.py | 2 +- src/scenario_builder/heatload_scenario_calculator.py | 3 +++ src/scenario_builder/snippets.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/src/scenario_builder/emobpy_processing.py b/src/scenario_builder/emobpy_processing.py index 8506fb4..da2e739 100644 --- a/src/scenario_builder/emobpy_processing.py +++ b/src/scenario_builder/emobpy_processing.py @@ -100,7 +100,7 @@ def return_normalized_charging_series(df): # Cut off initial charging df.iloc[0:48] = df.iloc[48:96].values - # Cut off end charging to intial SoC + # Cut off end charging to initial SoC df.iloc[len(df)-48:len(df)] = df.iloc[len(df)-96:len(df)-48].values idx = pd.DatetimeIndex(df.index, freq='30min') df.set_index(idx, inplace=True) diff --git a/src/scenario_builder/heatload_scenario_calculator.py b/src/scenario_builder/heatload_scenario_calculator.py index 0727836..d093613 100644 --- a/src/scenario_builder/heatload_scenario_calculator.py +++ b/src/scenario_builder/heatload_scenario_calculator.py @@ -134,6 +134,7 @@ def get_industry_CTS_process_heat_scenario(region_pick, efficiency_gain=0.2): sum_gas_industry = gc_industry.sum().sum() # Calculate factor of process heat consumption to gas consumption. 
# Assumption: Process heat demand correlates with gas demand + # ToDo Quellenangaben für hard gecodete Werte in Code beifügen process_heat_2015 = (515 + 42) * 1e6 process_heat = process_heat_2015 * (1-efficiency_gain) inc_fac = process_heat / sum_gas_industry @@ -157,6 +158,8 @@ def get_combined_heatload_for_region_scenario(name, region_pick=None, m_type=2 , Name of scenario region_pick : list Selected regions in NUTS-3 format, if None function will return demand for all regions + # ToDo Docstring ergänzen + eff_gain ... Returns: pd.DataFrame Dataframe containing aggregated yearly low temperature heat demand (households, CTS, industry) as well diff --git a/src/scenario_builder/snippets.py b/src/scenario_builder/snippets.py index 7fdcbc8..85ce496 100644 --- a/src/scenario_builder/snippets.py +++ b/src/scenario_builder/snippets.py @@ -66,7 +66,7 @@ def transform_NEP_capacities_to_de21(path_to_NEP_capacities): mapped_nuts = demand_disaggregator.get_nutslist_for_regions(fed_states) res_capacity_nuts3 = land_availability_glaes.get_pv_wind_capacity_potential_by_nuts3() - # Zuordnung der installierten Leistungen zu den jeweiligen Landkreisen + # Zuordnung der installierten Leistungen zu den jeweiligen Landkreisen P_NEP_nuts3 = pd.DataFrame(index=nuts3_index, columns=['onshore', 'pv']) for zone in compare_RES.index: From 683d90da9660612bab1f0f4e239b15eb4649967c Mon Sep 17 00:00:00 2001 From: beierd Date: Thu, 24 Sep 2020 10:03:24 +0200 Subject: [PATCH 8/9] Updates setup requirements --- setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/setup.py b/setup.py index eb6b9c3..9f736ed 100755 --- a/setup.py +++ b/setup.py @@ -69,6 +69,8 @@ def read(*names, **kwargs): ], python_requires=">=3.6", install_requires=[ + "oemof.thermal", + "emobpy" # eg: 'aspectlib==1.1.1', 'six>=1.7', ], extras_require={ From 5c9da956603f21fb41f0331be1f359d8b1782af7 Mon Sep 17 00:00:00 2001 From: beierd Date: Tue, 29 Sep 2020 14:05:29 +0200 Subject: [PATCH 9/9] Adds minor changes --- 
src/scenario_builder/cop_precalc.py | 2 ++ src/scenario_builder/snippets.py | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/scenario_builder/cop_precalc.py b/src/scenario_builder/cop_precalc.py index 203d788..ed3e701 100644 --- a/src/scenario_builder/cop_precalc.py +++ b/src/scenario_builder/cop_precalc.py @@ -169,7 +169,9 @@ def calculate_mixed_COPS_per_region(t_amb, t_ground, quality_grade, share_ashp=0 # Limit COP to 7 limit = cops_aggregated >= 7 + artifact = cops_aggregated < 0 cops_aggregated[limit] = 7 + cops_aggregated[artifact] = 7 return cops_aggregated diff --git a/src/scenario_builder/snippets.py b/src/scenario_builder/snippets.py index 85ce496..c7cb0fa 100644 --- a/src/scenario_builder/snippets.py +++ b/src/scenario_builder/snippets.py @@ -110,4 +110,3 @@ def aggregate_by_region(regions, data): out_df.loc[zone] = data[idx].sum() return out_df -