
Commit acd32ad
some corrections and code cleaning
squoilin committed Apr 9, 2020
Parent: 125607b
Showing 3 changed files with 30 additions and 30 deletions.
dispaset/preprocessing/build.py (12 changes: 3 additions & 9 deletions)

```diff
@@ -286,12 +286,6 @@ def build_single_run(config, profiles=None):
     # Calculating the efficiency time series for each unit:
     Efficiencies = EfficiencyTimeSeries(config,Plants_merged,Temperatures)
 
-    # merge the outages:
-    for i in plants.index: # for all the old plant indexes
-        # get the old plant name corresponding to s:
-        oldname = plants['Unit'][i]
-        newname = mapping['NewIndex'][i]
-
     # Reserve calculation
     reserve_2U_tot = pd.DataFrame(index=Load.index,columns=Load.columns)
     reserve_2D_tot = pd.DataFrame(index=Load.index,columns=Load.columns)
@@ -319,13 +313,13 @@ def build_single_run(config, profiles=None):
 
     # Merge the following time series with weighted averages
     for key in ['ScaledInflows','Outages','AvailabilityFactors','CostHeatSlack']:
-        finalTS[key] = merge_series(plants, finalTS[key], mapping, tablename=key)
+        finalTS[key] = merge_series(Plants_merged, plants, finalTS[key], tablename=key)
     # Merge the following time series by summing
     for key in ['HeatDemand']:
-        finalTS[key] = merge_series(plants, finalTS[key], mapping, tablename=key, method='Sum')
+        finalTS[key] = merge_series(Plants_merged, plants, finalTS[key], tablename=key, method='Sum')
     # Merge the following time series by weighted average based on storage capacity
     for key in ['ReservoirLevels']:
-        finalTS[key] = merge_series(plants, finalTS[key], mapping, tablename=key, method='StorageWeightedAverage')
+        finalTS[key] = merge_series(Plants_merged, plants, finalTS[key], tablename=key, method='StorageWeightedAverage')
 
     # Check that all times series data is available with the specified data time step:
     for key in FuelPrices:
```
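The three calls above show the new merge_series interface: the mapping dictionary that the clustering function used to return is gone, and the clustered unit table itself (Plants_merged, which now carries a 'FormerUnits' column) is passed alongside the original table (plants). Below is a minimal sketch of the call, assuming the Dispa-SET package is importable; the unit names, capacities and availability values are hypothetical toy data:

```python
import pandas as pd
from dispaset.preprocessing.data_handler import merge_series

# Original (pre-clustering) units, indexed by unit name:
oldplants = pd.DataFrame({'Unit': ['GasA', 'GasB'],
                          'PowerCapacity': [100.0, 300.0],
                          'Nunits': [1, 1]})
oldplants.index = oldplants['Unit']

# Clustered table: a single unit that remembers its former units:
plants_merged = pd.DataFrame({'Unit': ['GasAB'],
                              'Nunits': [1],
                              'FormerUnits': [['GasA', 'GasB']]})

# Hourly availability factors of the original units:
data = pd.DataFrame({'GasA': [1.0, 0.0], 'GasB': [1.0, 1.0]})

merged = merge_series(plants_merged, oldplants, data, tablename='AvailabilityFactors')
# Capacity-weighted merge: hour 0 -> (1*100 + 1*300)/400 = 1.0
#                          hour 1 -> (0*100 + 1*300)/400 = 0.75
```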
dispaset/preprocessing/data_handler.py (44 changes: 24 additions & 20 deletions)
```diff
@@ -190,24 +190,33 @@ def UnitBasedTable(plants,varname,config,fallbacks=['Unit'],default=None,Restric
     return out
 
 
-def merge_series(plants, data, mapping, method='WeightedAverage', tablename=''):
+def merge_series(plants,oldplants, data, method='WeightedAverage', tablename=''):
     """
     Function that merges the times series corresponding to the merged units (e.g. outages, inflows, etc.)
-    :param plants: Pandas dataframe with the information relative to the original units
+    :param plants: Pandas dataframe with final units after clustering (must contain 'FormerUnits')
+    :param oldplants: Pandas dataframe with the original units
     :param data: Pandas dataframe with the time series and the original unit names as column header
-    :param mapping: Mapping between the merged units and the original units. Output of the clustering function
     :param method: Select the merging method ('WeightedAverage'/'Sum')
     :param tablename: Name of the table being processed (e.g. 'Outages'), used in the warnings
     :return merged: Pandas dataframe with the merged time series when necessary
     """
     # backward compatibility:
     if not "Nunits" in plants:
         plants['Nunits'] = 1
 
+    if not 'FormerUnits' in plants:
+        logging.critical('The unit table provided must contain the columns "FormerUnits"')
+        sys.exit(1)
+
     plants.index = range(len(plants))
     merged = pd.DataFrame(index=data.index)
-    unitnames = plants.Unit.values.tolist()
+
+    # Create a dictionary relating the former units to the new (clustered) ones:
+    units = {}
+    for u in plants.index:
+        for uu in plants.loc[u,'FormerUnits']:
+            units[uu] = u
 
     # First check the data:
     if not isinstance(data,pd.DataFrame):
         logging.critical('The input "' + tablename + '" to the merge_series function must be a dataframe')
```
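The loop added above inverts the 'FormerUnits' column into a plain dictionary: every original unit name points to the integer index of the clustered unit that absorbed it, so each column lookup later in the function becomes a single dictionary access instead of a list search plus two mapping lookups. A toy illustration with hypothetical unit names:

```python
import pandas as pd

# Clustered table: two final units, one of which merges two former units.
plants = pd.DataFrame({'Unit': ['GasAB', 'Nuke1'],
                       'FormerUnits': [['GasA', 'GasB'], ['Nuke1']]})
plants.index = range(len(plants))

# Same inversion as in the patched merge_series:
units = {}
for u in plants.index:
    for uu in plants.loc[u, 'FormerUnits']:
        units[uu] = u

print(units)  # {'GasA': 0, 'GasB': 0, 'Nuke1': 1}
```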
```diff
@@ -216,12 +225,10 @@ def merge_series(plants, data, mapping, method='WeightedAverage', tablename=''):
         if str(data[key].dtype) not in ['bool','int','float','float16', 'float32', 'float64', 'float128','int8', 'int16', 'int32', 'int64']:
             logging.critical('The column "' + str(key) + '" of table + "' + tablename + '" is not numeric!')
     for key in data:
-        if key in unitnames:
-            i = unitnames.index(key)
-            newunit = mapping['NewIndex'][i]
+        if key in units:
+            newunit = units[key]
             if newunit not in merged: # if the columns name is in the mapping and the new unit has not been processed yet
-                oldindexes = mapping['FormerIndexes'][newunit]
-                oldnames = [plants['Unit'][x] for x in oldindexes]
+                oldnames = plants.loc[newunit,'FormerUnits']
                 if all([name in data for name in oldnames]):
                     subunits = data[oldnames]
                 else:
```
```diff
@@ -232,25 +239,22 @@ def merge_series(plants, data, mapping, method='WeightedAverage', tablename=''):
                     sys.exit(1)
                 value = np.zeros(len(data))
-                # Renaming the subunits df headers with the old plant indexes instead of the unit names:
-                subunits.columns = mapping['FormerIndexes'][newunit]
                 if method == 'WeightedAverage':
-                    for idx in oldindexes:
-                        name = plants['Unit'][idx]
-                        value = value + subunits[idx] * np.maximum(1e-9, plants['PowerCapacity'][idx]*plants['Nunits'][idx])
-                    P_j = np.sum(np.maximum(1e-9, plants['PowerCapacity'][oldindexes]*plants['Nunits'][oldindexes]))
+                    for name in oldnames:
+                        value = value + subunits[name] * np.maximum(1e-9, oldplants['PowerCapacity'][name]*oldplants['Nunits'][name])
+                    P_j = np.sum(np.maximum(1e-9, oldplants['PowerCapacity'][oldnames]*oldplants['Nunits'][oldnames]))
                     merged[newunit] = value / P_j
                 elif method == 'StorageWeightedAverage':
-                    for idx in oldindexes:
-                        name = plants['Unit'][idx]
-                        value = value + subunits[idx] * np.maximum(1e-9, plants['STOCapacity'][idx]*plants['Nunits'][idx])
-                    P_j = np.sum(np.maximum(1e-9, plants['STOCapacity'][oldindexes]*plants['Nunits'][oldindexes]))
+                    for name in oldnames:
+                        value = value + subunits[name] * np.maximum(1e-9, oldplants['STOCapacity'][name]*oldplants['Nunits'][name])
+                    P_j = np.sum(np.maximum(1e-9, oldplants['STOCapacity'][oldnames]*oldplants['Nunits'][oldnames]))
                     merged[newunit] = value / P_j
                 elif method == 'Sum':
                     merged[newunit] = subunits.sum(axis=1)
                 else:
                     logging.critical('Method "' + str(method) + '" unknown in function MergeSeries')
                     sys.exit(1)
-        elif key in plants['Unit']:
+        elif key in oldplants['Unit']:
             if not isinstance(key, tuple): # if the columns header is a tuple, it does not come from the data and has been added by Dispa-SET
                 logging.warning('Column ' + str(key) + ' present in the table "' + tablename + '" not found in the mapping between original and clustered units. Skipping')
             else:
```
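For a clustered unit j with former units i, the 'WeightedAverage' branch computes merged_j(t) = sum_i x_i(t) * max(1e-9, P_i * N_i) / sum_i max(1e-9, P_i * N_i), where P_i is PowerCapacity (STOCapacity for 'StorageWeightedAverage') and N_i is Nunits; the 1e-9 floor keeps the denominator strictly positive even when all capacities are zero. A minimal pandas sketch of the three merge rules, using hypothetical toy values:

```python
import numpy as np
import pandas as pd

x = pd.DataFrame({'A': [1.0, 0.0], 'B': [1.0, 1.0]})  # time series of the former units
P = pd.Series({'A': 100.0, 'B': 300.0})               # PowerCapacity * Nunits per unit
w = np.maximum(1e-9, P)                               # the 1e-9 floor used above

weighted = x.mul(w, axis=1).sum(axis=1) / w.sum()     # 'WeightedAverage' -> [1.0, 0.75]
summed = x.sum(axis=1)                                # 'Sum'             -> [2.0, 1.0]
# 'StorageWeightedAverage' is identical to 'WeightedAverage',
# but weighted by STOCapacity*Nunits instead of PowerCapacity*Nunits.
```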
dispaset/preprocessing/utils.py (4 changes: 3 additions & 1 deletion)
```diff
@@ -422,7 +422,7 @@ def create_agg_dict(df_, method="Standard"):
     return agg_dict
 
 
-def clustering(plants, method="Standard", Nslices=20, PartLoadMax=0.1, Pmax=30):
+def clustering(plants_in, method="Standard", Nslices=20, PartLoadMax=0.1, Pmax=30):
     """
     Merge excessively disaggregated power Units.
@@ -436,6 +436,8 @@ def clustering(plants, method="Standard", Nslices=20, PartLoadMax=0.1, Pmax=30):
     @author: Matthias Zech
     """
 
+    # do not alter the original plants table:
+    plants = plants_in.copy()
     # Checking the the required columns are present in the input pandas dataframe:
     required_inputs = ['Unit', 'PowerCapacity', 'PartLoadMin', 'RampUpRate', 'RampDownRate', 'StartUpTime',
                        'MinUpTime', 'MinDownTime', 'NoLoadCost', 'StartUpCost', 'Efficiency']
```
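The added plants_in.copy() makes clustering side-effect free: columns that the function adds or rewrites while merging units stay local instead of leaking into the caller's table. A toy sketch of the difference (not Dispa-SET code):

```python
import pandas as pd

def cluster_inplace(df):
    df['Nunits'] = 1   # mutates the caller's dataframe
    return df

def cluster_copy(df_in):
    df = df_in.copy()  # local copy, as in the patched clustering()
    df['Nunits'] = 1
    return df

plants = pd.DataFrame({'Unit': ['A']})
cluster_copy(plants)
assert 'Nunits' not in plants.columns  # caller's table untouched
cluster_inplace(plants)
assert 'Nunits' in plants.columns      # caller's table was modified
```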
