Skip to content

Commit

Permalink
Merge branch 'develop' into 'master'
Browse files Browse the repository at this point in the history
Hotfix for tests

See merge request iek-3/shared-code/fine!315
  • Loading branch information
k-knosala committed Jan 29, 2024
2 parents f45fdbe + 6c05c41 commit a8f4a11
Show file tree
Hide file tree
Showing 24 changed files with 149 additions and 128 deletions.
1 change: 1 addition & 0 deletions fine/IOManagement/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
|br| @author: FINE Developer Team (FZJ IEK-3)
"""

from .standardIO import *
from .dictIO import *
from .exploitOutput import *
Expand Down
24 changes: 12 additions & 12 deletions fine/IOManagement/utilsIO.py
Original file line number Diff line number Diff line change
Expand Up @@ -274,9 +274,9 @@ def addDFVariablesToXarray(xr_ds, component_dict, df_iteration_dict):
df_variable.index.set_names("component", level=0, inplace=True)

ds_component = xr.Dataset()
ds_component[
f"ts_{variable_description}"
] = df_variable.sort_index().to_xarray()
ds_component[f"ts_{variable_description}"] = (
df_variable.sort_index().to_xarray()
)

for comp in df_variable.index.get_level_values(0).unique():
this_class = comp.split("; ")[0]
Expand Down Expand Up @@ -372,9 +372,9 @@ def addSeriesVariablesToXarray(xr_ds, component_dict, series_iteration_dict, loc
df_variable = pd.concat(space_space_dict)
df_variable.index.set_names("component", level=0, inplace=True)
ds_component = xr.Dataset()
ds_component[
f"2d_{variable_description}"
] = df_variable.sort_index().to_xarray()
ds_component[f"2d_{variable_description}"] = (
df_variable.sort_index().to_xarray()
)

for comp in df_variable.index.get_level_values(0).unique():
this_class = comp.split("; ")[0]
Expand All @@ -396,9 +396,9 @@ def addSeriesVariablesToXarray(xr_ds, component_dict, series_iteration_dict, loc
df_variable = pd.concat(space_dict)
df_variable.index.set_names("component", level=0, inplace=True)
ds_component = xr.Dataset()
ds_component[
f"1d_{variable_description}"
] = df_variable.sort_index().to_xarray()
ds_component[f"1d_{variable_description}"] = (
df_variable.sort_index().to_xarray()
)

for comp in df_variable.index.get_level_values(0).unique():
this_class = comp.split("; ")[0]
Expand All @@ -420,9 +420,9 @@ def addSeriesVariablesToXarray(xr_ds, component_dict, series_iteration_dict, loc
df_variable = pd.concat(time_dict)
df_variable.index.set_names("component", level=0, inplace=True)
ds_component = xr.Dataset()
ds_component[
f"ts_{variable_description}"
] = df_variable.sort_index().to_xarray()
ds_component[f"ts_{variable_description}"] = (
df_variable.sort_index().to_xarray()
)

for comp in df_variable.index.get_level_values(0).unique():
this_class = comp.split("; ")[0]
Expand Down
1 change: 1 addition & 0 deletions fine/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
|br| @author: FINE Developer Team (FZJ IEK-3)
"""

from .energySystemModel import EnergySystemModel
from .sourceSink import Source, Sink
from .conversion import Conversion
Expand Down
1 change: 1 addition & 0 deletions fine/aggregations/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,6 @@
|br| @author: FINE Developer Team (FZJ IEK-3)
"""

from .spatialAggregation import *
from .technologyAggregation import *
1 change: 1 addition & 0 deletions fine/aggregations/spatialAggregation/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
|br| @author: FINE Developer Team (FZJ IEK-3)
"""

from .aggregation import *
from .grouping import *
from .groupingUtils import *
Expand Down
7 changes: 3 additions & 4 deletions fine/aggregations/spatialAggregation/aggregation.py
Original file line number Diff line number Diff line change
Expand Up @@ -130,10 +130,9 @@ def aggregate_time_series_spatially(

weighted_sub_region_da = sub_region_da * sub_region_weight_da

xr_data_array_out.loc[
dict(space=sup_region_id)
] = weighted_sub_region_da.sum(dim="space") / sub_region_weight_da.sum(
dim="space"
xr_data_array_out.loc[dict(space=sup_region_id)] = (
weighted_sub_region_da.sum(dim="space")
/ sub_region_weight_da.sum(dim="space")
)

elif mode == "mean":
Expand Down
7 changes: 4 additions & 3 deletions fine/aggregations/spatialAggregation/groupingUtils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""Functions to assist spatial grouping algorithms.
"""

import warnings
import numpy as np
from scipy.cluster import hierarchy
Expand Down Expand Up @@ -452,9 +453,9 @@ def get_connectivity_matrix(xarray_datasets):
for comp, comp_ds in comp_dict.items():
for varname, da in comp_ds.data_vars.items():
if varname[:3] == "2d_":
connectivity_matrix[
da.values > 0
] = 1  # if a pos, non-zero value exists, make a connection!
connectivity_matrix[da.values > 0] = (
1  # if a pos, non-zero value exists, make a connection!
)

return connectivity_matrix

Expand Down
1 change: 1 addition & 0 deletions fine/aggregations/spatialAggregation/manager.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
"""Manager function that calls spatial grouping and aggregation algorithm.
"""

import os
import logging
import warnings
Expand Down
1 change: 1 addition & 0 deletions fine/aggregations/spatialAggregation/managerUtils.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Functions to assist spatial aggregation
"""

import warnings
import numpy as np
import pandas as pd
Expand Down
1 change: 1 addition & 0 deletions fine/aggregations/technologyAggregation/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,6 @@
|br| @author: FINE Developer Team (FZJ IEK-3)
"""

from .techAggregation import *
from .techAggregationUtils import *
1 change: 1 addition & 0 deletions fine/aggregations/technologyAggregation/techAggregation.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Aggregation of RE technologies in every region.
"""

import logging
import numpy as np
import xarray as xr
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
Functions to assist technology aggregation algorithm.
"""

import warnings
import os
import numpy as np
Expand Down
74 changes: 39 additions & 35 deletions fine/component.py
Original file line number Diff line number Diff line change
Expand Up @@ -3089,10 +3089,11 @@ def getEconomicsDesign(
# write costs into dataframe
# a) costs for complete intervals
for i in range(commisYear, commisYear + intervalsWithCompleteCosts):
costContribution[(loc, compName)][
(commisYear, i)
] = annuity * utils.annuityPresentValueFactor(
esM, compName, loc, esM.investmentPeriodInterval
costContribution[(loc, compName)][(commisYear, i)] = (
annuity
* utils.annuityPresentValueFactor(
esM, compName, loc, esM.investmentPeriodInterval
)
)

# b) costs for last economic interval
Expand Down Expand Up @@ -3159,14 +3160,15 @@ def getEconomicsDesign(
]
)
if getOptValueCostType == "NPV":
cost_results[ip].loc[
compName, loc
] = cContrSum * utils.discountFactor(esM, ip, compName, loc)
cost_results[ip].loc[compName, loc] = (
cContrSum * utils.discountFactor(esM, ip, compName, loc)
)
elif getOptValueCostType == "TAC":
cost_results[ip].loc[
compName, loc
] = cContrSum / utils.annuityPresentValueFactor(
esM, compName, loc, esM.investmentPeriodInterval
cost_results[ip].loc[compName, loc] = (
cContrSum
/ utils.annuityPresentValueFactor(
esM, compName, loc, esM.investmentPeriodInterval
)
)
return cost_results
else:
Expand Down Expand Up @@ -3458,18 +3460,18 @@ def getEconomicsOperation(

locCompIpCombinations = list(set([(x[0], x[1], x[2]) for x in var]))
for loc, compName, year in locCompIpCombinations:
costContribution[(loc, compName)][
(year, year)
] = self.getLocEconomicsOperation(
pyM,
esM,
fncType,
factorNames,
varName,
loc,
compName,
year,
getOptValue,
costContribution[(loc, compName)][(year, year)] = (
self.getLocEconomicsOperation(
pyM,
esM,
fncType,
factorNames,
varName,
loc,
compName,
year,
getOptValue,
)
)

# create dictionary with ip as key and a dataframe with
Expand Down Expand Up @@ -3737,12 +3739,14 @@ def setOptimalValues(self, esM, pyM, indexColumns, plantUnit, unitApp=""):
tuples = list(
map(
lambda x: (
x[0],
x[1],
"[" + getattr(compDict[x[0]], plantUnit) + unitApp + "]",
)
if x[1] in ["capacity", "commissioning", "decommissioning"]
else x,
(
x[0],
x[1],
"[" + getattr(compDict[x[0]], plantUnit) + unitApp + "]",
)
if x[1] in ["capacity", "commissioning", "decommissioning"]
else x
),
tuples,
)
)
Expand Down Expand Up @@ -3871,9 +3875,9 @@ def setOptimalValues(self, esM, pyM, indexColumns, plantUnit, unitApp=""):
commisOptVal_ = utils.formatOptimizationOutput(
commisValues, "designVariables", self.dimension, ip, compDict=compDict
)
self._commissioningVariablesOptimum[
esM.investmentPeriodNames[ip]
] = commisOptVal_
self._commissioningVariablesOptimum[esM.investmentPeriodNames[ip]] = (
commisOptVal_
)
# Get and set optimal variable values for decommissioning
decommisValues = decommisVar.get_values()
decommisOptVal = utils.formatOptimizationOutput(
Expand All @@ -3882,9 +3886,9 @@ def setOptimalValues(self, esM, pyM, indexColumns, plantUnit, unitApp=""):
decommisOptVal_ = utils.formatOptimizationOutput(
decommisValues, "designVariables", self.dimension, ip, compDict=compDict
)
self._decommissioningVariablesOptimum[
esM.investmentPeriodNames[ip]
] = decommisOptVal_
self._decommissioningVariablesOptimum[esM.investmentPeriodNames[ip]] = (
decommisOptVal_
)

if capOptVal is not None:
# Check if the installed capacities are close to a bigM val
Expand Down
28 changes: 15 additions & 13 deletions fine/conversion.py
Original file line number Diff line number Diff line change
Expand Up @@ -447,13 +447,13 @@ def setAggregatedTimeSeriesData(self, data, ip):
if self.fullCommodityConversionFactors[ip] != {}:
self.aggregatedCommodityConversionFactors[ip] = {}
for commod in self.fullCommodityConversionFactors[ip]:
self.aggregatedCommodityConversionFactors[ip][
commod
] = self.getTSAOutput(
self.fullCommodityConversionFactors[ip][commod],
"_commodityConversionFactorTimeSeries" + str(commod) + "_",
data,
ip,
self.aggregatedCommodityConversionFactors[ip][commod] = (
self.getTSAOutput(
self.fullCommodityConversionFactors[ip][commod],
"_commodityConversionFactorTimeSeries" + str(commod) + "_",
data,
ip,
)
)
else:
# if depending on the commissioning year, iterate over the relevant commissioning years for the
Expand Down Expand Up @@ -1243,12 +1243,14 @@ def setOptimalValues(self, esM, pyM):
tuples = list(
map(
lambda x: (
x[0],
x[1],
x[2].replace("-", compDict[x[0]].physicalUnit),
)
if x[1] == "operation"
else x,
(
x[0],
x[1],
x[2].replace("-", compDict[x[0]].physicalUnit),
)
if x[1] == "operation"
else x
),
tuples,
)
)
Expand Down
8 changes: 4 additions & 4 deletions fine/expansionModules/transformationPath.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,10 +215,10 @@ def getStock(esM, mileStoneYear, nbOfRepresentedYears):
if getattr(stockComp, "capacityFix") is None:
if isinstance(compValues.loc[comp], pd.DataFrame):
stockComp.processedCapacityFix = {}
stockComp.processedCapacityFix[
0
] = utils.preprocess2dimData(
compValues.loc[comp].fillna(value=-1), discard=False
stockComp.processedCapacityFix[0] = (
utils.preprocess2dimData(
compValues.loc[comp].fillna(value=-1), discard=False
)
)
else:
# NOTE: Values of capacityMin and capacityMax are not overwritten.
Expand Down
14 changes: 8 additions & 6 deletions fine/sourceSink.py
Original file line number Diff line number Diff line change
Expand Up @@ -1271,12 +1271,14 @@ def setOptimalValues(self, esM, pyM):
tuples = list(
map(
lambda x: (
x[0],
x[1],
x[2].replace("-", compDict[x[0]].commodityUnit),
)
if x[1] == "operation"
else x,
(
x[0],
x[1],
x[2].replace("-", compDict[x[0]].commodityUnit),
)
if x[1] == "operation"
else x
),
tuples,
)
)
Expand Down

0 comments on commit a8f4a11

Please sign in to comment.