Skip to content

Commit

Permalink
Merge branch 'master' into improve-plotting-routine
Browse files Browse the repository at this point in the history
  • Loading branch information
p-glaum committed Jul 12, 2024
2 parents dda6baf + 2bf59e6 commit c2176b7
Show file tree
Hide file tree
Showing 23 changed files with 101 additions and 70 deletions.
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,6 @@ repos:

# Check for FSFE REUSE compliance (licensing)
- repo: https://github.com/fsfe/reuse-tool
rev: v3.1.0a1
rev: v4.0.3
hooks:
- id: reuse
13 changes: 7 additions & 6 deletions config/config.default.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -571,12 +571,13 @@ sector:
min_part_load_fischer_tropsch: 0.5
min_part_load_methanolisation: 0.3
min_part_load_methanation: 0.3
use_fischer_tropsch_waste_heat: true
use_haber_bosch_waste_heat: true
use_methanolisation_waste_heat: true
use_methanation_waste_heat: true
use_fuel_cell_waste_heat: true
use_electrolysis_waste_heat: true
use_waste_heat:
fischer_tropsch: 0.25
haber_bosch: 0.25
methanolisation: 0.25
methanation: 0.25
fuel_cell: 0.25
electrolysis: 0.25
electricity_transmission_grid: true
electricity_distribution_grid: true
electricity_distribution_grid_cost_factor: 1.0
Expand Down
3 changes: 2 additions & 1 deletion doc/installation.rst
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,8 @@ Nevertheless, you can still use open-source solvers for smaller problems.
.. note::
The rules :mod:`cluster_network` and :mod:`simplify_network` solve a mixed-integer quadratic optimisation problem for clustering.
    The open-source solvers HiGHS, Cbc and GLPK cannot handle this. A fallback to SCIP is implemented in this case, which is included in the standard environment specifications.
For an open-source solver setup install in your ``conda`` environment on OSX/Linux. To install the default solver Gurobi, run
    For an open-source solver setup, install, for example, HiGHS **and** SCIP in your ``conda`` environment on OSX/Linux.
To install the default solver Gurobi, run

.. code:: bash
Expand Down
6 changes: 6 additions & 0 deletions doc/release_notes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,12 @@ Release Notes
Upcoming Release
================

* Changed default assumptions about waste heat usage from PtX and fuel cells in district heating.
The default value for the link efficiency scaling factor was changed from 100% to 25%.
It can be set to other values in the configuration ``sector: use_waste_heat:``.

* When simplifying polygons in :mod:`build_shapes`, the default is now to apply no tolerance.

* Set non-zero capital_cost for methanol stores to avoid unrealistic storage sizes.

* Set p_nom = p_nom_min for generators with baseyear == grouping_year in add_existing_baseyear. This has no effect on the optimization but helps n.statistics to correctly report already installed capacities.
Expand Down
2 changes: 1 addition & 1 deletion envs/environment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ dependencies:
- powerplantmatching>=0.5.15
- numpy
- pandas>=2.1
- geopandas>=0.11.0, <1
- geopandas>=1
- xarray>=2023.11.0
- rioxarray
- netcdf4
Expand Down
12 changes: 6 additions & 6 deletions scripts/_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -644,12 +644,12 @@ def update_config_from_wildcards(config, w, inplace=True):
config["sector"]["H2_network"] = False

if "nowasteheat" in opts:
config["sector"]["use_fischer_tropsch_waste_heat"] = False
config["sector"]["use_methanolisation_waste_heat"] = False
config["sector"]["use_haber_bosch_waste_heat"] = False
config["sector"]["use_methanation_waste_heat"] = False
config["sector"]["use_fuel_cell_waste_heat"] = False
config["sector"]["use_electrolysis_waste_heat"] = False
config["sector"]["use_waste_heat"]["fischer_tropsch"] = False
config["sector"]["use_waste_heat"]["methanolisation"] = False
config["sector"]["use_waste_heat"]["haber_bosch"] = False
config["sector"]["use_waste_heat"]["methanation"] = False
config["sector"]["use_waste_heat"]["fuel_cell"] = False
config["sector"]["use_waste_heat"]["electrolysis"] = False

if "nodistrict" in opts:
config["sector"]["district_heating"]["progress"] = 0.0
Expand Down
6 changes: 3 additions & 3 deletions scripts/add_electricity.py
Original file line number Diff line number Diff line change
Expand Up @@ -287,9 +287,9 @@ def shapes_to_shapes(orig, dest):
transfer = sparse.lil_matrix((len(dest), len(orig)), dtype=float)

for i, j in product(range(len(dest)), range(len(orig))):
if orig_prepped[j].intersects(dest[i]):
area = orig[j].intersection(dest[i]).area
transfer[i, j] = area / dest[i].area
if orig_prepped[j].intersects(dest.iloc[i]):
area = orig.iloc[j].intersection(dest.iloc[i]).area
transfer[i, j] = area / dest.iloc[i].area

return transfer

Expand Down
13 changes: 8 additions & 5 deletions scripts/base_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -671,7 +671,7 @@ def _set_links_underwater_fraction(n, offshore_shapes):
if not hasattr(n.links, "geometry"):
n.links["underwater_fraction"] = 0.0
else:
offshore_shape = gpd.read_file(offshore_shapes).unary_union
offshore_shape = gpd.read_file(offshore_shapes).union_all()
links = gpd.GeoSeries(n.links.geometry.dropna().map(shapely.wkt.loads))
n.links["underwater_fraction"] = (
links.intersection(offshore_shape).length / links.length
Expand Down Expand Up @@ -874,7 +874,8 @@ def build_bus_shapes(n, country_shapes, offshore_shapes, countries):
onshore_locs.values, onshore_shape
),
"country": country,
}
},
crs=n.crs,
)
)

Expand All @@ -889,12 +890,14 @@ def build_bus_shapes(n, country_shapes, offshore_shapes, countries):
"y": offshore_locs["y"],
"geometry": voronoi_partition_pts(offshore_locs.values, offshore_shape),
"country": country,
}
},
crs=n.crs,
)
offshore_regions_c = offshore_regions_c.loc[offshore_regions_c.area > 1e-2]
sel = offshore_regions_c.to_crs(3035).area > 10 # m2
offshore_regions_c = offshore_regions_c.loc[sel]
offshore_regions.append(offshore_regions_c)

shapes = pd.concat(onshore_regions, ignore_index=True)
shapes = pd.concat(onshore_regions, ignore_index=True).set_crs(n.crs)

return onshore_regions, offshore_regions, shapes, offshore_shapes

Expand Down
13 changes: 8 additions & 5 deletions scripts/build_gas_input_locations.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
production sites with data from SciGRID_gas and Global Energy Monitor.
"""

import json
import logging

import geopandas as gpd
Expand All @@ -19,7 +20,8 @@

def read_scigrid_gas(fn):
df = gpd.read_file(fn)
df = pd.concat([df, df.param.apply(pd.Series)], axis=1)
expanded_param = df.param.apply(json.loads).apply(pd.Series)
df = pd.concat([df, expanded_param], axis=1)
df.drop(["param", "uncertainty", "method"], axis=1, inplace=True)
return df

Expand Down Expand Up @@ -97,11 +99,11 @@ def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries):
~(entry.from_country.isin(countries) & entry.to_country.isin(countries))
& ~entry.name.str.contains("Tegelen") # only take non-EU entries
| (entry.from_country == "NO") # malformed datapoint # entries from NO to GB
]
].copy()

sto = read_scigrid_gas(sto_fn)
remove_country = ["RU", "UA", "TR", "BY"] # noqa: F841
sto = sto.query("country_code not in @remove_country")
sto = sto.query("country_code not in @remove_country").copy()

# production sites inside the model scope
prod = build_gem_prod_data(gem_fn)
Expand Down Expand Up @@ -132,7 +134,8 @@ def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries):
snakemake = mock_snakemake(
"build_gas_input_locations",
simpl="",
clusters="128",
clusters="5",
configfiles="config/test/config.overnight.yaml",
)

configure_logging(snakemake)
Expand Down Expand Up @@ -162,7 +165,7 @@ def build_gas_input_locations(gem_fn, entry_fn, sto_fn, countries):

gas_input_nodes = gpd.sjoin(gas_input_locations, regions, how="left")

gas_input_nodes.rename(columns={"index_right": "bus"}, inplace=True)
gas_input_nodes.rename(columns={"name": "bus"}, inplace=True)

gas_input_nodes.to_file(snakemake.output.gas_input_nodes, driver="GeoJSON")

Expand Down
6 changes: 4 additions & 2 deletions scripts/build_gas_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
(https://www.gas.scigrid.de/).
"""

import json
import logging

import geopandas as gpd
Expand Down Expand Up @@ -54,8 +55,9 @@ def diameter_to_capacity(pipe_diameter_mm):

def load_dataset(fn):
df = gpd.read_file(fn)
param = df.param.apply(pd.Series)
method = df.method.apply(pd.Series)[["diameter_mm", "max_cap_M_m3_per_d"]]
param = df.param.apply(json.loads).apply(pd.Series)
cols = ["diameter_mm", "max_cap_M_m3_per_d"]
method = df.method.apply(json.loads).apply(pd.Series)[cols]
method.columns = method.columns + "_method"
df = pd.concat([df, param, method], axis=1)
to_drop = ["param", "uncertainty", "method", "tags"]
Expand Down
6 changes: 3 additions & 3 deletions scripts/build_hourly_heat_demand.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,10 +41,10 @@
from _helpers import mock_snakemake

snakemake = mock_snakemake(
"build_hourly_heat_demands",
"build_hourly_heat_demand",
scope="total",
simpl="",
clusters=48,
clusters=5,
)
set_scenario_config(snakemake)

Expand Down Expand Up @@ -85,6 +85,6 @@

heat_demand.index.name = "snapshots"

ds = heat_demand.stack().to_xarray()
ds = heat_demand.stack(future_stack=True).to_xarray()

ds.to_netcdf(snakemake.output.heat_demand)
2 changes: 1 addition & 1 deletion scripts/build_industrial_distribution_key.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ def prepare_hotmaps_database(regions):

gdf = gpd.sjoin(gdf, regions, how="inner", predicate="within")

gdf.rename(columns={"index_right": "bus"}, inplace=True)
gdf.rename(columns={"name": "bus"}, inplace=True)
gdf["country"] = gdf.bus.str[:2]

# the .sjoin can lead to duplicates if a geom is in two overlapping regions
Expand Down
4 changes: 2 additions & 2 deletions scripts/build_industrial_energy_demand_per_country_today.py
Original file line number Diff line number Diff line change
Expand Up @@ -184,7 +184,7 @@ def separate_basic_chemicals(demand, production):

demand.drop(columns="Basic chemicals", inplace=True)

demand["HVC"].clip(lower=0, inplace=True)
demand["HVC"] = demand["HVC"].clip(lower=0)

return demand

Expand Down Expand Up @@ -248,7 +248,7 @@ def industrial_energy_demand(countries, year):
demand = add_non_eu28_industrial_energy_demand(countries, demand, production)

# for format compatibility
demand = demand.stack(dropna=False).unstack(level=[0, 2])
demand = demand.stack(future_stack=True).unstack(level=[0, 2])

# style and annotation
demand.index.name = "TWh/a"
Expand Down
3 changes: 2 additions & 1 deletion scripts/build_industrial_production_per_country.py
Original file line number Diff line number Diff line change
Expand Up @@ -301,7 +301,8 @@ def separate_basic_chemicals(demand, year):
demand["Basic chemicals"] -= demand["Ammonia"]

# EE, HR and LT got negative demand through subtraction - poor data
demand["Basic chemicals"].clip(lower=0.0, inplace=True)
col = "Basic chemicals"
demand[col] = demand[col].clip(lower=0.0)

# assume HVC, methanol, chlorine production proportional to non-ammonia basic chemicals
distribution_key = (
Expand Down
4 changes: 3 additions & 1 deletion scripts/build_population_layouts.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,9 @@

# The first low density grid cells to reach rural fraction are rural
asc_density_i = density_cells_ct.sort_values().index
asc_density_cumsum = pop_cells_ct[asc_density_i].cumsum() / pop_cells_ct.sum()
asc_density_cumsum = (
pop_cells_ct.iloc[asc_density_i].cumsum() / pop_cells_ct.sum()
)
rural_fraction_ct = 1 - urban_fraction[ct]
pop_ct_rural_b = asc_density_cumsum < rural_fraction_ct
pop_ct_urban_b = ~pop_ct_rural_b
Expand Down
2 changes: 1 addition & 1 deletion scripts/build_renewable_profiles.py
Original file line number Diff line number Diff line change
Expand Up @@ -406,7 +406,7 @@

if snakemake.wildcards.technology.startswith("offwind"):
logger.info("Calculate underwater fraction of connections.")
offshore_shape = gpd.read_file(snakemake.input["offshore_shapes"]).unary_union
offshore_shape = gpd.read_file(snakemake.input["offshore_shapes"]).union_all()
underwater_fraction = []
for bus in buses:
p = centre_of_mass.sel(bus=bus).data
Expand Down
20 changes: 12 additions & 8 deletions scripts/build_shapes.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ def _get_country(target, **keys):
return np.nan


def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
def _simplify_polys(polys, minarea=0.1, tolerance=None, filterremote=True):
if isinstance(polys, MultiPolygon):
polys = sorted(polys.geoms, key=attrgetter("area"), reverse=True)
mainpoly = polys[0]
Expand All @@ -106,7 +106,9 @@ def _simplify_polys(polys, minarea=0.1, tolerance=0.01, filterremote=True):
)
else:
polys = mainpoly
return polys.simplify(tolerance=tolerance)
if tolerance is not None:
polys = polys.simplify(tolerance=tolerance)
return polys


def countries(naturalearth, country_list):
Expand All @@ -124,7 +126,7 @@ def countries(naturalearth, country_list):
df = df.loc[
df.name.isin(country_list) & ((df["scalerank"] == 0) | (df["scalerank"] == 5))
]
s = df.set_index("name")["geometry"].map(_simplify_polys)
s = df.set_index("name")["geometry"].map(_simplify_polys).set_crs(df.crs)
if "RS" in country_list:
s["RS"] = s["RS"].union(s.pop("KV"))
# cleanup shape union
Expand All @@ -145,7 +147,8 @@ def eez(country_shapes, eez, country_list):
lambda s: _simplify_polys(s, filterremote=False)
)
s = gpd.GeoSeries(
{k: v for k, v in s.items() if v.distance(country_shapes[k]) < 1e-3}
{k: v for k, v in s.items() if v.distance(country_shapes[k]) < 1e-3},
crs=df.crs,
)
s = s.to_frame("geometry")
s.index.name = "name"
Expand All @@ -156,7 +159,7 @@ def country_cover(country_shapes, eez_shapes=None):
shapes = country_shapes
if eez_shapes is not None:
shapes = pd.concat([shapes, eez_shapes])
europe_shape = shapes.unary_union
europe_shape = shapes.union_all()
if isinstance(europe_shape, MultiPolygon):
europe_shape = max(europe_shape.geoms, key=attrgetter("area"))
return Polygon(shell=europe_shape.exterior)
Expand Down Expand Up @@ -235,11 +238,11 @@ def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp):
[["BA1", "BA", 3871.0], ["RS1", "RS", 7210.0], ["AL1", "AL", 2893.0]],
columns=["NUTS_ID", "country", "pop"],
geometry=gpd.GeoSeries(),
crs=df.crs,
)
manual["geometry"] = manual["country"].map(country_shapes)
manual["geometry"] = manual["country"].map(country_shapes.to_crs(df.crs))
manual = manual.dropna()
manual = manual.set_index("NUTS_ID")
manual = manual.set_crs("ETRS89")

df = pd.concat([df, manual], sort=False)

Expand All @@ -265,7 +268,8 @@ def nuts3(country_shapes, nuts3, nuts3pop, nuts3gdp, ch_cantons, ch_popgdp):
offshore_shapes.reset_index().to_file(snakemake.output.offshore_shapes)

europe_shape = gpd.GeoDataFrame(
geometry=[country_cover(country_shapes, offshore_shapes.geometry)]
geometry=[country_cover(country_shapes, offshore_shapes.geometry)],
crs=country_shapes.crs,
)
europe_shape.reset_index().to_file(snakemake.output.europe_shape)

Expand Down
4 changes: 1 addition & 3 deletions scripts/build_shipping_demand.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,7 @@
# assign ports to nearest region
p = european_ports.to_crs(3857)
r = regions.to_crs(3857)
outflows = (
p.sjoin_nearest(r).groupby("index_right").properties_outflows.sum().div(1e3)
)
outflows = p.sjoin_nearest(r).groupby("name").properties_outflows.sum().div(1e3)

# calculate fraction of each country's port outflows
countries = outflows.index.str[:2]
Expand Down
6 changes: 3 additions & 3 deletions scripts/cluster_gas_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,9 +41,9 @@ def build_clustered_gas_network(df, bus_regions, length_factor=1.25):
for i in [0, 1]:
gdf = gpd.GeoDataFrame(geometry=df[f"point{i}"], crs="EPSG:4326")

bus_mapping = gpd.sjoin(
gdf, bus_regions, how="left", predicate="within"
).index_right
bus_mapping = gpd.sjoin(gdf, bus_regions, how="left", predicate="within")[
"name"
]
bus_mapping = bus_mapping.groupby(bus_mapping.index).first()

df[f"bus{i}"] = bus_mapping
Expand Down
2 changes: 1 addition & 1 deletion scripts/determine_availability_matrix_MD_UA.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ def get_wdpa_layer_name(wdpa_fn, layer_substring):
regions_geometry = regions.to_crs(3035).geometry
band, transform = shape_availability(regions_geometry, excluder)
fig, ax = plt.subplots(figsize=(4, 8))
gpd.GeoSeries(regions_geometry.unary_union).plot(ax=ax, color="none")
gpd.GeoSeries(regions_geometry.union_all()).plot(ax=ax, color="none")
show(band, transform=transform, cmap="Greens", ax=ax)
plt.axis("off")
plt.savefig(snakemake.output.availability_map, bbox_inches="tight", dpi=500)
Expand Down
Loading

0 comments on commit c2176b7

Please sign in to comment.