From 4227b525edb253aa14a700f5d96761f9f7829ea2 Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Tue, 15 Jul 2025 14:34:34 +0200 Subject: [PATCH 01/19] Add BMT cli commands --- message_ix_models/cli.py | 1 + message_ix_models/model/bmt/__init__.py | 1 + message_ix_models/model/bmt/cli.py | 74 +++++++++++++++++++++++++ 3 files changed, 76 insertions(+) create mode 100755 message_ix_models/model/bmt/__init__.py create mode 100755 message_ix_models/model/bmt/cli.py diff --git a/message_ix_models/cli.py b/message_ix_models/cli.py index eb850c45a5..683d85aced 100644 --- a/message_ix_models/cli.py +++ b/message_ix_models/cli.py @@ -172,6 +172,7 @@ def _log_threads(k: int, n: int): "message_ix_models.testing.cli", "message_ix_models.util.pooch", "message_ix_models.util.slurm", + "message_ix_models.model.bmt.cli", ] try: diff --git a/message_ix_models/model/bmt/__init__.py b/message_ix_models/model/bmt/__init__.py new file mode 100755 index 0000000000..7f6cc1472f --- /dev/null +++ b/message_ix_models/model/bmt/__init__.py @@ -0,0 +1 @@ +"""BMT runs.""" diff --git a/message_ix_models/model/bmt/cli.py b/message_ix_models/model/bmt/cli.py new file mode 100755 index 0000000000..692f943374 --- /dev/null +++ b/message_ix_models/model/bmt/cli.py @@ -0,0 +1,74 @@ +"""Command-line tools specific to MESSAGEix-BMT runs.""" + +import logging +import re +import click + +from message_ix_models.util.click import common_params + +log = logging.getLogger(__name__) + +# Define a command-line option for BMT-run scenarios +_SCENARIO = click.Option( + ["--scenario", "bmt_scenario"], + default="baseline", + help="The scenario ID of the specific step in the bmt workflow.", +) + + +# Define a Click command group for BMT-related commands +@click.group("bmt", params=[_SCENARIO]) +@click.pass_obj +def cli(context, bmt_scenario): + """MESSAGEix-BMT runs.""" + # Store the scenario in the context for use in commands + context.bmt = bmt_scenario + pass + + +# Define run command +@cli.command("run") +@common_params("dry_run") +@click.option("--from", "truncate_step", help="Run workflow from this step.") +@click.argument("target_step", metavar="TARGET") +@click.pass_obj +def run(context, truncate_step, target_step): + """Run the BMT workflow up to step TARGET. + + --from is interpreted as a regular expression, and the workflow is truncated at + every point matching this expression. + """ + from . 
import workflow
+
+    wf = workflow.generate(context)
+
+    # Compile the regular expression for truncating the workflow
+    try:
+        expr = re.compile(truncate_step.replace("\\", ""))
+    except AttributeError:
+        pass  # truncate_step is None
+    else:
+        # Truncate the workflow at steps matching the expression
+        for step in filter(expr.fullmatch, wf.keys()):
+            log.info(f"Truncate workflow at {step!r}")
+            wf.truncate(step)
+
+    # Compile the regular expression for the target step
+    target_expr = re.compile(target_step)
+    target_steps = sorted(filter(lambda k: target_expr.fullmatch(k), wf.keys()))
+    if len(target_steps) > 1:
+        # If multiple target steps match, create a composite target
+        target_step = "cli-targets"
+        wf.add(target_step, target_steps)
+
+    log.info(f"Execute workflow:\n{wf.describe(target_step)}")
+
+    # If dry_run is enabled, visualize the workflow instead of executing it
+    if context.dry_run:
+        path = context.get_local_path("bmt-workflow.svg")
+        wf.visualize(str(path))
+        log.info(f"Workflow diagram written to {path}")
+        return
+
+    # Run the workflow up to the specified target step
+    wf.run(target_step)

From 39ba553f5d81dc0e36286a99a3261f9d3bbec55f Mon Sep 17 00:00:00 2001
From: yiyi1991
Date: Tue, 15 Jul 2025 17:04:49 +0200
Subject: [PATCH 02/19] Add bmt workflow

---
 message_ix_models/model/bmt/bmt-workflow.svg | 438 +++++++++++++++++++
 message_ix_models/model/bmt/workflow.py      | 199 +++++++++
 2 files changed, 637 insertions(+)
 create mode 100644 message_ix_models/model/bmt/bmt-workflow.svg
 create mode 100755 message_ix_models/model/bmt/workflow.py

diff --git a/message_ix_models/model/bmt/bmt-workflow.svg b/message_ix_models/model/bmt/bmt-workflow.svg
new file mode 100644
index 0000000000..8aa3525ec8
--- /dev/null
+++ b/message_ix_models/model/bmt/bmt-workflow.svg
@@ -0,0 +1,438 @@
[bmt-workflow.svg: 438 lines of Graphviz-generated SVG markup for the BMT workflow diagram. The graph: config and context feed the step chain base -> base cloned -> base solved -> M built -> M solved -> B built -> BM solved -> T built -> BMT baseline solved, which branches to NPi2030 -> NPi_forever and to NDC2030 -> glasgow / glasgow+.]

diff --git a/message_ix_models/model/bmt/workflow.py b/message_ix_models/model/bmt/workflow.py
new file mode 100755
index 0000000000..677b675117
--- /dev/null
+++ b/message_ix_models/model/bmt/workflow.py
@@ -0,0 +1,199 @@
+# The workflow mainly contains the steps to build bmt baseline,
+# as well as the steps to apply policy scenario settings. See bmt-workflow.svg.
+# Example cli command: +# mix-models bmt run --from="base" "glasgow+" --dry-run + +import logging +import message_ix +import pandas as pd +import logging +import os +import genno +import re + +from typing import Optional +from itertools import product +from message_ix import Scenario +from message_ix_models import Context, ScenarioInfo +from message_ix_models.model.build import apply_spec +from message_ix_models.util import ( + package_data_path, + nodes_ex_world, + make_io, + add_par_data, +) +from message_ix_models.workflow import Workflow + +# from message_ix_models.model.buildings.build import build as build_B +from message_ix_models.model.material.build import build_M as build_M +# from message_ix_models.model.transport.build import build as build_T + +log = logging.getLogger(__name__) + +# Functions for individual workflow steps + +def solve( + context: Context, + scenario: message_ix.Scenario, + model="MESSAGE" + ) -> message_ix.Scenario: + + """Plain solve.""" + message_ix.models.DEFAULT_CPLEX_OPTIONS = { + "advind": 0, + "lpmethod": 4, + "threads": 4, + "epopt": 1e-6, + "scaind": -1, + # "predual": 1, + "barcrossalg": 0, + } + + # scenario.solve(model, gams_args=["--cap_comm=0"]) + scenario.solve(model) + scenario.set_as_default() + + return scenario + +def check_context( + context: Context, + scenario: message_ix.Scenario, + ) -> message_ix.Scenario: + + context.print_contents() + + return scenario + +# Main BMT workflow +def generate(context: Context) -> Workflow: + """Create the BMT-run workflow.""" + wf = Workflow(context) + context.ssp = "SSP2" + context.model.regions = "R12" + + # Define model name + model_name = "ixmp://ixmp-dev/MESSAGEix-GLOBIOM BMT-R12" + + wf.add_step( + "base", + None, + target="ixmp://ixmp-dev/SSP_SSP2_v6.1/baseline_DEFAULT_step_4", + # target = f"{model_name}/baseline", + ) + + wf.add_step( + "base cloned", + "base", + check_context, + # target="ixmp://ixmp-dev/SSP_SSP2_v4.0/baseline_DEFAULT_step_4", + target = f"{model_name}/baseline", + clone = dict(keep_solution=False), + ) + + wf.add_step( + "base solved", + "base cloned", + solve, + model = "MESSAGE", + target = f"{model_name}/baseline", + clone = False, + ) + + + wf.add_step( + "M built", + "base solved", + build_M, + target = f"{model_name}/baseline_M", + clone = dict(keep_solution=False), + ) + + wf.add_step( + "M solved", + "M built", + solve, + model = "MESSAGE", + target = f"{model_name}/baseline_M", + clone = False, + ) + + wf.add_step( + "B built", + "M solved", + build_B, + target = f"{model_name}/baseline_BM", #BM later + clone = dict(keep_solution=False), + ) + + wf.add_step( + "BM solved", + "B built", + solve, + model = "MESSAGE", + target = f"{model_name}/baseline_BM", #BM later + clone = dict(keep_solution=False), + ) + + wf.add_step( + "T built", + "BM solved", + build_T, + target = f"{model_name}/baseline_BMT", + clone = dict(keep_solution=False), + ) + + wf.add_step( + "BMT baseline solved", + "T built", + solve, + model = "MESSAGE", + target = f"{model_name}/baseline_BMT", + clone = False, + ) + + wf.add_step( + "NPi2030", + "BMT baseline solved", + solve, + model = "MESSAGE", + target = f"{model_name}/baseline_BMT", + clone = False, + ) + + wf.add_step( + "NPi_forever", + "NPi2030", + solve, + model = "MESSAGE", + target = f"{model_name}/baseline_BMT", + clone = False, + ) + + wf.add_step( + "NDC2030", + "BMT baseline solved", + solve, + model = "MESSAGE", + target = f"{model_name}/baseline_BMT", + clone = False, + ) + + wf.add_step( + "glasgow", + "NDC2030", + solve, + model 
= "MESSAGE", + target = f"{model_name}/baseline_BMT", + clone = False, + ) + + wf.add_step( + "glasgow+", + "NDC2030", + solve, + model = "MESSAGE", + target = f"{model_name}/baseline_BMT", + clone = False, + ) + + return wf From d2f11ff330577ae79902304f6990115895b63a20 Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Tue, 15 Jul 2025 17:06:01 +0200 Subject: [PATCH 03/19] Add context-based material build --- message_ix_models/model/material/build.py | 57 ++++++++++++++++++++++ message_ix_models/model/material/cli.py | 7 +++ message_ix_models/model/material/config.py | 28 +++++++++++ message_ix_models/util/context.py | 6 +++ 4 files changed, 98 insertions(+) create mode 100644 message_ix_models/model/material/config.py diff --git a/message_ix_models/model/material/build.py b/message_ix_models/model/material/build.py index 04af978c8d..1e8fd10460 100644 --- a/message_ix_models/model/material/build.py +++ b/message_ix_models/model/material/build.py @@ -273,3 +273,60 @@ def make_spec(regions: str, materials: str or None = SPEC_LIST) -> Spec: ) from None return s + +# same as build(), but context-based +def build_M( + context: Context, + scenario: message_ix.Scenario, +) -> message_ix.Scenario: + """Set up materials accounting on `scenario`.""" + + # Read config and save to context.material + from message_ix_models.model.material.config import Config + config = Config() + context.material = config + + node_suffix = context.model.regions + + if node_suffix != "R12": + raise NotImplementedError( + "MESSAGEix-Materials is currently only supporting" + " MESSAGEix-GLOBIOM R12 regions" + ) + + if f"{node_suffix}_GLB" not in list(scenario.platform.regions().region): + # Required for material trade model + # TODO Include this in the spec, while not using it as a value for `node_loc` + scenario.platform.add_region(f"{node_suffix}_GLB", "region", "World") + + # Get the specification and apply to the base scenario + spec = make_spec(node_suffix) + apply_spec(scenario, spec, add_data, fast=True) # dry_run=True + + water_dict = pd.read_excel( + package_data_path("material", "other", "water_tec_pars.xlsx"), + sheet_name=None, + ) + scenario.check_out() + for par in water_dict.keys(): + scenario.add_par(par, water_dict[par]) + scenario.commit("add missing water tecs") + + # Adjust exogenous energy demand to incorporate the endogenized sectors + # Adjust the historical activity of the useful level industry technologies + # Coal calibration 2020 + add_ccs_technologies(scenario) + if context.material.old_calib: + modify_demand_and_hist_activity(scenario) + else: + modify_baseyear_bounds(scenario) + last_hist_year = scenario.par("historical_activity")["year_act"].max() + modify_industry_demand(scenario, last_hist_year, context.material.iea_data_path) + add_new_ind_hist_act(scenario, [last_hist_year], context.material.iea_data_path) + add_emission_accounting(scenario) + + if context.material.modify_existing_constraints: + calibrate_existing_constraints(scenario) + + return scenario + diff --git a/message_ix_models/model/material/cli.py b/message_ix_models/model/material/cli.py index 253caa8181..4b8cbab347 100644 --- a/message_ix_models/model/material/cli.py +++ b/message_ix_models/model/material/cli.py @@ -59,6 +59,13 @@ def cli(ssp): def build_scen( context, iea_data_path, tag, mode, scenario_name, old_calib, update_costs ): + # Collect CLI options and store themin context.material + context.material = { + "old_calib": old_calib, + "iea_data_path": iea_data_path, + "modify_existing_constraints": True, # Default value 
from build() function + } + """Build a scenario. Use the --url option to specify the base scenario. If this scenario is on a diff --git a/message_ix_models/model/material/config.py b/message_ix_models/model/material/config.py new file mode 100644 index 0000000000..4f6f51ebfb --- /dev/null +++ b/message_ix_models/model/material/config.py @@ -0,0 +1,28 @@ +# message_ix_models/model/material/config.py +from dataclasses import dataclass + +@dataclass +class Config: + """Configuration for MESSAGEix-Materials + (moving cli options to context so that build material can be called in other workflows). + + This dataclass stores and documents all configuration settings required and used by + :mod:`~message_ix_models.model.material`. It also handles (via + :meth:`.from_context`) loading configuration and values from files like + :file:`config.yaml`, while respecting higher-level configuration, for instance + :attr:`.model.Config.regions`. + """ + + old_calib: bool = False + iea_data_path: str = "P:ene.model\\IEA_database\\Florian\\" + modify_existing_constraints: bool = True # hardcoded to True + + # examples: common knobs (e.g., SSP knobs) here such as + # clinker ratio, reycling rate, etc. + + # @classmethod + # def from_context(cls, context, options=None): + # config = cls() + # if options: + # config = replace(config, **options) # type: ignore + # return config \ No newline at end of file diff --git a/message_ix_models/util/context.py b/message_ix_models/util/context.py index 272d03f117..7c0ce4fac6 100644 --- a/message_ix_models/util/context.py +++ b/message_ix_models/util/context.py @@ -43,6 +43,7 @@ ("message_ix_models.model.config", "model", True, True), ("message_ix_models.report.config", "report", True, False), ("message_ix_models.transport.config", "transport", False, False), + ("message_ix_models.model.material.config", "material", False, False), ) @@ -459,3 +460,8 @@ def write_debug_archive(self) -> None: Shorthand for :meth:`.Config.write_debug_archive`. 
""" self.core.write_debug_archive() + + def print_contents(self): + for attr in dir(self): + if not attr.startswith("__"): + print(f"{attr}: {getattr(self, attr)}") From 88fa9fc31fe13088342665786156665dab7d398d Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Sun, 27 Jul 2025 14:49:24 +0200 Subject: [PATCH 04/19] Add context-based cli build - build based on context.material - default attributes saved in config.py and passed to context.material - mix-models material cli options overwrites defaul attributes - should be compatible for both cli build and workflow build - commented out for now --- message_ix_models/model/material/cli.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/message_ix_models/model/material/cli.py b/message_ix_models/model/material/cli.py index 4b8cbab347..baff9ad874 100644 --- a/message_ix_models/model/material/cli.py +++ b/message_ix_models/model/material/cli.py @@ -59,12 +59,14 @@ def cli(ssp): def build_scen( context, iea_data_path, tag, mode, scenario_name, old_calib, update_costs ): - # Collect CLI options and store themin context.material - context.material = { - "old_calib": old_calib, - "iea_data_path": iea_data_path, - "modify_existing_constraints": True, # Default value from build() function - } + # # Collect CLI options and store them in context.material + # # This overwrites the default values from Config() + # from message_ix_models.model.material.config import Config + # context.material = Config( + # old_calib=old_calib, + # iea_data_path=iea_data_path, + # modify_existing_constraints=True, + # ) # TODO: needs testing """Build a scenario. From c176b0cd4c6a3dea2056e5568591fd8b2610998e Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Mon, 28 Jul 2025 09:47:31 +0200 Subject: [PATCH 05/19] Add buildings config --- message_ix_models/model/buildings/config.py | 23 +++++++++++++++++++++ message_ix_models/util/context.py | 1 + 2 files changed, 24 insertions(+) create mode 100644 message_ix_models/model/buildings/config.py diff --git a/message_ix_models/model/buildings/config.py b/message_ix_models/model/buildings/config.py new file mode 100644 index 0000000000..797ff4339e --- /dev/null +++ b/message_ix_models/model/buildings/config.py @@ -0,0 +1,23 @@ +# message_ix_models/model/buildings/config.py +from dataclasses import dataclass + +@dataclass +class Config: + """Configuration for MESSAGEix-Buildings + (moving cli options to context so that build buildings can be called in other workflows). + + This dataclass stores and documents all configuration settings required and used by + :mod:`~message_ix_models.model.buildings`. It also handles (via + :meth:`.from_context`) loading configuration and values from files like + :file:`config.yaml`, while respecting higher-level configuration, for instance + :attr:`.model.Config.regions`. 
+ """ + + with_materials: bool = True + + # @classmethod + # def from_context(cls, context, options=None): + # config = cls() + # if options: + # config = replace(config, **options) # type: ignore + # return config \ No newline at end of file diff --git a/message_ix_models/util/context.py b/message_ix_models/util/context.py index 7c0ce4fac6..aeaf8dd96d 100644 --- a/message_ix_models/util/context.py +++ b/message_ix_models/util/context.py @@ -44,6 +44,7 @@ ("message_ix_models.report.config", "report", True, False), ("message_ix_models.transport.config", "transport", False, False), ("message_ix_models.model.material.config", "material", False, False), + ("message_ix_models.model.buildings.config", "buildings", False, False), ) From cb77202f2fdb7a44bf9afca064ef017784f338f4 Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Mon, 28 Jul 2025 09:47:56 +0200 Subject: [PATCH 06/19] Add context-based build buildings --- message_ix_models/model/bmt/workflow.py | 2 +- message_ix_models/model/buildings/build.py | 95 ++++++++++++++++++++++ 2 files changed, 96 insertions(+), 1 deletion(-) diff --git a/message_ix_models/model/bmt/workflow.py b/message_ix_models/model/bmt/workflow.py index 677b675117..ce5366788d 100755 --- a/message_ix_models/model/bmt/workflow.py +++ b/message_ix_models/model/bmt/workflow.py @@ -24,7 +24,7 @@ ) from message_ix_models.workflow import Workflow -# from message_ix_models.model.buildings.build import build as build_B +from message_ix_models.model.buildings.build import build_B as build_B from message_ix_models.model.material.build import build_M as build_M # from message_ix_models.model.transport.build import build as build_T diff --git a/message_ix_models/model/buildings/build.py b/message_ix_models/model/buildings/build.py index f74f2b1cf3..fca1787f32 100644 --- a/message_ix_models/model/buildings/build.py +++ b/message_ix_models/model/buildings/build.py @@ -31,6 +31,8 @@ get_region_codes, ) from message_ix_models.util import ( + package_data_path, + private_data_path, load_package_data, make_io, merge_data, @@ -143,6 +145,11 @@ def get_spec(context: Context) -> Spec: s = deepcopy(context["buildings spec"]) + # Read config and save to context.buildings + from message_ix_models.model.buildings.config import Config + config = Config() + context.buildings = config + if context.buildings.with_materials: s.require.set["commodity"].extend(MATERIALS) @@ -821,3 +828,91 @@ def materials( # Concatenate data frames together return {k: pd.concat(v) for k, v in result.items()} + +# works in the same way as main() but applicable for ssp baseline scenarios +def build_B( + context: Context, + scenario: message_ix.Scenario, + ): + """Set up the structure and data for MESSAGEix_Buildings on `scenario`. + + Parameters + ---------- + scenario + Scenario to set up. 
+ """ + info = ScenarioInfo(scenario) + + from message_ix_models.model.buildings.config import Config + config = Config() + context.buildings = config + + scenario.check_out() + + try: + # TODO explain what this is for + scenario.init_set("time_relative") + except ValueError: + pass # Already exists + + # Generate a spec for the model + spec = get_spec(context) + + # Temporary: input for prepare data seperately read from csv + # prices + price_path = private_data_path("buildings", "input_prices_R12.csv") + prices = pd.read_csv(price_path) + + # sturm_r + sturm_r_path = private_data_path("buildings", "resid_sturm.csv") + # sturm_r_path = package_data_path("buildings", "debug-sturm-resid.csv") + sturm_r = pd.read_csv(sturm_r_path, index_col=0) + + # sturm_c + sturm_c_path = private_data_path("buildings", "comm_sturm.csv") + # sturm_c_path = package_data_path("buildings", "debug-sturm-comm.csv") + sturm_c = pd.read_csv(sturm_c_path, index_col=0) + + # # e_use + # e_use_path = package_data_path("buildings", "e_use.csv") + # e_use = pd.read_csv(e_use_path) + + # demand + expr = "(cool|heat|hotwater)" + excl = "v_no_heat" + demand = pd.concat( + [ + # e_use[~e_use.commodity.str.contains("therm")], + sturm_r[sturm_r.commodity.str.contains(expr) & ~sturm_r.commodity.str.contains(excl)], + sturm_c[sturm_c.commodity.str.contains(expr) & ~sturm_c.commodity.str.contains(excl)], + ] + ).assign(level="useful") + # demand.to_csv("debug-demand.csv") + + # Prepare data based on the contents of `scenario` + data = prepare_data( + scenario, + info, + demand, + prices, + sturm_r, + sturm_c, + context.buildings.with_materials, + relations=spec.require.set["relation"], + ) + + # Remove unused commodities and technologies + prune_spec(spec, data) + + # Simple callback for apply_spec() + def _add_data(s, **kw): + return data + + # FIXME check whether this works correctly on the re-solve of a scenario that has + # already been set up + options = dict(fast=True) + build.apply_spec(scenario, spec, _add_data, **options) + + scenario.set_as_default() + + log.info(f"Built {scenario.url} and set as default") \ No newline at end of file From 67594e8ca1d2b21ac27f9503edeb868118a3c1cc Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Tue, 16 Sep 2025 22:13:46 +0200 Subject: [PATCH 07/19] Update build buidlings for SSP baselines --- message_ix_models/data/buildings/set.yaml | 4 +-- message_ix_models/model/buildings/build.py | 32 ++++++++++++++++------ 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/message_ix_models/data/buildings/set.yaml b/message_ix_models/data/buildings/set.yaml index 0afec57b4b..6652be046d 100644 --- a/message_ix_models/data/buildings/set.yaml +++ b/message_ix_models/data/buildings/set.yaml @@ -100,13 +100,13 @@ relation: # Relations for which data should be adapted require: - BCA_Emission - - CH4_Emission + # - CH4_Emission - CO2_r_c - CO_Emission - HFC_Emission - HFC_foam_red - HFC_rescom_red - - N2O_Emission + #- N2O_Emission - NH3_Emission - NOx_Emission - OCA_Emission diff --git a/message_ix_models/model/buildings/build.py b/message_ix_models/model/buildings/build.py index fca1787f32..5ec3b70b79 100644 --- a/message_ix_models/model/buildings/build.py +++ b/message_ix_models/model/buildings/build.py @@ -308,7 +308,15 @@ def load_config(context: Context) -> None: # Generate technologies that replace corresponding *_rc|RC in the base model expr = re.compile("_(rc|RC)$") + + # Technologies that should not be transformed to afofi + exclude_techs = {"sp_el_RC", "sp_el_RC_RT"} + for t in 
filter(lambda x: expr.search(x.id), get_codes("technology")): + # Skip technologies that should not be transformed + if t.id in exclude_techs: + continue + # Generate a new Code object, preserving annotations new = deepcopy(t) new.id = expr.sub("_afofi", t.id) @@ -533,9 +541,10 @@ def prepare_data( )["technology"].unique() # Mapping from source to generated names for scale_and_replace + exclude_techs = {"sp_el_RC", "sp_el_RC_RT"} # Exclude technologies that should not be transformed to afofi replace = { "commodity": c_map, - "technology": {t: re.sub("(rc|RC)", "afofi", t) for t in rc_techs}, + "technology": {t: re.sub("(rc|RC)", "afofi", t) for t in rc_techs if t not in exclude_techs}, } # Compute shares with dimensions (t, n) for scaling parameter data t_shares = get_afofi_technology_shares(c_share, replace["technology"].keys()) @@ -569,8 +578,7 @@ def prepare_data( else: tech_new = f"{fuel}_" + commodity.replace(f"_{fuel}", "") - # commented: for debugging - # print(f"{fuel = }", f"{commodity = }", f"{tech_new = }", sep="\n") + print(f" Commodity: {commodity} -> Tech: {tech_new}") # Modify data for name, filters, extra in ( # type: ignore @@ -593,6 +601,12 @@ def prepare_data( tmp = {k: pd.concat(v) for k, v in data.items()} adapt_emission_factors(tmp) merge_data(result, tmp) + + # Add demand data - append to existing demand if it exists + if "demand" in result: + result["demand"] = pd.concat([result["demand"], demand]) + else: + result["demand"] = demand log.info( "Prepared:\n" + "\n".join(f"{len(v)} obs for {k!r}" for k, v in result.items()) @@ -873,21 +887,21 @@ def build_B( # sturm_c_path = package_data_path("buildings", "debug-sturm-comm.csv") sturm_c = pd.read_csv(sturm_c_path, index_col=0) - # # e_use - # e_use_path = package_data_path("buildings", "e_use.csv") - # e_use = pd.read_csv(e_use_path) + # e_use + e_use_path = private_data_path("buildings", "e_use.csv") + e_use = pd.read_csv(e_use_path, index_col=0) # demand - expr = "(cool|heat|hotwater)" + expr = "(cool|heat|hotwater|floor|other_uses)" excl = "v_no_heat" demand = pd.concat( [ - # e_use[~e_use.commodity.str.contains("therm")], + e_use[~e_use.commodity.str.contains("therm")], sturm_r[sturm_r.commodity.str.contains(expr) & ~sturm_r.commodity.str.contains(excl)], sturm_c[sturm_c.commodity.str.contains(expr) & ~sturm_c.commodity.str.contains(excl)], ] ).assign(level="useful") - # demand.to_csv("debug-demand.csv") + demand.to_csv("debug-demand.csv") # Prepare data based on the contents of `scenario` data = prepare_data( From d6e54bd5887a16c4f6b1cec121cda4cca3b44eed Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Tue, 16 Sep 2025 22:24:52 +0200 Subject: [PATCH 08/19] Apply pre-commit auto-fixes --- message_ix_models/model/bmt/cli.py | 1 + message_ix_models/model/bmt/workflow.py | 137 +++++++++----------- message_ix_models/model/buildings/build.py | 41 ++++-- message_ix_models/model/buildings/config.py | 14 +- message_ix_models/model/material/build.py | 18 ++- message_ix_models/model/material/config.py | 20 +-- 6 files changed, 119 insertions(+), 112 deletions(-) diff --git a/message_ix_models/model/bmt/cli.py b/message_ix_models/model/bmt/cli.py index 692f943374..fe20d8e12d 100755 --- a/message_ix_models/model/bmt/cli.py +++ b/message_ix_models/model/bmt/cli.py @@ -2,6 +2,7 @@ import logging import re + import click from message_ix_models.util.click import common_params diff --git a/message_ix_models/model/bmt/workflow.py b/message_ix_models/model/bmt/workflow.py index ce5366788d..823525d9b6 100755 --- 
a/message_ix_models/model/bmt/workflow.py +++ b/message_ix_models/model/bmt/workflow.py @@ -1,52 +1,35 @@ # The workflow mainly contains the steps to build bmt baseline, -# as well as the steps to apply policy scenario settings. See bmt-workflow.svg. +# as well as the steps to apply policy scenario settings. See bmt-workflow.svg. # Example cli command: # mix-models bmt run --from="base" "glasgow+" --dry-run import logging + import message_ix -import pandas as pd -import logging -import os -import genno -import re - -from typing import Optional -from itertools import product -from message_ix import Scenario -from message_ix_models import Context, ScenarioInfo -from message_ix_models.model.build import apply_spec -from message_ix_models.util import ( - package_data_path, - nodes_ex_world, - make_io, - add_par_data, -) -from message_ix_models.workflow import Workflow +from message_ix_models import Context from message_ix_models.model.buildings.build import build_B as build_B from message_ix_models.model.material.build import build_M as build_M -# from message_ix_models.model.transport.build import build as build_T +from message_ix_models.model.transport.build import main as build_T +from message_ix_models.workflow import Workflow log = logging.getLogger(__name__) # Functions for individual workflow steps -def solve( - context: Context, - scenario: message_ix.Scenario, - model="MESSAGE" - ) -> message_ix.Scenario: +def solve( + context: Context, scenario: message_ix.Scenario, model="MESSAGE" +) -> message_ix.Scenario: """Plain solve.""" message_ix.models.DEFAULT_CPLEX_OPTIONS = { - "advind": 0, - "lpmethod": 4, - "threads": 4, - "epopt": 1e-6, - "scaind": -1, - # "predual": 1, - "barcrossalg": 0, + "advind": 0, + "lpmethod": 4, + "threads": 4, + "epopt": 1e-6, + "scaind": -1, + # "predual": 1, + "barcrossalg": 0, } # scenario.solve(model, gams_args=["--cap_comm=0"]) @@ -55,15 +38,16 @@ def solve( return scenario -def check_context( - context: Context, - scenario: message_ix.Scenario, - ) -> message_ix.Scenario: +def check_context( + context: Context, + scenario: message_ix.Scenario, +) -> message_ix.Scenario: context.print_contents() return scenario + # Main BMT workflow def generate(context: Context) -> Workflow: """Create the BMT-run workflow.""" @@ -77,7 +61,7 @@ def generate(context: Context) -> Workflow: wf.add_step( "base", None, - target="ixmp://ixmp-dev/SSP_SSP2_v6.1/baseline_DEFAULT_step_4", + target="ixmp://ixmp-dev/SSP_SSP2_v6.1/baseline_DEFAULT_step_4", # target = f"{model_name}/baseline", ) @@ -85,115 +69,114 @@ def generate(context: Context) -> Workflow: "base cloned", "base", check_context, - # target="ixmp://ixmp-dev/SSP_SSP2_v4.0/baseline_DEFAULT_step_4", - target = f"{model_name}/baseline", - clone = dict(keep_solution=False), + # target="ixmp://ixmp-dev/SSP_SSP2_v4.0/baseline_DEFAULT_step_4", + target=f"{model_name}/baseline", + clone=dict(keep_solution=False), ) wf.add_step( "base solved", "base cloned", solve, - model = "MESSAGE", - target = f"{model_name}/baseline", - clone = False, + model="MESSAGE", + target=f"{model_name}/baseline", + clone=False, ) - wf.add_step( - "M built", - "base solved", + "M built", + "base solved", build_M, - target = f"{model_name}/baseline_M", - clone = dict(keep_solution=False), + target=f"{model_name}/baseline_M", + clone=dict(keep_solution=False), ) wf.add_step( "M solved", "M built", solve, - model = "MESSAGE", - target = f"{model_name}/baseline_M", - clone = False, + model="MESSAGE", + target=f"{model_name}/baseline_M", + clone=False, ) 
wf.add_step( "B built", "M solved", build_B, - target = f"{model_name}/baseline_BM", #BM later - clone = dict(keep_solution=False), + target=f"{model_name}/baseline_BM", # BM later + clone=dict(keep_solution=False), ) wf.add_step( "BM solved", "B built", solve, - model = "MESSAGE", - target = f"{model_name}/baseline_BM", #BM later - clone = dict(keep_solution=False), + model="MESSAGE", + target=f"{model_name}/baseline_BM", # BM later + clone=dict(keep_solution=False), ) wf.add_step( "T built", "BM solved", build_T, - target = f"{model_name}/baseline_BMT", - clone = dict(keep_solution=False), - ) + target=f"{model_name}/baseline_BMT", + clone=dict(keep_solution=False), + ) wf.add_step( "BMT baseline solved", "T built", solve, - model = "MESSAGE", - target = f"{model_name}/baseline_BMT", - clone = False, + model="MESSAGE", + target=f"{model_name}/baseline_BMT", + clone=False, ) wf.add_step( "NPi2030", "BMT baseline solved", solve, - model = "MESSAGE", - target = f"{model_name}/baseline_BMT", - clone = False, + model="MESSAGE", + target=f"{model_name}/baseline_BMT", + clone=False, ) wf.add_step( "NPi_forever", "NPi2030", solve, - model = "MESSAGE", - target = f"{model_name}/baseline_BMT", - clone = False, + model="MESSAGE", + target=f"{model_name}/baseline_BMT", + clone=False, ) wf.add_step( "NDC2030", "BMT baseline solved", solve, - model = "MESSAGE", - target = f"{model_name}/baseline_BMT", - clone = False, + model="MESSAGE", + target=f"{model_name}/baseline_BMT", + clone=False, ) wf.add_step( "glasgow", "NDC2030", solve, - model = "MESSAGE", - target = f"{model_name}/baseline_BMT", - clone = False, + model="MESSAGE", + target=f"{model_name}/baseline_BMT", + clone=False, ) wf.add_step( "glasgow+", "NDC2030", solve, - model = "MESSAGE", - target = f"{model_name}/baseline_BMT", - clone = False, + model="MESSAGE", + target=f"{model_name}/baseline_BMT", + clone=False, ) return wf diff --git a/message_ix_models/model/buildings/build.py b/message_ix_models/model/buildings/build.py index 5ec3b70b79..b37163c80f 100644 --- a/message_ix_models/model/buildings/build.py +++ b/message_ix_models/model/buildings/build.py @@ -31,12 +31,11 @@ get_region_codes, ) from message_ix_models.util import ( - package_data_path, - private_data_path, load_package_data, make_io, merge_data, nodes_ex_world, + private_data_path, ) from .rc_afofi import get_afofi_commodity_shares, get_afofi_technology_shares @@ -147,6 +146,7 @@ def get_spec(context: Context) -> Spec: # Read config and save to context.buildings from message_ix_models.model.buildings.config import Config + config = Config() context.buildings = config @@ -308,15 +308,15 @@ def load_config(context: Context) -> None: # Generate technologies that replace corresponding *_rc|RC in the base model expr = re.compile("_(rc|RC)$") - - # Technologies that should not be transformed to afofi + + # Technologies that should not be transformed to afofi exclude_techs = {"sp_el_RC", "sp_el_RC_RT"} - + for t in filter(lambda x: expr.search(x.id), get_codes("technology")): # Skip technologies that should not be transformed if t.id in exclude_techs: continue - + # Generate a new Code object, preserving annotations new = deepcopy(t) new.id = expr.sub("_afofi", t.id) @@ -541,10 +541,15 @@ def prepare_data( )["technology"].unique() # Mapping from source to generated names for scale_and_replace - exclude_techs = {"sp_el_RC", "sp_el_RC_RT"} # Exclude technologies that should not be transformed to afofi + # Exclude technologies that should not be transformed to afofi + exclude_techs = 
{"sp_el_RC", "sp_el_RC_RT"} replace = { "commodity": c_map, - "technology": {t: re.sub("(rc|RC)", "afofi", t) for t in rc_techs if t not in exclude_techs}, + "technology": { + t: re.sub("(rc|RC)", "afofi", t) + for t in rc_techs + if t not in exclude_techs + }, } # Compute shares with dimensions (t, n) for scaling parameter data t_shares = get_afofi_technology_shares(c_share, replace["technology"].keys()) @@ -601,7 +606,7 @@ def prepare_data( tmp = {k: pd.concat(v) for k, v in data.items()} adapt_emission_factors(tmp) merge_data(result, tmp) - + # Add demand data - append to existing demand if it exists if "demand" in result: result["demand"] = pd.concat([result["demand"], demand]) @@ -843,11 +848,12 @@ def materials( # Concatenate data frames together return {k: pd.concat(v) for k, v in result.items()} + # works in the same way as main() but applicable for ssp baseline scenarios def build_B( context: Context, scenario: message_ix.Scenario, - ): +): """Set up the structure and data for MESSAGEix_Buildings on `scenario`. Parameters @@ -858,6 +864,7 @@ def build_B( info = ScenarioInfo(scenario) from message_ix_models.model.buildings.config import Config + config = Config() context.buildings = config @@ -896,9 +903,15 @@ def build_B( excl = "v_no_heat" demand = pd.concat( [ - e_use[~e_use.commodity.str.contains("therm")], - sturm_r[sturm_r.commodity.str.contains(expr) & ~sturm_r.commodity.str.contains(excl)], - sturm_c[sturm_c.commodity.str.contains(expr) & ~sturm_c.commodity.str.contains(excl)], + e_use[~e_use.commodity.str.contains("therm")], + sturm_r[ + sturm_r.commodity.str.contains(expr) + & ~sturm_r.commodity.str.contains(excl) + ], + sturm_c[ + sturm_c.commodity.str.contains(expr) + & ~sturm_c.commodity.str.contains(excl) + ], ] ).assign(level="useful") demand.to_csv("debug-demand.csv") @@ -929,4 +942,4 @@ def _add_data(s, **kw): scenario.set_as_default() - log.info(f"Built {scenario.url} and set as default") \ No newline at end of file + log.info(f"Built {scenario.url} and set as default") diff --git a/message_ix_models/model/buildings/config.py b/message_ix_models/model/buildings/config.py index 797ff4339e..a8dc0fd41f 100644 --- a/message_ix_models/model/buildings/config.py +++ b/message_ix_models/model/buildings/config.py @@ -1,10 +1,12 @@ # message_ix_models/model/buildings/config.py from dataclasses import dataclass + @dataclass class Config: - """Configuration for MESSAGEix-Buildings - (moving cli options to context so that build buildings can be called in other workflows). + """Configuration for MESSAGEix-Buildings + (moving cli options to context so that build buildings can be called + in other workflows). This dataclass stores and documents all configuration settings required and used by :mod:`~message_ix_models.model.buildings`. It also handles (via @@ -12,12 +14,12 @@ class Config: :file:`config.yaml`, while respecting higher-level configuration, for instance :attr:`.model.Config.regions`. 
""" - + with_materials: bool = True - + # @classmethod # def from_context(cls, context, options=None): - # config = cls() + # config = cls() # if options: # config = replace(config, **options) # type: ignore - # return config \ No newline at end of file + # return config diff --git a/message_ix_models/model/material/build.py b/message_ix_models/model/material/build.py index 1e8fd10460..c700d2f400 100644 --- a/message_ix_models/model/material/build.py +++ b/message_ix_models/model/material/build.py @@ -3,6 +3,7 @@ from typing import Any, Optional import message_ix +import pandas as pd from message_ix_models import Context from message_ix_models.model.build import apply_spec @@ -274,6 +275,7 @@ def make_spec(regions: str, materials: str or None = SPEC_LIST) -> Spec: return s + # same as build(), but context-based def build_M( context: Context, @@ -283,6 +285,7 @@ def build_M( # Read config and save to context.material from message_ix_models.model.material.config import Config + config = Config() context.material = config @@ -315,18 +318,21 @@ def build_M( # Adjust exogenous energy demand to incorporate the endogenized sectors # Adjust the historical activity of the useful level industry technologies # Coal calibration 2020 - add_ccs_technologies(scenario) + # add_ccs_technologies(scenario) # TODO: Function not found if context.material.old_calib: modify_demand_and_hist_activity(scenario) else: - modify_baseyear_bounds(scenario) - last_hist_year = scenario.par("historical_activity")["year_act"].max() - modify_industry_demand(scenario, last_hist_year, context.material.iea_data_path) - add_new_ind_hist_act(scenario, [last_hist_year], context.material.iea_data_path) + # modify_baseyear_bounds(scenario) # TODO: Function not found + # last_hist_year = scenario.par("historical_activity")["year_act"].max() + # modify_industry_demand(scenario, last_hist_year, + # context.material.iea_data_path) + # TODO: Function not found + # add_new_ind_hist_act(scenario, [last_hist_year], + # context.material.iea_data_path) + # TODO: Function not found add_emission_accounting(scenario) if context.material.modify_existing_constraints: calibrate_existing_constraints(scenario) return scenario - diff --git a/message_ix_models/model/material/config.py b/message_ix_models/model/material/config.py index 4f6f51ebfb..61788c8726 100644 --- a/message_ix_models/model/material/config.py +++ b/message_ix_models/model/material/config.py @@ -1,10 +1,12 @@ # message_ix_models/model/material/config.py from dataclasses import dataclass + @dataclass class Config: - """Configuration for MESSAGEix-Materials - (moving cli options to context so that build material can be called in other workflows). + """Configuration for MESSAGEix-Materials + (moving cli options to context so that build material can be called + in other workflows). This dataclass stores and documents all configuration settings required and used by :mod:`~message_ix_models.model.material`. It also handles (via @@ -12,17 +14,17 @@ class Config: :file:`config.yaml`, while respecting higher-level configuration, for instance :attr:`.model.Config.regions`. """ - + old_calib: bool = False iea_data_path: str = "P:ene.model\\IEA_database\\Florian\\" - modify_existing_constraints: bool = True # hardcoded to True + modify_existing_constraints: bool = True # hardcoded to True + + # examples: common knobs (e.g., SSP knobs) here such as + # clinker ratio, reycling rate, etc. - # examples: common knobs (e.g., SSP knobs) here such as - # clinker ratio, reycling rate, etc. 
- # @classmethod # def from_context(cls, context, options=None): - # config = cls() + # config = cls() # if options: # config = replace(config, **options) # type: ignore - # return config \ No newline at end of file + # return config From dc46fba31d220f60302fa015b0cdfe48c9f89b7e Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Mon, 8 Sep 2025 18:13:46 +0200 Subject: [PATCH 09/19] Add a placeholder for add_dac to skip errors --- message_ix_models/tools/add_dac.py | 37 ++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 message_ix_models/tools/add_dac.py diff --git a/message_ix_models/tools/add_dac.py b/message_ix_models/tools/add_dac.py new file mode 100644 index 0000000000..d4f9eafdf8 --- /dev/null +++ b/message_ix_models/tools/add_dac.py @@ -0,0 +1,37 @@ +"""DAC (Direct Air Capture) tool placeholder. + +This module should provide functions for adding DAC technologies to MESSAGEix scenarios. +Currently this is waiting to be merged from another branch. +This file should be removed when the full functionality is merged. +""" + +import logging +from typing import Any + +log = logging.getLogger(__name__) + + +def add_tech(*args, **kwargs) -> Any: + """Add DAC technology to a scenario. + + This is a stub implementation. The full functionality is available in + another branch. + + Args: + *args: Variable length argument list + **kwargs: Arbitrary keyword arguments + + Returns: + Any: Placeholder return value + + Raises: + NotImplementedError: Always raises this error as the function is not implemented + """ + log.warning( + "add_tech function is not implemented in this branch. " + "The full DAC functionality is available in another branch." + ) + raise NotImplementedError( + "add_tech function is not implemented in this branch. " + "Please use the branch that contains the full DAC implementation." 
+ ) From 880f6e5de6708116156191b692cc6c9e86105d6a Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Tue, 16 Sep 2025 22:38:50 +0200 Subject: [PATCH 10/19] Add optional way to add afofi and update workflow --- message_ix_models/model/bmt/workflow.py | 18 +++-- message_ix_models/model/buildings/build.py | 81 +++++++++++++++++----- 2 files changed, 77 insertions(+), 22 deletions(-) diff --git a/message_ix_models/model/bmt/workflow.py b/message_ix_models/model/bmt/workflow.py index 823525d9b6..8be476eed7 100755 --- a/message_ix_models/model/bmt/workflow.py +++ b/message_ix_models/model/bmt/workflow.py @@ -10,9 +10,10 @@ from message_ix_models import Context from message_ix_models.model.buildings.build import build_B as build_B from message_ix_models.model.material.build import build_M as build_M -from message_ix_models.model.transport.build import main as build_T from message_ix_models.workflow import Workflow +# from message_ix_models.model.transport.build import build as build_T + log = logging.getLogger(__name__) # Functions for individual workflow steps @@ -49,6 +50,13 @@ def check_context( # Main BMT workflow +# Baseline starts from SSP_SSP2_v*/baseline_DEFAULT_step_4 +# Baseline with M added starts from SSP_SSP2_v*/baseline_DEFAULT_step_12 + +# Runing test with the order M->BM->others +# Ideal order should be M->MT->BMT + + def generate(context: Context) -> Workflow: """Create the BMT-run workflow.""" wf = Workflow(context) @@ -104,7 +112,9 @@ def generate(context: Context) -> Workflow: "B built", "M solved", build_B, - target=f"{model_name}/baseline_BM", # BM later + target=f"{model_name}/baseline_BM", + # 2025-09-08 baseline_M copied from SSP_SSP2_v6.1/baseline_DEFAULT_step_12 + # or SSP_SSP2_v6.1/baseline_DEFAULT for testing clone=dict(keep_solution=False), ) @@ -113,14 +123,14 @@ def generate(context: Context) -> Workflow: "B built", solve, model="MESSAGE", - target=f"{model_name}/baseline_BM", # BM later + target=f"{model_name}/baseline_BM", clone=dict(keep_solution=False), ) wf.add_step( "T built", "BM solved", - build_T, + # build_T, # TODO: Uncomment when transport build is available target=f"{model_name}/baseline_BMT", clone=dict(keep_solution=False), ) diff --git a/message_ix_models/model/buildings/build.py b/message_ix_models/model/buildings/build.py index b37163c80f..7b645d8996 100644 --- a/message_ix_models/model/buildings/build.py +++ b/message_ix_models/model/buildings/build.py @@ -500,6 +500,7 @@ def prepare_data( sturm_c: pd.DataFrame, with_materials: bool, relations: list[str], + afofi_demand: pd.DataFrame = None, ) -> "ParameterData": """Derive data for MESSAGEix-Buildings from `scenario`.""" @@ -509,27 +510,65 @@ def prepare_data( # Mapping from original to generated commodity names c_map = {f"rc_{name}": f"afofi_{name}" for name in ("spec", "therm")} - # Retrieve shares of AFOFI within rc_spec or rc_therm; dimensions (c, n). These - # values are based on 2010 and 2015 data; see the code for details. 
- c_share = get_afofi_commodity_shares() + # Handle AFOFI demand - either from CSV file or calculated from shares + # NAVIGATE workflow 2023 should work well with either approach + if afofi_demand is not None: + # Use provided AFOFI demand from CSV file directly - no scaling needed + log.info("Using provided AFOFI demand from CSV file") + result["demand"] = afofi_demand - # Retrieve existing demands - filters: dict[str, Iterable] = dict(c=["rc_spec", "rc_therm"], y=info.Y) - afofi_dd = data_for_quantity( - "par", "demand", "value", scenario, config=dict(filters=filters) - ) + # Still need to create AFOFI technologies, but without scaling + # Identify technologies that output to rc_spec or rc_therm + rc_techs = scenario.par( + "output", filters={"commodity": ["rc_spec", "rc_therm"]} + )["technology"].unique() - # On a second pass (after main() has already run once), rc_spec and rc_therm have - # been stripped out, so `afofi_dd` is empty; skip manipulating it. - if len(afofi_dd): - # - Compute a share (c, n) of rc_* demand (c, n, …) = afofi_* demand - # - Relabel commodities. - tmp = relabel(mul(afofi_dd, c_share), {"c": c_map}) + # Mapping from source to generated names for scale_and_replace + # Exclude technologies that should not be transformed to afofi + exclude_techs = {"sp_el_RC", "sp_el_RC_RT"} + replace = { + "commodity": c_map, + "technology": { + t: re.sub("(rc|RC)", "afofi", t) + for t in rc_techs + if t not in exclude_techs + }, + } + + # Use 1.0 scaling since we have actual demand data + # To match the merge_data call below + t_shares = Quantity(1.0, name="afofi tech share") + + merge_data( + result, + # TODO Remove exclusion once message-ix-models >2025.1.10 is released + scale_and_replace( # type: ignore [arg-type] + scenario, replace, t_shares, relations=relations, relax=0.05 + ), + ) + else: + # Original method: calculate AFOFI demand from shares, added by PNK 2023 + # Retrieve shares of AFOFI within rc_spec or rc_therm; dimensions (c, n). These + # values are based on 2010 and 2015 data; see the code for details. + c_share = get_afofi_commodity_shares() + + # Retrieve existing demands + filters: dict[str, Iterable] = dict(c=["rc_spec", "rc_therm"], y=info.Y) + afofi_dd = data_for_quantity( + "par", "demand", "value", scenario, config=dict(filters=filters) + ) + + # On a second pass (after main() has already run once), rc_spec and rc_therm + # have been stripped out, so `afofi_dd` is empty; skip manipulating it. + if len(afofi_dd): + # - Compute a share (c, n) of rc_* demand (c, n, …) = afofi_* demand + # - Relabel commodities. + tmp = relabel(mul(afofi_dd, c_share), {"c": c_map}) - # Convert back to a MESSAGE data frame - dims = dict(commodity="c", node="n", level="l", year="y", time="h") - # TODO Remove typing exclusion once message_ix is updated for genno 1.25 - result.update(as_message_df(tmp, "demand", dims, {})) # type: ignore [arg-type] + # Convert back to a MESSAGE data frame + dims = dict(commodity="c", node="n", level="l", year="y", time="h") + # TODO Remove typing exclusion once message_ix is updated for genno 1.25 + result.update(as_message_df(tmp, "demand", dims, {})) # type: ignore [arg-type] # Copy technology parameter values from rc_spec and rc_therm to new afofi. 
# Again, once rc_(spec|therm) are stripped, .par() returns nothing here, so @@ -693,6 +732,7 @@ def main( sturm_c, context.buildings.with_materials, relations=spec.require.set["relation"], + afofi_demand=None, # Use calculated AFOFI demand ) # Remove unused commodities and technologies @@ -898,6 +938,10 @@ def build_B( e_use_path = private_data_path("buildings", "e_use.csv") e_use = pd.read_csv(e_use_path, index_col=0) + # afofio + afofio_path = private_data_path("buildings", "afofio_demand.csv") + afofio = pd.read_csv(afofio_path, index_col=0) + # demand expr = "(cool|heat|hotwater|floor|other_uses)" excl = "v_no_heat" @@ -926,6 +970,7 @@ def build_B( sturm_c, context.buildings.with_materials, relations=spec.require.set["relation"], + afofi_demand=afofio, ) # Remove unused commodities and technologies From 3db56c1a54d839258e0671d70618109f54be80a4 Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Tue, 16 Sep 2025 22:43:54 +0200 Subject: [PATCH 11/19] Update context build material --- message_ix_models/model/material/build.py | 70 +++++++++++++--------- message_ix_models/model/material/config.py | 24 ++++---- 2 files changed, 55 insertions(+), 39 deletions(-) diff --git a/message_ix_models/model/material/build.py b/message_ix_models/model/material/build.py index c700d2f400..fb62b6d44f 100644 --- a/message_ix_models/model/material/build.py +++ b/message_ix_models/model/material/build.py @@ -3,7 +3,6 @@ from typing import Any, Optional import message_ix -import pandas as pd from message_ix_models import Context from message_ix_models.model.build import apply_spec @@ -283,12 +282,6 @@ def build_M( ) -> message_ix.Scenario: """Set up materials accounting on `scenario`.""" - # Read config and save to context.material - from message_ix_models.model.material.config import Config - - config = Config() - context.material = config - node_suffix = context.model.regions if node_suffix != "R12": @@ -302,37 +295,56 @@ def build_M( # TODO Include this in the spec, while not using it as a value for `node_loc` scenario.platform.add_region(f"{node_suffix}_GLB", "region", "World") + # Read config and add build options to context.material + from message_ix_models.model.material.config import Config + from message_ix_models.model.material.util import read_config + + read_config() + + config = Config.from_context(context) + # Add all config attributes to the existing context.material dictionary + from dataclasses import asdict + + context.material.update(asdict(config)) + # Get the specification and apply to the base scenario spec = make_spec(node_suffix) apply_spec(scenario, spec, add_data, fast=True) # dry_run=True - water_dict = pd.read_excel( - package_data_path("material", "other", "water_tec_pars.xlsx"), - sheet_name=None, - ) - scenario.check_out() - for par in water_dict.keys(): - scenario.add_par(par, water_dict[par]) - scenario.commit("add missing water tecs") + # Repeating the same steps as in build() + add_water_par_data(scenario) # Adjust exogenous energy demand to incorporate the endogenized sectors # Adjust the historical activity of the useful level industry technologies # Coal calibration 2020 - # add_ccs_technologies(scenario) # TODO: Function not found - if context.material.old_calib: + if context.material["old_calib"]: modify_demand_and_hist_activity(scenario) else: - # modify_baseyear_bounds(scenario) # TODO: Function not found - # last_hist_year = scenario.par("historical_activity")["year_act"].max() - # modify_industry_demand(scenario, last_hist_year, - # context.material.iea_data_path) - # 
TODO: Function not found - # add_new_ind_hist_act(scenario, [last_hist_year], - # context.material.iea_data_path) - # TODO: Function not found - add_emission_accounting(scenario) - - if context.material.modify_existing_constraints: - calibrate_existing_constraints(scenario) + scenario.check_out() + for k, v in gen_other_ind_demands(get_ssp_from_context(context)).items(): + scenario.add_par( + "demand", + v[ + v["year"].isin( + scenario.vintage_and_active_years()["year_act"].unique() + ) + ], + ) + scenario.commit("add new other industry demands") + # overwrite non-Materials industry technology calibration + calib_data = get_hist_act( + scenario, [1990, 1995, 2000, 2010, 2015, 2020], use_cached=True + ) + scenario.check_out() + for k, v in calib_data.items(): + scenario.add_par(k, v) + scenario.commit("new calibration of other industry") + add_emission_accounting(scenario) + add_cement_ccs_co2_tr_relation(scenario) + + if context.material["modify_existing_constraints"]: + calibrate_existing_constraints( + context, scenario, context.material["iea_data_path"] + ) return scenario diff --git a/message_ix_models/model/material/config.py b/message_ix_models/model/material/config.py index 61788c8726..5fb602d429 100644 --- a/message_ix_models/model/material/config.py +++ b/message_ix_models/model/material/config.py @@ -5,8 +5,8 @@ @dataclass class Config: """Configuration for MESSAGEix-Materials - (moving cli options to context so that build material can be called - in other workflows). + (moving cli options to context so that build material can be called in other + workflows). This dataclass stores and documents all configuration settings required and used by :mod:`~message_ix_models.model.material`. It also handles (via @@ -19,12 +19,16 @@ class Config: iea_data_path: str = "P:ene.model\\IEA_database\\Florian\\" modify_existing_constraints: bool = True # hardcoded to True - # examples: common knobs (e.g., SSP knobs) here such as - # clinker ratio, reycling rate, etc. + @classmethod + def from_context(cls, context, options=None): + """Configure `context` for building MESSAGEix-Materials. - # @classmethod - # def from_context(cls, context, options=None): - # config = cls() - # if options: - # config = replace(config, **options) # type: ignore - # return config + :py:`context.material` is updated with configuration values from this Config + class. 
+
+        """
+        from dataclasses import replace
+
+        config = cls()
+        if options:
+            config = replace(config, **options)
+        return config

From c98269bc90efc400bd8789806121b12193786666 Mon Sep 17 00:00:00 2001
From: yiyi1991
Date: Tue, 16 Sep 2025 22:47:56 +0200
Subject: [PATCH 12/19] Assign useful energy and demand levels and additional notes

---
 message_ix_models/model/buildings/build.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/message_ix_models/model/buildings/build.py b/message_ix_models/model/buildings/build.py
index 7b645d8996..4ea869e953 100644
--- a/message_ix_models/model/buildings/build.py
+++ b/message_ix_models/model/buildings/build.py
@@ -511,7 +511,6 @@ def prepare_data(
     c_map = {f"rc_{name}": f"afofi_{name}" for name in ("spec", "therm")}
 
     # Handle AFOFI demand - either from CSV file or calculated from shares
-    # NAVIGATE workflow 2023 should work well with either approach
     if afofi_demand is not None:
         # Use provided AFOFI demand from CSV file directly - no scaling needed
         log.info("Using provided AFOFI demand from CSV file")
@@ -547,7 +546,7 @@ def prepare_data(
             ),
         )
     else:
-        # Original method: calculate AFOFI demand from shares, added by PNK 2023
+        # Original method: calculate AFOFI demand from shares
         # Retrieve shares of AFOFI within rc_spec or rc_therm; dimensions (c, n). These
         # values are based on 2010 and 2015 data; see the code for details.
         c_share = get_afofi_commodity_shares()
@@ -957,7 +956,12 @@ def build_B(
             & ~sturm_c.commodity.str.contains(excl)
         ],
     ]
-    ).assign(level="useful")
+    )
+
+    # Assign useful energy and demand levels
+    demand = demand.assign(
+        level=demand.commodity.apply(lambda x: "demand" if "floor" in x else "useful")
+    )
     demand.to_csv("debug-demand.csv")
 
     # Prepare data based on the contents of `scenario`

From 3eba7da86ef32865fc8b8f1c34c6e85822175b17 Mon Sep 17 00:00:00 2001
From: yiyi1991
Date: Tue, 16 Sep 2025 23:34:03 +0200
Subject: [PATCH 13/19] Standardize how to treat material double-counting

---
 message_ix_models/model/bmt/__init__.py    |   4 +
 message_ix_models/model/bmt/utils.py       | 126 +++++++++++++++++++++
 message_ix_models/model/buildings/build.py |  55 ++------
 3 files changed, 142 insertions(+), 43 deletions(-)
 create mode 100644 message_ix_models/model/bmt/utils.py

diff --git a/message_ix_models/model/bmt/__init__.py b/message_ix_models/model/bmt/__init__.py
index 7f6cc1472f..7db7c764f2 100755
--- a/message_ix_models/model/bmt/__init__.py
+++ b/message_ix_models/model/bmt/__init__.py
@@ -1 +1,5 @@
 """BMT runs."""
+
+from .utils import subtract_material_demand
+
+__all__ = ["subtract_material_demand"]
diff --git a/message_ix_models/model/bmt/utils.py b/message_ix_models/model/bmt/utils.py
new file mode 100644
index 0000000000..ce35ca258b
--- /dev/null
+++ b/message_ix_models/model/bmt/utils.py
@@ -0,0 +1,126 @@
+"""Utility functions for MESSAGEix-BMT (Buildings, Materials, Transport) integration."""
+
+import logging
+from typing import TYPE_CHECKING
+
+import pandas as pd
+
+if TYPE_CHECKING:
+    from message_ix import Scenario
+
+    from message_ix_models import ScenarioInfo
+
+log = logging.getLogger(__name__)
+
+
+def subtract_material_demand(
+    scenario: "Scenario",
+    info: "ScenarioInfo",
+    sturm_r: pd.DataFrame,
+    sturm_c: pd.DataFrame,
+    method: str = "bm_subtraction",
+) -> pd.DataFrame:
+    """Subtract inter-sector material demand from existing demands in scenario.
+
+    This function provides different approaches for subtracting inter-sector material
+    demand from the original material demand, arising from the BM (material inputs for
+    residential and commercial building construction), PM (power capacity), IM
+    (infrastructure), and TM (new vehicles) links.
+
+    Parameters
+    ----------
+    scenario : message_ix.Scenario
+        The scenario to modify
+    info : ScenarioInfo
+        Scenario information
+    sturm_r : pd.DataFrame
+        Residential STURM data
+    sturm_c : pd.DataFrame
+        Commercial STURM data
+    method : str, optional
+        Method to use for subtraction:
+        - "bm_subtraction": default; subtract the entire trajectory
+        - "im_subtraction": subtract base year and rerun material demand projection
+        - "pm_subtraction": to be determined (currently treated as additional demand)
+        - "tm_subtraction": to be determined
+
+    Returns
+    -------
+    pd.DataFrame
+        Modified demand data with material demand subtracted
+    """
+    # Retrieve data once
+    mat_demand = scenario.par("demand", {"level": "demand"})
+    index_cols = ["node", "year", "commodity"]
+
+    if method == "bm_subtraction":
+        # Subtract the building material demand trajectory from existing demands
+        for rc, base_data, how in (
+            ("resid", sturm_r, "right"),
+            ("comm", sturm_c, "outer"),
+        ):
+            new_col = f"demand_{rc}_const"
+
+            # - Drop columns.
+            # - Rename "value" to e.g. "demand_resid_const".
+            # - Extract MESSAGEix-Materials commodity name from STURM commodity name.
+            # - Drop other rows.
+            # - Set index.
+            df = (
+                base_data.drop(columns=["level", "time", "unit"])
+                .rename(columns={"value": new_col})
+                .assign(
+                    commodity=lambda _df: _df.commodity.str.extract(
+                        f"{rc}_mat_demand_(cement|steel|aluminum)", expand=False
+                    )
+                )
+                .dropna(subset=["commodity"])
+                .set_index(index_cols)
+            )
+
+            # Merge existing demands at level "demand".
+            # - how="right": drop all rows in par("demand", …) with no match in `df`.
+            # - how="outer": keep the union of rows in `mat_demand` (e.g. from sturm_r)
+            #   and in `df` (from sturm_c); fill NA with zeroes.
+            mat_demand = mat_demand.join(df, on=index_cols, how=how).fillna(0)
+
+        # False if main() is being run for the second time on `scenario`
+        first_pass = "construction_resid_build" not in info.set["technology"]
+
+        # If not on the first pass, this modification is already performed; skip
+        if first_pass:
+            # - Compute new value = (existing value - STURM values), but no less than 0.
+            # - Drop intermediate column.
+ mat_demand = ( + mat_demand.eval( + "value = value - demand_comm_const - demand_resid_const" + ) + .assign(value=lambda df: df["value"].clip(0)) + .drop(columns=["demand_comm_const", "demand_resid_const"]) + ) + + elif method == "im_subtraction": + # TODO: to be implemented + log.warning("Method 'im_subtraction' not implemented yet, using bm_subtraction") + return subtract_material_demand( + scenario, info, sturm_r, sturm_c, "bm_subtraction" + ) + + elif method == "pm_subtraction": + # TODO: Implement alternative method 2 + log.warning("Method 'pm_subtraction' not implemented yet, using bm_subtraction") + return subtract_material_demand( + scenario, info, sturm_r, sturm_c, "bm_subtraction" + ) + + elif method == "tm_subtraction": + # TODO: Implement alternative method 3 + log.warning("Method 'tm_subtraction' not implemented yet, using bm_subtraction") + return subtract_material_demand( + scenario, info, sturm_r, sturm_c, "bm_subtraction" + ) + + else: + raise ValueError(f"Unknown method: {method}") + + return mat_demand diff --git a/message_ix_models/model/buildings/build.py b/message_ix_models/model/buildings/build.py index 4ea869e953..cdddccaba9 100644 --- a/message_ix_models/model/buildings/build.py +++ b/message_ix_models/model/buildings/build.py @@ -25,6 +25,7 @@ from message_ix_models import Context, ScenarioInfo, Spec from message_ix_models.model import build +from message_ix_models.model.bmt.utils import subtract_material_demand from message_ix_models.model.structure import ( generate_set_elements, get_codes, @@ -839,50 +840,18 @@ def materials( for name, df in data.items(): result[name].append(df) - # Retrieve data once - mat_demand = scenario.par("demand", {"level": "demand"}) - index_cols = ["node", "year", "commodity"] - - # Subtract building material demand from existing demands in scenario - for rc, base_data, how in (("resid", sturm_r, "right"), ("comm", sturm_c, "outer")): - new_col = f"demand_{rc}_const" - - # - Drop columns. - # - Rename "value" to e.g. "demand_resid_const". - # - Extract MESSAGEix-Materials commodity name from STURM commodity name. - # - Drop other rows. - # - Set index. - df = ( - base_data.drop(columns=["level", "time", "unit"]) - .rename(columns={"value": new_col}) - .assign( - commodity=lambda _df: _df.commodity.str.extract( - f"{rc}_mat_demand_(cement|steel|aluminum)", expand=False - ) - ) - .dropna(subset=["commodity"]) - .set_index(index_cols) - ) + # Use the reusable function to subtract material demand + # One can change the method parameter to use different approaches: + # - "bm_subtraction": Building material subtraction (default) + # - "im_subtraction": Infrastructure material subtraction (to be implemented) + # - "pm_subtraction": Power material subtraction (to be implemented) + # - "tm_subtraction": Transport material subtraction (to be implemented) + mat_demand = subtract_material_demand( + scenario, info, sturm_r, sturm_c, method="bm_subtraction" + ) - # Merge existing demands at level "demand". - # - how="right": drop all rows in par("demand", …) that have no match in `df`. - # - how="outer": keep the union of rows in `mat_demand` (e.g. from sturm_r) and - # in `df` (from sturm_c); fill NA with zeroes. 
- mat_demand = mat_demand.join(df, on=index_cols, how=how).fillna(0) - - # False if main() is being run for the second time on `scenario` - first_pass = "construction_resid_build" not in info.set["technology"] - - # If not on the first pass, this modification is already performed; skip - if first_pass: - # - Compute new value = (existing value - STURM values), but no less than 0. - # - Drop intermediate column. - # - Add to combined data. - result["demand"].append( - mat_demand.eval("value = value - demand_comm_const - demand_resid_const") - .assign(value=lambda df: df["value"].clip(0)) - .drop(columns=["demand_comm_const", "demand_resid_const"]) - ) + # Add the modified demand to results + result["demand"].append(mat_demand) # Concatenate data frames together return {k: pd.concat(v) for k, v in result.items()} From 1fa049b29d15a8118cee093e0202eeb240718276 Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Wed, 17 Sep 2025 00:00:52 +0200 Subject: [PATCH 14/19] Add material subtraction vetting --- message_ix_models/model/bmt/utils.py | 106 +++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) diff --git a/message_ix_models/model/bmt/utils.py b/message_ix_models/model/bmt/utils.py index ce35ca258b..626b527eaa 100644 --- a/message_ix_models/model/bmt/utils.py +++ b/message_ix_models/model/bmt/utils.py @@ -13,12 +13,106 @@ log = logging.getLogger(__name__) +def _generate_vetting_csv( + original_demand: pd.DataFrame, + modified_demand: pd.DataFrame, + output_path: str, +) -> None: + """Generate a CSV file showing material demand subtraction details. + + Parameters + ---------- + original_demand : pd.DataFrame + Original demand data before subtraction + modified_demand : pd.DataFrame + Modified demand data after subtraction + output_path : str + Path where to save the vetting CSV file + """ + # Reset index to work with columns + orig = original_demand.reset_index() + mod = modified_demand.reset_index() + + # Merge original and modified data + vetting_data = orig.merge( + mod, + on=["node", "year", "commodity"], + suffixes=("_original", "_modified"), + how="outer", + ).fillna(0) + + # Calculate subtraction amounts and percentages + vetting_data["subtracted_amount"] = ( + vetting_data["value_original"] - vetting_data["value_modified"] + ) + + # Calculate percentage subtracted (avoid division by zero) + vetting_data["subtraction_percentage"] = ( + vetting_data["subtracted_amount"] + / vetting_data["value_original"].replace(0, 1) + * 100 + ) + + # Replace infinite values with 0 (when original was 0) + vetting_data["subtraction_percentage"] = vetting_data[ + "subtraction_percentage" + ].replace([float("inf"), -float("inf")], 0) + + # Round to reasonable precision + vetting_data["subtraction_percentage"] = vetting_data[ + "subtraction_percentage" + ].round(2) + + # Select and rename columns for clarity + output_columns = [ + "node", + "year", + "commodity", + "value_original", + "value_modified", + "subtracted_amount", + "subtraction_percentage", + ] + + vetting_data = vetting_data[output_columns].copy() + vetting_data.columns = [ + "node", + "year", + "commodity", + "original_demand", + "modified_demand", + "subtracted_amount", + "subtraction_percentage", + ] + + # # Filter out rows where no subtraction occurred + # vetting_data = vetting_data[vetting_data["subtracted_amount"] > 0] + + # Sort by commodity, node, year for better readability + vetting_data = vetting_data.sort_values(["commodity", "node", "year"]) + + # Save to CSV + vetting_data.to_csv(output_path, index=False) + + 
log.info(f"Vetting CSV saved to: {output_path}") + + # Log summary statistics + if len(vetting_data) > 0: + avg_pct = vetting_data["subtraction_percentage"].mean() + max_pct = vetting_data["subtraction_percentage"].max() + log.info(f"Average subtraction percentage: {avg_pct:.2f}%") + log.info(f"Max subtraction percentage: {max_pct:.2f}%") + + +# Maybe it is better to have one function for each method? def subtract_material_demand( scenario: "Scenario", info: "ScenarioInfo", sturm_r: pd.DataFrame, sturm_c: pd.DataFrame, method: str = "bm_subtraction", + generate_vetting_csv: bool = True, + vetting_output_path: str = "material_demand_subtraction_vetting.csv", ) -> pd.DataFrame: """Subtract inter-sector material demand from existing demands in scenario. @@ -43,6 +137,11 @@ def subtract_material_demand( - "im_subtraction": substract base year and rerun material demand projection - "pm_subtraction": to be determined (currently treated as additional demand) - "tm_subtraction": to be determined + generate_vetting_csv : bool, optional + Whether to generate a CSV file showing subtraction details (default: True) + vetting_output_path : str, optional + Path for the vetting CSV file (default: + "material_demand_subtraction_vetting.csv") Returns ------- @@ -54,6 +153,9 @@ def subtract_material_demand( index_cols = ["node", "year", "commodity"] if method == "bm_subtraction": + # Store original demand for vetting if requested + original_demand = mat_demand.copy() if generate_vetting_csv else None + # Subtract the building material demand trajectory from existing demands for rc, base_data, how in ( ("resid", sturm_r, "right"), @@ -99,6 +201,10 @@ def subtract_material_demand( .drop(columns=["demand_comm_const", "demand_resid_const"]) ) + # Generate vetting CSV if requested + if generate_vetting_csv and original_demand is not None: + _generate_vetting_csv(original_demand, mat_demand, vetting_output_path) + elif method == "im_subtraction": # TODO: to be implemented log.warning("Method 'im_subtraction' not implemented yet, using bm_subtraction") From 5972dfa18bb2ab1dd8264f1864a10afdad6aad2a Mon Sep 17 00:00:00 2001 From: JU Yiyi Date: Wed, 17 Sep 2025 09:58:32 +0200 Subject: [PATCH 15/19] Update workflow for takeaway BM scenario --- message_ix_models/model/buildings/build.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/message_ix_models/model/buildings/build.py b/message_ix_models/model/buildings/build.py index cdddccaba9..70083488e1 100644 --- a/message_ix_models/model/buildings/build.py +++ b/message_ix_models/model/buildings/build.py @@ -893,22 +893,25 @@ def build_B( prices = pd.read_csv(price_path) # sturm_r - sturm_r_path = private_data_path("buildings", "resid_sturm.csv") + sturm_r_path = private_data_path("buildings", "resid_sturm_20250915.csv") # sturm_r_path = package_data_path("buildings", "debug-sturm-resid.csv") sturm_r = pd.read_csv(sturm_r_path, index_col=0) # sturm_c - sturm_c_path = private_data_path("buildings", "comm_sturm.csv") + sturm_c_path = private_data_path("buildings", "comm_sturm_20250915.csv") # sturm_c_path = package_data_path("buildings", "debug-sturm-comm.csv") sturm_c = pd.read_csv(sturm_c_path, index_col=0) # e_use - e_use_path = private_data_path("buildings", "e_use.csv") + e_use_path = private_data_path("buildings", "e_use_20250915.csv") e_use = pd.read_csv(e_use_path, index_col=0) + # Exclude rows with commodity 'resid_cook_non-comm' + e_use = e_use[e_use.commodity != 'resid_cook_non-comm'] # afofio - afofio_path = 
private_data_path("buildings", "afofio_demand.csv") + afofio_path = private_data_path("buildings", "afofio_demand_20250915.csv") afofio = pd.read_csv(afofio_path, index_col=0) + afofio["value"] = 0 # demand expr = "(cool|heat|hotwater|floor|other_uses)" From 88cbe0ffde6cfe3d49a6f0d4b53d814f32945e41 Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Wed, 17 Sep 2025 10:26:45 +0200 Subject: [PATCH 16/19] Update bm_subtraction method --- message_ix_models/model/bmt/utils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/message_ix_models/model/bmt/utils.py b/message_ix_models/model/bmt/utils.py index 626b527eaa..9f5864702f 100644 --- a/message_ix_models/model/bmt/utils.py +++ b/message_ix_models/model/bmt/utils.py @@ -148,8 +148,11 @@ def subtract_material_demand( pd.DataFrame Modified demand data with material demand subtracted """ + # Method adopted in NAVIGATE workflow 2023 by PNK # Retrieve data once - mat_demand = scenario.par("demand", {"level": "demand"}) + target_commodities = ["cement", "steel", "aluminum"] + # Updated filter from level to commodities to avoid non-material commodities + mat_demand = scenario.par("demand", {"commodity": target_commodities}) index_cols = ["node", "year", "commodity"] if method == "bm_subtraction": @@ -173,7 +176,10 @@ def subtract_material_demand( .rename(columns={"value": new_col}) .assign( commodity=lambda _df: _df.commodity.str.extract( - f"{rc}_mat_demand_(cement|steel|aluminum)", expand=False + f"{rc}_mat_demand_(cement|steel|aluminum)", + expand=False, + # Directly provided by STURM reporting + # No need to multiply intensities and floor space ) ) .dropna(subset=["commodity"]) From 1ec2cabd452cd68d0665ed61f9c6a14e9cf45c2c Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Wed, 17 Sep 2025 18:11:43 +0200 Subject: [PATCH 17/19] Update add power in material build --- message_ix_models/model/material/data_power_sector.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/message_ix_models/model/material/data_power_sector.py b/message_ix_models/model/material/data_power_sector.py index c98a912f72..c3df0c118a 100644 --- a/message_ix_models/model/material/data_power_sector.py +++ b/message_ix_models/model/material/data_power_sector.py @@ -253,6 +253,7 @@ def read_material_intensities( "node_origin": n, "commodity": c, "level": "product", + "time": "year", "time_origin": "year", "value": val_cap_new, "unit": "t/kW", @@ -282,6 +283,7 @@ def read_material_intensities( "node_origin": n, "commodity": c, "level": "product", + "time": "year", "time_origin": "year", "value": val_cap_input_ret, "unit": "t/kW", @@ -311,6 +313,7 @@ def read_material_intensities( "node_dest": n, "commodity": c, "level": "end_of_life", + "time": "year", "time_dest": "year", "value": val_cap_output_ret, "unit": "t/kW", From f477296b404e1ea397b5cd5fbb09f4bd4908db21 Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Wed, 17 Sep 2025 18:13:50 +0200 Subject: [PATCH 18/19] Add build_PM function for power sector material intensity --- message_ix_models/model/bmt/__init__.py | 4 +- message_ix_models/model/bmt/utils.py | 49 +++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/message_ix_models/model/bmt/__init__.py b/message_ix_models/model/bmt/__init__.py index 7db7c764f2..274ad7ede6 100755 --- a/message_ix_models/model/bmt/__init__.py +++ b/message_ix_models/model/bmt/__init__.py @@ -1,5 +1,5 @@ """BMT runs.""" -from .utils import subtract_material_demand +from .utils import build_PM, subtract_material_demand -__all__ = 
["subtract_material_demand"] +__all__ = ["build_PM", "subtract_material_demand"] diff --git a/message_ix_models/model/bmt/utils.py b/message_ix_models/model/bmt/utils.py index 9f5864702f..d823a8a4d3 100644 --- a/message_ix_models/model/bmt/utils.py +++ b/message_ix_models/model/bmt/utils.py @@ -5,6 +5,9 @@ import pandas as pd +from message_ix_models.model.material.data_power_sector import gen_data_power_sector +from message_ix_models.util import add_par_data + if TYPE_CHECKING: from message_ix import Scenario @@ -236,3 +239,49 @@ def subtract_material_demand( raise ValueError(f"Unknown method: {method}") return mat_demand + + +def build_PM(context, scenario: "Scenario", **kwargs) -> "Scenario": + """Build the material intensity for power capacities. + + This function adds power sector material intensity parameters (input_cap_new, + input_cap_ret, output_cap_new, output_cap_ret) to the scenario if they do not + already exist. + + Parameters + ---------- + context + The context of the scenario to be add intensity data to. + scenario : message_ix.Scenario + The scenario to add material-power sector linkages to. + **kwargs + Additional keyword arguments (ignored, for workflow compatibility). + """ + # Check if power sector material data already exists + if scenario.has_par("input_cap_new"): + try: + existing_data = scenario.par("input_cap_new") + if ( + not existing_data.empty + and "cement" in existing_data.get("commodity", pd.Series()).values + ): + log.info( + "Power sector material intensity data already exists " + "(found cement in input_cap_new). Skipping build_pm." + ) + return scenario + except Exception as e: + log.warning(f"Could not check existing input_cap_new data: {e}") + + log.info("Adding material intensity for power capacities...") + scenario.check_out() + try: + power_data = gen_data_power_sector(scenario, dry_run=False) + add_par_data(scenario, power_data, dry_run=False) + # but actually do not know how to provide log info while adding those parameters + log.info("Successfully added power sector material intensity data.") + except Exception as e: + log.error(f"Error adding power sector material data: {e}") + raise + + return scenario From 19a4094b55206a9585fb136d3a3d35c5945a64a7 Mon Sep 17 00:00:00 2001 From: yiyi1991 Date: Wed, 17 Sep 2025 18:17:13 +0200 Subject: [PATCH 19/19] Update worklfow with PM in the last step --- message_ix_models/model/bmt/workflow.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/message_ix_models/model/bmt/workflow.py b/message_ix_models/model/bmt/workflow.py index 8be476eed7..c97c198f22 100755 --- a/message_ix_models/model/bmt/workflow.py +++ b/message_ix_models/model/bmt/workflow.py @@ -8,6 +8,7 @@ import message_ix from message_ix_models import Context +from message_ix_models.model.bmt.utils import build_PM from message_ix_models.model.buildings.build import build_B as build_B from message_ix_models.model.material.build import build_M as build_M from message_ix_models.workflow import Workflow @@ -144,6 +145,15 @@ def generate(context: Context) -> Workflow: clone=False, ) + wf.add_step( + "P built", + "BMT baseline solved", + build_PM, + model="MESSAGE", + target=f"{model_name}/baseline_BMP", + clone=False, + ) + wf.add_step( "NPi2030", "BMT baseline solved",