2 changes: 1 addition & 1 deletion common.json
@@ -4,7 +4,7 @@
"Jsonnet files should not include this file directly but use ci/common.jsonnet instead."
],

"mx_version": "7.65.3",
"mx_version": "7.67.0",

"COMMENT.jdks": "When adding or removing JDKs keep in sync with JDKs in ci/common.jsonnet",
"jdks": {
28 changes: 18 additions & 10 deletions sdk/mx.sdk/mx_sdk_benchmark.py
@@ -72,7 +72,7 @@
import mx_sdk_vm_impl
import mx_util
from mx_util import Stage, StageName, Layer
-from mx_benchmark import DataPoints, DataPoint, BenchmarkSuite, Vm, SingleBenchmarkExecutionContext, ForkInfo
+from mx_benchmark import DataPoints, DataPoint, BenchmarkSuite, bm_exec_context, ConstantContextValueManager, SingleBenchmarkManager
from mx_sdk_vm_impl import svm_experimental_options

_suite = mx.suite('sdk')
@@ -1786,7 +1786,7 @@ def get_layer_aware_build_args(self) -> List[str]:

def run_stage_image(self):
executable_name_args = ['-o', self.config.final_image_name]
pgo_args = [f"--pgo={self.config.profile_path}"]
pgo_args = [f"--pgo={self.config.bm_suite.get_pgo_profile_for_image_build(self.config.profile_path)}"]
if self.pgo_use_perf:
# -g is already set in base_image_build_args if we're not using perf. When using perf, if debug symbols
# are present they will interfere with sample decoding using source mappings.
@@ -1946,8 +1946,8 @@ def _prepare_for_running(self, args, out, err, cwd, nonZeroIsFatal):
self.stages_context = StagesContext(self, out, err, nonZeroIsFatal, os.path.abspath(cwd if cwd else os.getcwd()))
file_name = f"staged-benchmark.{self.ext}"
output_dir = self.bmSuite.get_image_output_dir(
-self.bmSuite.benchmark_output_dir(self.bmSuite.execution_context.benchmark, args),
-self.bmSuite.get_full_image_name(self.bmSuite.get_base_image_name(), self.bmSuite.execution_context.virtual_machine.config_name())
+self.bmSuite.benchmark_output_dir(bm_exec_context().get("benchmark"), args),
+self.bmSuite.get_full_image_name(self.bmSuite.get_base_image_name(), bm_exec_context().get("vm").config_name())
)
self.staged_program_file_path = output_dir / file_name
self.staged_program_file_path.parent.mkdir(parents=True, exist_ok=True)
@@ -3178,7 +3178,7 @@ def subgroup(self):
return "graal-compiler"

def benchmarkName(self):
-return self.execution_context.benchmark
+return bm_exec_context().get("benchmark")

def benchmarkList(self, bmSuiteArgs):
exclude = []
@@ -3226,8 +3226,9 @@ def validateEnvironment(self):
self.baristaProjectConfigurationPath()
self.baristaHarnessPath()

-def new_execution_context(self, vm: Optional[Vm], benchmarks: List[str], bmSuiteArgs: List[str], fork_info: Optional[ForkInfo] = None) -> SingleBenchmarkExecutionContext:
-return SingleBenchmarkExecutionContext(self, vm, benchmarks, bmSuiteArgs, fork_info)
+def run(self, benchmarks, bmSuiteArgs) -> DataPoints:
+with SingleBenchmarkManager(self):
+return super().run(benchmarks, bmSuiteArgs)

def createCommandLineArgs(self, benchmarks, bmSuiteArgs):
# Pass the VM options, BaristaCommand will form the final command.
@@ -3490,7 +3491,7 @@ def produceHarnessCommand(self, cmd, suite):
jvm_vm_options = jvm_cmd[index_of_java_exe + 1:]

# Verify that the run arguments don't already contain a "--mode" option
-run_args = suite.runArgs(suite.execution_context.bmSuiteArgs) + self._energyTrackerExtraOptions(suite)
+run_args = suite.runArgs(bm_exec_context().get("bm_suite_args")) + self._energyTrackerExtraOptions(suite)
mode_pattern = r"^(?:-m|--mode)(=.*)?$"
mode_match = self._regexFindInCommand(run_args, mode_pattern)
if mode_match:
@@ -4128,7 +4129,7 @@ def intercept_run(self, super_delegate: BenchmarkSuite, benchmarks, bm_suite_arg
datapoints: List[DataPoint] = []

vm = self.get_vm_registry().get_vm_from_suite_args(bm_suite_args)
-with self.new_execution_context(vm, benchmarks, bm_suite_args):
+with ConstantContextValueManager("vm", vm):
effective_stages, complete_stage_list = vm.prepare_stages(self, bm_suite_args)
self.stages_info = StagesInfo(effective_stages, complete_stage_list, vm)

@@ -4261,7 +4262,7 @@ def run(self, benchmarks, bm_suite_args: List[str]) -> DataPoints:
fallback_reason = self.fallback_mode_reason(bm_suite_args)

vm = self.get_vm_registry().get_vm_from_suite_args(bm_suite_args)
-with self.new_execution_context(vm, benchmarks, bm_suite_args):
+with ConstantContextValueManager("vm", vm):
effective_stages, complete_stage_list = vm.prepare_stages(self, bm_suite_args)
self.stages_info = StagesInfo(effective_stages, complete_stage_list, vm, bool(fallback_reason))

@@ -4502,6 +4503,13 @@ def get_image_output_dir(self, benchmark_output_dir: str, full_image_name: str)
"""
return Path(benchmark_output_dir).absolute() / "native-image-benchmarks" / full_image_name

+def get_pgo_profile_for_image_build(self, default_pgo_profile: str) -> str:
+vm_args = self.vmArgs(bm_exec_context().get("bm_suite_args"))
+parsed_arg = parse_prefixed_arg("-Dnative-image.benchmark.pgo=", vm_args, "Native Image benchmark PGO profiles should only be specified once!")
+if not parsed_arg:
+return default_pgo_profile
+return parsed_arg
+

def measureTimeToFirstResponse(bmSuite):
protocolHost = bmSuite.serviceHost()
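The call sites above rely on three names newly imported from mx_benchmark: bm_exec_context(), ConstantContextValueManager, and SingleBenchmarkManager. Their implementations are not part of this diff, so the snippet below is only a minimal, self-contained sketch of the pattern as inferred from the usage here: a value such as the VM is published under a string key for the duration of a with block and read back anywhere via bm_exec_context().get(...) / .has(...); SingleBenchmarkManager presumably publishes the per-run values ("benchmark", "bm_suite_args") the same way. Names and storage details below are illustrative, not the mx_benchmark code.

```python
import threading

_store = threading.local()  # hypothetical per-thread storage, for illustration only

def _values():
    if not hasattr(_store, "data"):
        _store.data = {}
    return _store.data

class _ContextView:
    """Key/value view mimicking the .get()/.has() calls seen in the diff."""
    def get(self, key):
        return _values()[key]
    def has(self, key):
        return key in _values()

def bm_exec_context():
    # Stand-in with the same call shape as the real accessor in mx_benchmark.
    return _ContextView()

class ConstantContextValueManager:
    """Publish a single key/value pair while the with block is active."""
    def __init__(self, key, value):
        self._key, self._value = key, value
    def __enter__(self):
        _values()[self._key] = self._value
        return self
    def __exit__(self, *exc_info):
        _values().pop(self._key, None)

# Usage mirroring the diff: the VM is only visible inside the block.
with ConstantContextValueManager("vm", "example-vm-config"):
    assert bm_exec_context().has("vm")
    assert bm_exec_context().get("vm") == "example-vm-config"
assert not bm_exec_context().has("vm")
```

This keyed context is why helpers further down, and in mx_substratevm_benchmark.py below, can read the VM, benchmark name, and bmSuiteArgs without holding a reference to the suite's old execution_context object.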
2 changes: 1 addition & 1 deletion sdk/mx.sdk/suite.py
@@ -39,7 +39,7 @@
# SOFTWARE.
#
suite = {
"mxversion": "7.58.6",
"mxversion": "7.67.0",
"name" : "sdk",
"version" : "25.1.0",
"release" : False,
28 changes: 13 additions & 15 deletions substratevm/mx.substratevm/mx_substratevm_benchmark.py
@@ -36,6 +36,7 @@
import mx
import mx_benchmark
import mx_sdk_benchmark
+from mx_benchmark import bm_exec_context, SingleBenchmarkManager
from mx_sdk_benchmark import SUCCESSFUL_STAGE_PATTERNS, parse_prefixed_args
from mx_util import StageName, Layer

@@ -291,12 +292,7 @@ def benchmarkList(self, bmSuiteArgs):

def default_stages(self) -> List[str]:
if self.benchmarkName() == "micronaut-pegasus":
-if (
-self.execution_context and
-self.execution_context.virtual_machine and
-self.execution_context.virtual_machine.config_name() and
-self.execution_context.virtual_machine.config_name().endswith("-ce")
-):
+if bm_exec_context().has("vm") and bm_exec_context().get("vm").config_name().endswith("-ce"):
# fails on CE due to --enable-sbom EE only option injected from upstream pom (GR-66891)
return []
# The 'agent' stage is not supported, as currently we cannot run micronaut-pegasus on the JVM (GR-59793)
@@ -394,7 +390,8 @@ def build_assertions(self, benchmark: str, is_gate: bool) -> List[str]:
return super().build_assertions(benchmark, is_gate)

def run(self, benchmarks, bmSuiteArgs) -> mx_benchmark.DataPoints:
-return self.intercept_run(super(), benchmarks, bmSuiteArgs)
+with SingleBenchmarkManager(self):
+return self.intercept_run(super(), benchmarks, bmSuiteArgs)

def ensure_image_is_at_desired_location(self, bmSuiteArgs):
if self.stages_info.current_stage.is_image() and self.application_fixed_image_name() is not None:
@@ -441,7 +438,7 @@ def _get_built_app_image(self, suite, stage):
In the case of `instrument-run`, retrieves the image built during `instrument-image`.
In the case of `run`, retrieves the image built during `image`.
"""
-vm = suite.execution_context.virtual_machine
+vm = bm_exec_context().get("vm")
if stage.stage_name == StageName.INSTRUMENT_RUN:
return vm.config.instrumented_image_path
else:
@@ -470,15 +467,16 @@ def produceHarnessCommand(self, cmd, suite):
raise TypeError(f"Expected an instance of {BaristaNativeImageBenchmarkSuite.__name__}, instead got an instance of {suite.__class__.__name__}")

stage = suite.stages_info.current_stage
+bm_suite_args = bm_exec_context().get("bm_suite_args")
if stage.is_agent():
# BaristaCommand works for agent stage, since it's a JVM stage
cmd = self.produce_JVM_harness_command(cmd, suite)
# Make agent run short
cmd += self._short_load_testing_phases()
# Add explicit agent stage args
cmd += self._energyTrackerExtraOptions(suite)
cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", suite.execution_context.bmSuiteArgs)
cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-agent-run-arg=", suite.execution_context.bmSuiteArgs)
cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", bm_suite_args)
cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-agent-run-arg=", bm_suite_args)
return cmd

# Extract app image options and command prefix from the NativeImageVM command
@@ -499,18 +497,18 @@
ni_barista_cmd = [suite.baristaHarnessPath(), "--mode", "native", "--app-executable", app_image]
if barista_workload is not None:
ni_barista_cmd.append(f"--config={barista_workload}")
-ni_barista_cmd += suite.runArgs(suite.execution_context.bmSuiteArgs) + self._energyTrackerExtraOptions(suite)
-ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", suite.execution_context.bmSuiteArgs)
+ni_barista_cmd += suite.runArgs(bm_suite_args) + self._energyTrackerExtraOptions(suite)
+ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-jvm-arg=", bm_suite_args)
if stage.is_instrument():
# Make instrument run short
ni_barista_cmd += self._short_load_testing_phases()
-if suite.execution_context.benchmark == "play-scala-hello-world":
+if bm_exec_context().get("benchmark") == "play-scala-hello-world":
self._updateCommandOption(ni_barista_cmd, "--vm-options", "-v", "-Dpidfile.path=/dev/null")
# Add explicit instrument stage args
ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-profile-run-arg=", suite.execution_context.bmSuiteArgs) or parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", suite.execution_context.bmSuiteArgs)
ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-profile-run-arg=", bm_suite_args) or parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", bm_suite_args)
else:
# Add explicit run stage args
ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", suite.execution_context.bmSuiteArgs)
ni_barista_cmd += parse_prefixed_args("-Dnative-image.benchmark.extra-run-arg=", bm_suite_args)
if nivm_cmd_prefix:
self._updateCommandOption(ni_barista_cmd, "--cmd-app-prefix", "-p", " ".join(nivm_cmd_prefix))
if nivm_app_options:
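The harness-command changes in this file keep the existing -Dnative-image.benchmark.<option>= convention and only change where the raw suite arguments come from (bm_exec_context().get("bm_suite_args") instead of suite.execution_context.bmSuiteArgs). parse_prefixed_args itself is defined in mx_sdk_benchmark and is not shown in this diff; the stand-in below is a hypothetical sketch of its behaviour as inferred from the call sites above, just to illustrate how such options end up on the Barista command line.

```python
from typing import List

def parse_prefixed_args_sketch(prefix: str, args: List[str]) -> List[str]:
    """Hypothetical stand-in: return the value part of every argument with the prefix."""
    return [arg[len(prefix):] for arg in args if arg.startswith(prefix)]

# Hypothetical bmSuiteArgs for a run stage of a Barista native-image benchmark.
bm_suite_args = [
    "-Dnative-image.benchmark.extra-jvm-arg=-Xmx4g",
    "-Dnative-image.benchmark.extra-run-arg=--example-run-arg",
]
ni_barista_cmd = ["<barista-harness>", "--mode", "native"]
ni_barista_cmd += parse_prefixed_args_sketch("-Dnative-image.benchmark.extra-run-arg=", bm_suite_args)
assert ni_barista_cmd == ["<barista-harness>", "--mode", "native", "--example-run-arg"]
```

As the instrument-stage branch above shows, -Dnative-image.benchmark.extra-profile-run-arg= takes precedence and extra-run-arg= is only used as a fallback (the `or` between the two parse calls).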
2 changes: 1 addition & 1 deletion substratevm/mx.substratevm/suite.py
@@ -1,6 +1,6 @@
# pylint: disable=line-too-long
suite = {
"mxversion": "7.58.6",
"mxversion": "7.67.0",
"name": "substratevm",
"version" : "25.1.0",
"release" : False,