Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CI] Fix the runner's memory check #9309

Merged
merged 20 commits into develop from wip/mwu/memory-check-fix
Mar 28, 2024
Merged
Show file tree
Hide file tree
Changes from 14 commits
Commits
Show all changes
20 commits
Select commit — hold Shift and click to select a range
f240b49
Bytes, not kilobytes.
mwu-tow Mar 6, 2024
785430b
Merge remote-tracking branch 'origin/develop' into wip/mwu/memory-che…
mwu-tow Mar 6, 2024
a0ea88e
Merge branch 'develop' into wip/mwu/memory-check-fix
mergify[bot] Mar 12, 2024
f8767d7
Merge branch 'develop' into wip/mwu/memory-check-fix
mergify[bot] Mar 12, 2024
9d745ad
Merge branch 'develop' into wip/mwu/memory-check-fix
mergify[bot] Mar 13, 2024
d4a2066
Merge branch 'develop' into wip/mwu/memory-check-fix
mergify[bot] Mar 13, 2024
17e7d12
Merge branch 'develop' into wip/mwu/memory-check-fix
mergify[bot] Mar 13, 2024
aa159af
unify paths for mac and non-mac
mwu-tow Mar 15, 2024
b2fde8d
Merge remote-tracking branch 'origin/develop' into wip/mwu/memory-che…
mwu-tow Mar 15, 2024
19f87f8
cleanup benchmark handling
mwu-tow Mar 15, 2024
5245c82
update
mwu-tow Mar 15, 2024
6fc6893
Merge remote-tracking branch 'origin/develop' into wip/mwu/memory-che…
mwu-tow Mar 18, 2024
49f3aeb
Merge remote-tracking branch 'origin/develop' into wip/mwu/memory-che…
mwu-tow Mar 18, 2024
5e31fcb
fmt
mwu-tow Mar 18, 2024
1f8047a
as per discussion
mwu-tow Mar 19, 2024
08c6bd5
Merge remote-tracking branch 'origin/develop' into wip/mwu/memory-che…
mwu-tow Mar 19, 2024
2e99f89
undo
mwu-tow Mar 20, 2024
67c18a7
Merge remote-tracking branch 'origin/develop' into wip/mwu/memory-che…
mwu-tow Mar 20, 2024
4e11beb
Merge remote-tracking branch 'origin/develop' into wip/mwu/memory-che…
mwu-tow Mar 25, 2024
c9aa893
Potentially fix further `std-benchmarks` issues
hubertp Mar 26, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion build/build/src/ci.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ pub mod inputs {
///
/// Certain CI operations are only performed on big machines, as they require a lot of memory.
pub fn big_memory_machine() -> bool {
let github_hosted_macos_memory = 15_032_385;
let github_hosted_macos_memory = 15_032_385_536;
let mut system = sysinfo::System::new();
system.refresh_memory();
system.total_memory() > github_hosted_macos_memory
Expand Down
131 changes: 42 additions & 89 deletions build/build/src/engine/context.rs
Original file line number Diff line number Diff line change
Expand Up @@ -338,55 +338,30 @@ impl RunContext {
// we don't want to call this in environments like GH-hosted runners.

// === Build project-manager distribution and native image ===
debug!("Bulding project-manager distribution and Native Image");
if crate::ci::big_memory_machine() {
let mut tasks = vec![];

if self.config.build_engine_package() {
tasks.push("buildEngineDistribution");
tasks.push("engine-runner/assembly");
}
if self.config.build_native_runner {
tasks.push("engine-runner/buildNativeImage");
}

if self.config.build_project_manager_package() {
tasks.push("buildProjectManagerDistribution");
}

if self.config.build_launcher_package() {
tasks.push("buildLauncherDistribution");
}
let mut tasks = vec![];
if self.config.build_engine_package() {
tasks.push("engine-runner/assembly");
tasks.push("buildEngineDistribution");
}
Comment on lines +341 to +344
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

buildEngineDistribution is a command required by many other commands (for example, dry-run benchmarks and Enso tests). I believe that, at least on Linux, this command is usually run as the very first one. We should ensure that it also runs on other architectures; if it does not, we may run into weird dependency issues. Please ensure that self.config.build_engine_package is set on macOS and Windows as well.

if self.config.build_native_runner {
tasks.push("engine-runner/buildNativeImage");
}
if self.config.build_project_manager_package() {
tasks.push("buildProjectManagerDistribution");
}
if self.config.build_launcher_package() {
tasks.push("buildLauncherDistribution");
}

if !tasks.is_empty() {
if !tasks.is_empty() {
debug!("Building distributions and native images.");
if crate::ci::big_memory_machine() {
sbt.call_arg(Sbt::concurrent_tasks(tasks)).await?;
} else {
sbt.call_arg(Sbt::sequential_tasks(tasks)).await?;
}
} else {
// If we are run on a weak machine (like GH-hosted runner), we need to build things one
// by one.
sbt.call_arg("compile").await?;

// Build the Runner & Runtime Uberjars
sbt.call_arg("engine-runner/assembly").await?;

// Build the Launcher Native Image
sbt.call_arg("launcher/assembly").await?;
sbt.call_args(&["--mem", "1536", "launcher/buildNativeImage"]).await?;

// Build the PM Native Image
sbt.call_arg("project-manager/assembly").await?;
sbt.call_args(&["--mem", "1536", "project-manager/buildNativeImage"]).await?;

// Prepare Launcher Distribution
//create_launcher_package(&paths)?;
sbt.call_arg("buildLauncherDistribution").await?;

// Prepare Engine Distribution
sbt.call_arg("buildEngineDistribution").await?;

// Prepare Project Manager Distribution
sbt.call_arg("buildProjectManagerDistribution").await?;
}

// === End of Build project-manager distribution and native image ===

let ret = self.expected_artifacts();
Expand Down Expand Up @@ -446,52 +421,30 @@ impl RunContext {

// === Run benchmarks ===
debug!("Running benchmarks.");
if crate::ci::big_memory_machine() {
let mut tasks = vec![];
// This just compiles benchmarks, not run them. At least we'll know that they can be
// run. Actually running them, as part of this routine, would be too heavy.
// TODO [mwu] It should be possible to run them through context config option.
if self.config.build_benchmarks {
tasks.extend([
"runtime-benchmarks/compile",
"language-server/Benchmark/compile",
"searcher/Benchmark/compile",
"std-benchmarks/Benchmark/compile",
]);
}

let build_command = (!tasks.is_empty()).then_some(Sbt::concurrent_tasks(tasks));

// We want benchmarks to run only after the other build tasks are done, as they are
// really CPU-heavy.
let benchmark_tasks = self.config.execute_benchmarks.iter().flat_map(|b| b.sbt_task());
let command_sequence = build_command.as_deref().into_iter().chain(benchmark_tasks);
let final_command = Sbt::sequential_tasks(command_sequence);
if !final_command.is_empty() {
sbt.call_arg(final_command).await?;
let build_benchmark_task = if self.config.build_benchmarks {
let build_benchmark_task_names = [
"runtime-benchmarks/compile",
"language-server/Benchmark/compile",
"searcher/Benchmark/compile",
"std-benchmarks/compile",
];
if crate::ci::big_memory_machine() {
Some(Sbt::concurrent_tasks(build_benchmark_task_names))
} else {
debug!("No SBT tasks to run.");
Some(Sbt::sequential_tasks(build_benchmark_task_names))
}
} else {
if self.config.build_benchmarks {
// Check Runtime Benchmark Compilation
sbt.call_arg("runtime-benchmarks/compile").await?;

// Check Language Server Benchmark Compilation
sbt.call_arg("language-server/Benchmark/compile").await?;

// Check Searcher Benchmark Compilation
sbt.call_arg("searcher/Benchmark/compile").await?;

// Check Enso JMH benchmark compilation
sbt.call_arg("std-benchmarks/Benchmark/compile").await?;
}

for benchmark in &self.config.execute_benchmarks {
if let Some(task) = benchmark.sbt_task() {
sbt.call_arg(task).await?;
}
}
None
};
let execute_benchmark_tasks =
self.config.execute_benchmarks.iter().flat_map(|b| b.sbt_task());
let build_and_execute_benchmark_task =
build_benchmark_task.as_deref().into_iter().chain(execute_benchmark_tasks);
let benchmark_command = Sbt::sequential_tasks(build_and_execute_benchmark_task);
if !benchmark_command.is_empty() {
sbt.call_arg(benchmark_command).await?;
} else {
debug!("No SBT tasks to run.");
}

if self.config.execute_benchmarks.contains(&Benchmarks::Enso) {
Expand All @@ -500,10 +453,10 @@ impl RunContext {
enso.run_benchmarks(BenchmarkOptions { dry_run: true }).await?;
}

// If we were running any benchmarks, they are complete by now. Upload the report.
if is_in_env() {
self.upload_native_image_arg_files().await?;

// If we were running any benchmarks, they are complete by now. Upload the report.
for bench in &self.config.execute_benchmarks {
match bench {
Benchmarks::Runtime => {
Expand Down
Loading