From 0114b0a9517a32ad6d924adfe1e8c897f3ce3d15 Mon Sep 17 00:00:00 2001 From: Nikita Kalyazin Date: Mon, 24 Mar 2025 15:33:39 +0000 Subject: [PATCH 1/2] test(api): exclude /snapshot/load from duration check When restoring from snapshot via UFFD, if the UFFD handler is eager enough (eg like the fault-all handler we use for testing), memory population for a sufficiently large VM may take longer than the limit we have. Signed-off-by: Nikita Kalyazin --- tests/framework/microvm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/framework/microvm.py b/tests/framework/microvm.py index d3a5fdceaf9..45375cd9c61 100644 --- a/tests/framework/microvm.py +++ b/tests/framework/microvm.py @@ -390,7 +390,7 @@ def _validate_api_response_times(self): "Got API call duration log entry before request entry" ) - if current_call.url != "/snapshot/create": + if current_call.url not in ["/snapshot/create", "/snapshot/load"]: exec_time = float(match.group("execution_time")) / 1000.0 assert ( From 2442ac59a3efa4adbbfe43df48ee6e3b2567f4b4 Mon Sep 17 00:00:00 2001 From: Nikita Kalyazin Date: Mon, 24 Mar 2025 15:35:37 +0000 Subject: [PATCH 2/2] test(devtool): sextuple number of preallocated huge pages With current number of huge pages (4096) being ok for 1GiB VMs, we need 6 times more of them for 6GiB VMs. Signed-off-by: Nikita Kalyazin --- tools/devtool | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/devtool b/tools/devtool index 44f10657c06..57637a553cc 100755 --- a/tools/devtool +++ b/tools/devtool @@ -743,9 +743,9 @@ cmd_test() { # It seems that even if the tests using huge pages run sequentially on ag=1 agents, right-sizing the huge pages # pool to the total number of huge pages used across all tests results in spurious failures with pool depletion # anyway (something else on the host seems to be stealing our huge pages, and we cannot "ear mark" them for - # Firecracker processes). Thus, just allocate 8GB of them and call it a day. 
+ # Firecracker processes). Thus, just allocate 48GB of them and call it a day. say "Setting up huge pages pool" - num_hugetlbfs_pages=4096 + num_hugetlbfs_pages=24576 huge_pages_old=$(cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages) huge_pages_new=$(echo $num_hugetlbfs_pages |sudo tee /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages)