From d3fe0880eb793de3f528a43e6cae21a4f0299a00 Mon Sep 17 00:00:00 2001
From: Thurston Dang
Date: Mon, 20 Oct 2025 23:31:46 +0000
Subject: [PATCH 1/4] [msan][test] Add another target("aarch64.svcount") test case

This shows a crash that happens because MSan tries to check the shadow of
target("aarch64.svcount"). This is a follow-up to
https://github.com/llvm/llvm-project/pull/164315.

This also does a drive-by fix of those test cases to use 'not opt' instead
of XFAIL.

Forked from llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
---
 .../AArch64/sme-aarch64-svcount-mini.ll       |   4 +-
 .../AArch64/sme-aarch64-svcount.ll            |   4 +-
 .../AArch64/sme2-intrinsics-add-mini.ll       |  14 +
 .../AArch64/sme2-intrinsics-add.ll            | 338 ++++++++++++++++++
 4 files changed, 354 insertions(+), 6 deletions(-)
 create mode 100644 llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
 create mode 100644 llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll

diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
index 1ddcd4b56688c..a614219df08c3 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
@@ -1,7 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=msan -mattr=+sme -o - %s | FileCheck %s
-
-; XFAIL: *
+; RUN: not opt -S -passes=msan -mattr=+sme -o - %s 2>&1
 
 ; Forked from llvm/test/CodeGen/AArch64/sme-aarch64-svcount.ll
 ; Manually minimized to show MSan leads to a compiler crash
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll
index 9caa89de63748..6d6f82df87a85 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll
@@ -1,7 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=msan -mattr=+sme -o - %s | FileCheck %s
-
-; XFAIL: *
+; RUN: not opt -S -passes=msan -mattr=+sme -o - %s
 
 ; Forked from llvm/test/CodeGen/AArch64/sme-aarch64-svcount.ll
 
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
new file mode 100644
index 0000000000000..acb19e370dbb7
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
@@ -0,0 +1,14 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: not opt -print-on-crash -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s
+
+; Forked from llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
+; Manually reduced to show MSan leads to a compiler crash
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android9001"
+
+define void @multi_vector_add_za_vg1x4_f32_tuple(i64 %stride, ptr %ptr) sanitize_memory {
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %ptr)
+  ret void
+}
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll
new file mode 100644
index 0000000000000..ec75fe385a0a5
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll
@@ -0,0 +1,338 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: not opt -print-on-crash -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s
+
+; Forked from llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android9001"
+
+define void @multi_vector_add_write_single_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zm) sanitize_memory {
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32 %slice.7,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zm)
+  ret void
+}
+
+define void @multi_vector_add_write_single_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zm) sanitize_memory {
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32 %slice,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32 %slice.7,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zm)
+  ret void
+}
+
+
+define void @multi_vector_add_write_single_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+        <vscale x 4 x i32> %zm) sanitize_memory {
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32 %slice,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+        <vscale x 4 x i32> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32 %slice.7,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+        <vscale x 4 x i32> %zm)
+  ret void
+}
+
+define void @multi_vector_add_write_single_za_vg1x4_i64(i32 %slice,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+        <vscale x 2 x i64> %zm) sanitize_memory {
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32 %slice,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+        <vscale x 2 x i64> %zm)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32 %slice.7,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+        <vscale x 2 x i64> %zm)
+  ret void
+}
+
+
+define void @multi_vector_add_write_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) sanitize_memory {
+  call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32 %slice,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32 %slice.7,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
+  ret void
+}
+
+
+define void @multi_vector_add_write_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) sanitize_memory {
+  call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32 %slice,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32 %slice.7,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
+  ret void
+}
+
+
+
+define void @multi_vector_add_write_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+        <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+        <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3) sanitize_memory {
+  call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32 %slice,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+        <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+        <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32 %slice.7,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3,
+        <vscale x 4 x i32> %zm0, <vscale x 4 x i32> %zm1,
+        <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3)
+  ret void
+}
+
+define void @multi_vector_add_write_za_vg1x4_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+        <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+        <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3) sanitize_memory {
+  call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32 %slice,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+        <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+        <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32 %slice.7,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3,
+        <vscale x 2 x i64> %zm0, <vscale x 2 x i64> %zm1,
+        <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x2_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1) sanitize_memory {
+  call void @llvm.aarch64.sme.add.za32.vg1x2.nxv4i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.za32.vg1x2.nxv4i32(i32 %slice.7, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x2_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1) sanitize_memory {
+  call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2i64(i32 %slice.7, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x2_f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1) sanitize_memory {
+  call void @llvm.aarch64.sme.add.za32.vg1x2.nxv4f32(i32 %slice,
+        <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.za32.vg1x2.nxv4f32(i32 %slice.7,
+        <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x2_f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1) sanitize_memory {
+  call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32 %slice,
+        <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32 %slice.7,
+        <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x2_f64_tuple(i64 %stride, ptr %ptr) sanitize_memory {
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x2.nxv2f64(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 0
+  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %1, 1
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %4 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.ld1.pn.x2.nxv2f64(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %4, 0
+  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %4, 1
+  call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32 0, <vscale x 2 x double> %2, <vscale x 2 x double> %5)
+  call void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32 0, <vscale x 2 x double> %3, <vscale x 2 x double> %6)
+  ret void
+}
+
+
+define void @multi_vector_add_za_vg1x4_i32(i32 %slice, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3) sanitize_memory {
+  call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4i32(i32 %slice,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4i32(i32 %slice.7,
+        <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1,
+        <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x4_i64(i32 %slice, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3) sanitize_memory {
+  call void @llvm.aarch64.sme.add.za64.vg1x4.nxv2i64(i32 %slice,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.za64.vg1x4.nxv2i64(i32 %slice.7,
+        <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1,
+        <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x4_f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3) sanitize_memory {
+  call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 %slice,
+        <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1,
+        <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 %slice.7,
+        <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1,
+        <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x4_f32_tuple(i64 %stride, ptr %ptr) sanitize_memory {
+entry:
+  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %1 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %ptr)
+  %2 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 0
+  %3 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 1
+  %4 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 2
+  %5 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %1, 3
+  %arrayidx2 = getelementptr inbounds i8, ptr %ptr, i64 %stride
+  %6 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx2)
+  %7 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 0
+  %8 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 1
+  %9 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 2
+  %10 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %6, 3
+  %mul3 = shl i64 %stride, 1
+  %arrayidx4 = getelementptr inbounds i8, ptr %ptr, i64 %mul3
+  %11 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx4)
+  %12 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 0
+  %13 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 1
+  %14 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 2
+  %15 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %11, 3
+  %mul5 = mul i64 %stride, 3
+  %arrayidx6 = getelementptr inbounds i8, ptr %ptr, i64 %mul5
+  %16 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %arrayidx6)
+  %17 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 0
+  %18 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 1
+  %19 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 2
+  %20 = extractvalue { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %16, 3
+  call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 0, <vscale x 4 x float> %2, <vscale x 4 x float> %7, <vscale x 4 x float> %12, <vscale x 4 x float> %17)
+  call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 0, <vscale x 4 x float> %3, <vscale x 4 x float> %8, <vscale x 4 x float> %13, <vscale x 4 x float> %18)
+  call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 0, <vscale x 4 x float> %4, <vscale x 4 x float> %9, <vscale x 4 x float> %14, <vscale x 4 x float> %19)
+  call void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32 0, <vscale x 4 x float> %5, <vscale x 4 x float> %10, <vscale x 4 x float> %15, <vscale x 4 x float> %20)
+  ret void
+}
+
+define void @multi_vector_add_za_vg1x4_f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3) sanitize_memory {
+  call void @llvm.aarch64.sme.add.za64.vg1x4.nxv2f64(i32 %slice,
+        <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1,
+        <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3)
+  %slice.7 = add i32 %slice, 7
+  call void @llvm.aarch64.sme.add.za64.vg1x4.nxv2f64(i32 %slice.7,
+        <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1,
+        <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3)
+  ret void
+}
+
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8> } @multi_vec_add_single_x2_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zm) sanitize_memory {
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> }
+        @llvm.aarch64.sve.add.single.x2.nxv16i8(<vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2,
+        <vscale x 16 x i8> %zm)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vec_add_single_x2_s16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, <vscale x 8 x i16> %zm) sanitize_memory {
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> }
+        @llvm.aarch64.sve.add.single.x2.nxv8i16(<vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2,
+        <vscale x 8 x i16> %zm)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_add_single_x2_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zm) sanitize_memory {
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> }
+        @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2,
+        <vscale x 4 x i32> %zm)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 2 x i64>, <vscale x 2 x i64> } @multi_vec_add_single_x2_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, <vscale x 2 x i64> %zm) sanitize_memory {
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> }
+        @llvm.aarch64.sve.add.single.x2.nxv2i64(<vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2,
+        <vscale x 2 x i64> %zm)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+
+
+define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @multi_vec_add_single_x4_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4, <vscale x 16 x i8> %zm) sanitize_memory {
+  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
+        @llvm.aarch64.sve.add.single.x4.nxv16i8(<vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2,
+        <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4,
+        <vscale x 16 x i8> %zm)
+  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
+}
+
+define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @multi_vec_add_x4_single_s16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2, <vscale x 8 x i16> %zdn3, <vscale x 8 x i16> %zdn4, <vscale x 8 x i16> %zm) sanitize_memory {
+  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> }
+        @llvm.aarch64.sve.add.single.x4.nxv8i16(<vscale x 8 x i16> %zdn1, <vscale x 8 x i16> %zdn2,
+        <vscale x 8 x i16> %zdn3, <vscale x 8 x i16> %zdn4,
+        <vscale x 8 x i16> %zm)
+  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
+}
+
+define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @multi_vec_add_x4_single_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2, <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4, <vscale x 4 x i32> %zm) sanitize_memory {
+  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> }
+        @llvm.aarch64.sve.add.single.x4.nxv4i32(<vscale x 4 x i32> %zdn1, <vscale x 4 x i32> %zdn2,
+        <vscale x 4 x i32> %zdn3, <vscale x 4 x i32> %zdn4,
+        <vscale x 4 x i32> %zm)
+  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
+}
+
+define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @multi_vec_add_x4_single_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2, <vscale x 2 x i64> %zdn3, <vscale x 2 x i64> %zdn4, <vscale x 2 x i64> %zm) sanitize_memory {
+  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> }
+        @llvm.aarch64.sve.add.single.x4.nxv2i64(<vscale x 2 x i64> %zdn1, <vscale x 2 x i64> %zdn2,
+        <vscale x 2 x i64> %zdn3, <vscale x 2 x i64> %zdn4,
+        <vscale x 2 x i64> %zm)
+  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
+}
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.single.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.write.za.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.za32.vg1x2.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.za64.vg1x2.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.za32.vg1x4.nxv4i32(i32, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare void @llvm.aarch64.sme.add.za64.vg1x4.nxv2i64(i32, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare void @llvm.aarch64.sme.add.za32.vg1x2.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>)
+declare void @llvm.aarch64.sme.add.za64.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>)
+declare void @llvm.aarch64.sme.add.za32.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
+declare void @llvm.aarch64.sme.add.za64.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.add.single.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.add.single.x2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.add.single.x2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.add.single.x4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.add.single.x4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.add.single.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.add.single.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)

From c122180d0998e20b4f3e5e743ae691bc634c1e32 Mon Sep 17 00:00:00 2001
From: Thurston Dang
Date: Tue, 21 Oct 2025 00:05:20 +0000
Subject: [PATCH 2/4] Fix IR

---
 .../MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
index acb19e370dbb7..e7f7c88a1ca25 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: not opt -print-on-crash -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s
+; RUN: not opt -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s
 
 ; Forked from llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
 ; Manually reduced to show MSan leads to a compiler crash
@@ -8,7 +8,7 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--linux-android9001"
 
 define void @multi_vector_add_za_vg1x4_f32_tuple(i64 %stride, ptr %ptr) sanitize_memory {
-  %0 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
-  %1 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %0, ptr %ptr)
+  %1 = tail call target("aarch64.svcount") @llvm.aarch64.sve.ptrue.c8()
+  %2 = tail call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.ld1.pn.x4.nxv4f32(target("aarch64.svcount") %1, ptr %ptr)
   ret void
 }

From 0255ad0bdaadbd49d1d5aca150e5d31b3ca401a7 Mon Sep 17 00:00:00 2001
From: Thurston Dang
Date: Tue, 21 Oct 2025 00:09:16 +0000
Subject: [PATCH 3/4] You shall not pass

---
 .../MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll | 4 +++-
 .../MemorySanitizer/AArch64/sme-aarch64-svcount.ll      | 4 +++-
 .../MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll | 4 +++-
 .../MemorySanitizer/AArch64/sme2-intrinsics-add.ll      | 4 +++-
 4 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
index a614219df08c3..d441624519de4 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: not opt -S -passes=msan -mattr=+sme -o - %s 2>&1
+; RUN: opt -S -passes=msan -mattr=+sme -o - %s 2>&1
+
+; XFAIL: *
 
 ; Forked from llvm/test/CodeGen/AArch64/sme-aarch64-svcount.ll
 ; Manually minimized to show MSan leads to a compiler crash
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll
index 6d6f82df87a85..00cf3204464d0 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: not opt -S -passes=msan -mattr=+sme -o - %s
+; RUN: opt -S -passes=msan -mattr=+sme -o - %s
+
+; XFAIL: *
 
 ; Forked from llvm/test/CodeGen/AArch64/sme-aarch64-svcount.ll
 
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
index e7f7c88a1ca25..3f43efa233621 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add-mini.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: not opt -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s
+; RUN: opt -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s
+
+; XFAIL: *
 
 ; Forked from llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
 ; Manually reduced to show MSan leads to a compiler crash
diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll
index ec75fe385a0a5..cd04373c11d20 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme2-intrinsics-add.ll
@@ -1,5 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: not opt -print-on-crash -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s
+; RUN: opt -S -passes=msan -mattr=+sme2 -mattr=+sme-i16i64 -mattr=+sme-f64f64 -o - %s
+
+; XFAIL: *
 
 ; Forked from llvm/test/CodeGen/AArch64/sme2-intrinsics-add.ll
 

From f4fb837f1b31409bb8ee8aed62fce3cdcc0472cd Mon Sep 17 00:00:00 2001
From: Thurston Dang
Date: Tue, 21 Oct 2025 20:51:38 +0000
Subject: [PATCH 4/4] Remove 2>&1

---
 .../MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
index d441624519de4..1c869bd41b931 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/AArch64/sme-aarch64-svcount-mini.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=msan -mattr=+sme -o - %s 2>&1
+; RUN: opt -S -passes=msan -mattr=+sme -o - %s
 
 ; XFAIL: *