From de0f8c265cef662a6acc6d09823d1d461a1bc8b9 Mon Sep 17 00:00:00 2001 From: XinWang10 Date: Thu, 17 Aug 2023 23:01:50 -0700 Subject: [PATCH] [X86]Support options -mno-gather -mno-scatter Gather instructions could lead to security issues, details please refer to https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/gather-data-sampling.html. This supported options -mno-gather and -mno-scatter, which could avoid generating gather/scatter instructions in backend except using intrinsics or inline asms. Reviewed By: pengfei Differential Revision: https://reviews.llvm.org/D157680 --- clang/include/clang/Driver/Options.td | 8 + clang/lib/Driver/ToolChains/Arch/X86.cpp | 6 + .../test/Driver/x86-no-gather-no-scatter.cpp | 8 + llvm/lib/Target/X86/X86.td | 7 + .../lib/Target/X86/X86TargetTransformInfo.cpp | 14 +- llvm/lib/Target/X86/X86TargetTransformInfo.h | 1 + .../X86/x86-prefer-no-gather-no-scatter.ll | 199 ++++++++++++++++++ 7 files changed, 238 insertions(+), 5 deletions(-) create mode 100644 clang/test/Driver/x86-no-gather-no-scatter.cpp create mode 100644 llvm/test/CodeGen/X86/x86-prefer-no-gather-no-scatter.ll diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td index e2438edc8f0f4..e04f67bdb1fa5 100644 --- a/clang/include/clang/Driver/Options.td +++ b/clang/include/clang/Driver/Options.td @@ -5092,6 +5092,10 @@ def mretpoline_external_thunk : Flag<["-"], "mretpoline-external-thunk">, Group< def mno_retpoline_external_thunk : Flag<["-"], "mno-retpoline-external-thunk">, Group; def mvzeroupper : Flag<["-"], "mvzeroupper">, Group; def mno_vzeroupper : Flag<["-"], "mno-vzeroupper">, Group; +def mno_gather : Flag<["-"], "mno-gather">, Group, + HelpText<"Disable generation of gather instructions in auto-vectorization(x86 only)">; +def mno_scatter : Flag<["-"], "mno-scatter">, Group, + HelpText<"Disable generation of scatter instructions in auto-vectorization(x86 
only)">; // These are legacy user-facing driver-level option spellings. They are always // aliases for options that are spelled using the more common Unix / GNU flag @@ -7148,6 +7152,10 @@ def _SLASH_QIntel_jcc_erratum : CLFlag<"QIntel-jcc-erratum">, Alias; def _SLASH_arm64EC : CLFlag<"arm64EC">, HelpText<"Set build target to arm64ec">; +def : CLFlag<"Qgather-">, Alias, + HelpText<"Disable generation of gather instructions in auto-vectorization(x86 only)">; +def : CLFlag<"Qscatter-">, Alias, + HelpText<"Disable generation of scatter instructions in auto-vectorization(x86 only)">; // Non-aliases: diff --git a/clang/lib/Driver/ToolChains/Arch/X86.cpp b/clang/lib/Driver/ToolChains/Arch/X86.cpp index 286bac2e7a2b6..cf2bc63d74ada 100644 --- a/clang/lib/Driver/ToolChains/Arch/X86.cpp +++ b/clang/lib/Driver/ToolChains/Arch/X86.cpp @@ -267,4 +267,10 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple, << A->getSpelling() << Scope; } } + + // -mno-gather, -mno-scatter support + if (Args.hasArg(options::OPT_mno_gather)) + Features.push_back("+prefer-no-gather"); + if (Args.hasArg(options::OPT_mno_scatter)) + Features.push_back("+prefer-no-scatter"); } diff --git a/clang/test/Driver/x86-no-gather-no-scatter.cpp b/clang/test/Driver/x86-no-gather-no-scatter.cpp new file mode 100644 index 0000000000000..7efcc55787c42 --- /dev/null +++ b/clang/test/Driver/x86-no-gather-no-scatter.cpp @@ -0,0 +1,8 @@ +/// Tests -mno-gather and -mno-scatter +// RUN: %clang -c -mno-gather -### %s 2>&1 | FileCheck --check-prefix=NOGATHER %s +// RUN: %clang_cl -c /Qgather- -### %s 2>&1 | FileCheck --check-prefix=NOGATHER %s +// NOGATHER: "-target-feature" "+prefer-no-gather" + +// RUN: %clang -c -mno-scatter -### %s 2>&1 | FileCheck --check-prefix=NOSCATTER %s +// RUN: %clang_cl -c /Qscatter- -### %s 2>&1 | FileCheck --check-prefix=NOSCATTER %s +// NOSCATTER: "-target-feature" "+prefer-no-scatter" diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td index 
0f677b8a4afc1..05cc50712c526 100644 --- a/llvm/lib/Target/X86/X86.td +++ b/llvm/lib/Target/X86/X86.td @@ -659,6 +659,13 @@ def TuningFastGather : SubtargetFeature<"fast-gather", "HasFastGather", "true", "Indicates if gather is reasonably fast (this is true for Skylake client and all AVX-512 CPUs)">; +def TuningPreferNoGather + : SubtargetFeature<"prefer-no-gather", "PreferGather", "false", + "Prefer no gather instructions">; +def TuningPreferNoScatter + : SubtargetFeature<"prefer-no-scatter", "PreferScatter", "false", + "Prefer no scatter instructions">; + def TuningPrefer128Bit : SubtargetFeature<"prefer-128-bit", "Prefer128Bit", "true", "Prefer 128-bit AVX instructions">; diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp index 17981b3b9374a..129a2646dbb77 100644 --- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp @@ -5944,9 +5944,7 @@ bool X86TTIImpl::forceScalarizeMaskedGather(VectorType *VTy, Align Alignment) { (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX()))); } -bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) { - if (!supportsGather()) - return false; +bool X86TTIImpl::isLegalMaskedGatherScatter(Type *DataTy, Align Alignment) { Type *ScalarTy = DataTy->getScalarType(); if (ScalarTy->isPointerTy()) return true; @@ -5961,6 +5959,12 @@ bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) { return IntWidth == 32 || IntWidth == 64; } +bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) { + if (!supportsGather() || !ST->preferGather()) + return false; + return isLegalMaskedGatherScatter(DataTy, Alignment); +} + bool X86TTIImpl::isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, unsigned Opcode1, const SmallBitVector &OpcodeMask) const { @@ -5996,9 +6000,9 @@ bool X86TTIImpl::isLegalAltInstr(VectorType *VecTy, unsigned Opcode0, bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, 
Align Alignment) { // AVX2 doesn't support scatter - if (!ST->hasAVX512()) + if (!ST->hasAVX512() || !ST->preferScatter()) return false; - return isLegalMaskedGather(DataType, Alignment); + return isLegalMaskedGatherScatter(DataType, Alignment); } bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) { diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.h b/llvm/lib/Target/X86/X86TargetTransformInfo.h index 89c7916260a45..0fa0d240a548b 100644 --- a/llvm/lib/Target/X86/X86TargetTransformInfo.h +++ b/llvm/lib/Target/X86/X86TargetTransformInfo.h @@ -261,6 +261,7 @@ class X86TTIImpl : public BasicTTIImplBase { bool forceScalarizeMaskedScatter(VectorType *VTy, Align Alignment) { return forceScalarizeMaskedGather(VTy, Alignment); } + bool isLegalMaskedGatherScatter(Type *DataType, Align Alignment); bool isLegalMaskedGather(Type *DataType, Align Alignment); bool isLegalMaskedScatter(Type *DataType, Align Alignment); bool isLegalMaskedExpandLoad(Type *DataType); diff --git a/llvm/test/CodeGen/X86/x86-prefer-no-gather-no-scatter.ll b/llvm/test/CodeGen/X86/x86-prefer-no-gather-no-scatter.ll new file mode 100644 index 0000000000000..e3f3622f146d9 --- /dev/null +++ b/llvm/test/CodeGen/X86/x86-prefer-no-gather-no-scatter.ll @@ -0,0 +1,199 @@ +; Check that if option prefer-no-gather/scatter can disable gather/scatter instructions. 
+; RUN: llc -mattr=+avx2,+fast-gather %s -o - | FileCheck %s --check-prefixes=GATHER +; RUN: llc -mattr=+avx2,+fast-gather,+prefer-no-gather %s -o - | FileCheck %s --check-prefixes=NO-GATHER +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512dq < %s | FileCheck %s --check-prefix=SCATTER +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512dq,+prefer-no-gather < %s | FileCheck %s --check-prefix=SCATTER-NO-GATHER +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512dq,+prefer-no-scatter < %s | FileCheck %s --check-prefix=GATHER-NO-SCATTER +; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512vl,+avx512dq,+prefer-no-gather,+prefer-no-scatter < %s | FileCheck %s --check-prefix=NO-SCATTER-GATHER + +@A = global [1024 x i8] zeroinitializer, align 128 +@B = global [1024 x i64] zeroinitializer, align 128 +@C = global [1024 x i64] zeroinitializer, align 128 + +; This tests the function that if prefer-no-gather can disable lowerMGather +define void @test() #0 { +; GATHER-LABEL: test: +; GATHER: vpgatherdq +; +; NO-GATHER-LABEL: test: +; NO-GATHER-NOT: vpgatherdq +; +; GATHER-NO-SCATTER-LABEL: test: +; GATHER-NO-SCATTER: vpgatherdq +; +; NO-SCATTER-GATHER-LABEL: test: +; NO-SCATTER-GATHER-NOT: vpgatherdq +iter.check: + br i1 false, label %vec.epilog.scalar.ph, label %vector.main.loop.iter.check + +vector.main.loop.iter.check: ; preds = %iter.check + br i1 false, label %vec.epilog.ph, label %vector.ph + +vector.ph: ; preds = %vector.main.loop.iter.check + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %0 = add i64 %index, 0 + %1 = getelementptr inbounds [1024 x i8], ptr @A, i64 0, i64 %0 + %2 = getelementptr inbounds i8, ptr %1, i32 0 + %wide.load = load <32 x i8>, ptr %2, align 1 + %3 = sext <32 x i8> %wide.load to <32 x i64> + %4 = getelementptr inbounds [1024 x i64], ptr @B, i64 0, <32 x i64> %3 + %wide.masked.gather = 
call <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr> %4, i32 8, <32 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <32 x i64> poison) + %5 = getelementptr inbounds [1024 x i64], ptr @C, i64 0, i64 %0 + %6 = getelementptr inbounds i64, ptr %5, i32 0 + store <32 x i64> %wide.masked.gather, ptr %6, align 8 + %index.next = add nuw i64 %index, 32 + %7 = icmp eq i64 %index.next, 1024 + br i1 %7, label %middle.block, label %vector.body, !llvm.loop !0 + +middle.block: ; preds = %vector.body + %cmp.n = icmp eq i64 1024, 1024 + br i1 %cmp.n, label %for.cond.cleanup, label %vec.epilog.iter.check + +vec.epilog.iter.check: ; preds = %middle.block + br i1 true, label %vec.epilog.scalar.ph, label %vec.epilog.ph + +vec.epilog.ph: ; preds = %vector.main.loop.iter.check, %vec.epilog.iter.check + %vec.epilog.resume.val = phi i64 [ 1024, %vec.epilog.iter.check ], [ 0, %vector.main.loop.iter.check ] + br label %vec.epilog.vector.body + +vec.epilog.vector.body: ; preds = %vec.epilog.vector.body, %vec.epilog.ph + %index2 = phi i64 [ %vec.epilog.resume.val, %vec.epilog.ph ], [ %index.next5, %vec.epilog.vector.body ] + %8 = add i64 %index2, 0 + %9 = getelementptr inbounds [1024 x i8], ptr @A, i64 0, i64 %8 + %10 = getelementptr inbounds i8, ptr %9, i32 0 + %wide.load3 = load <16 x i8>, ptr %10, align 1 + %11 = sext <16 x i8> %wide.load3 to <16 x i64> + %12 = getelementptr inbounds [1024 x i64], ptr @B, i64 0, <16 x i64> %11 + %wide.masked.gather4 = call <16 x i64> @llvm.masked.gather.v16i64.v16p0(<16 x ptr> %12, i32 8, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i64> poison) + %13 = getelementptr inbounds [1024 x i64], ptr @C, i64 0, i64 %8 + %14 = getelementptr inbounds i64, ptr %13, i32 0 + store <16 x i64> %wide.masked.gather4, ptr %14, align 8 + %index.next5 = add nuw i64 %index2, 16 + %15 = icmp eq i64 %index.next5, 1024 + br i1 %15, label %vec.epilog.middle.block, label %vec.epilog.vector.body, !llvm.loop !2 + +vec.epilog.middle.block: ; preds = %vec.epilog.vector.body + %cmp.n1 = icmp eq i64 1024, 1024 + br i1 %cmp.n1, label 
%for.cond.cleanup, label %vec.epilog.scalar.ph + +vec.epilog.scalar.ph: ; preds = %iter.check, %vec.epilog.iter.check, %vec.epilog.middle.block + %bc.resume.val = phi i64 [ 1024, %vec.epilog.middle.block ], [ 1024, %vec.epilog.iter.check ], [ 0, %iter.check ] + br label %for.body + +for.body: ; preds = %for.body, %vec.epilog.scalar.ph + %iv = phi i64 [ %bc.resume.val, %vec.epilog.scalar.ph ], [ %iv.next, %for.body ] + %inA = getelementptr inbounds [1024 x i8], ptr @A, i64 0, i64 %iv + %valA = load i8, ptr %inA, align 1 + %valA.ext = sext i8 %valA to i64 + %inB = getelementptr inbounds [1024 x i64], ptr @B, i64 0, i64 %valA.ext + %valB = load i64, ptr %inB, align 8 + %out = getelementptr inbounds [1024 x i64], ptr @C, i64 0, i64 %iv + store i64 %valB, ptr %out, align 8 + %iv.next = add nuw nsw i64 %iv, 1 + %cmp = icmp ult i64 %iv.next, 1024 + br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !4 + +for.cond.cleanup: ; preds = %vec.epilog.middle.block, %middle.block, %for.body + ret void +} + +declare <32 x i64> @llvm.masked.gather.v32i64.v32p0(<32 x ptr>, i32 immarg, <32 x i1>, <32 x i64>) #1 + +declare <16 x i64> @llvm.masked.gather.v16i64.v16p0(<16 x ptr>, i32 immarg, <16 x i1>, <16 x i64>) #1 +!0 = distinct !{!0, !1} +!1 = !{!"llvm.loop.isvectorized", i32 1} +!2 = distinct !{!2, !1, !3} +!3 = !{!"llvm.loop.unroll.runtime.disable"} +!4 = distinct !{!4, !3, !1} + +; This tests the function that if prefer-no-gather can disable ScalarizeMaskedGather +define <4 x float> @gather_v4f32_ptr_v4i32(<4 x ptr> %ptr, <4 x i32> %trigger, <4 x float> %passthru) { +; GATHER-LABEL: gather_v4f32_ptr_v4i32: +; GATHER: vgatherqps +; +; NO-GATHER-LABEL: gather_v4f32_ptr_v4i32: +; NO-GATHER-NOT: vgatherqps +; +; GATHER-NO-SCATTER-LABEL: gather_v4f32_ptr_v4i32: +; GATHER-NO-SCATTER: vgatherqps +; +; NO-SCATTER-GATHER-LABEL: gather_v4f32_ptr_v4i32: +; NO-SCATTER-GATHER-NOT: vgatherqps + %mask = icmp eq <4 x i32> %trigger, zeroinitializer + %res = call <4 x float> 
@llvm.masked.gather.v4f32.v4p0(<4 x ptr> %ptr, i32 4, <4 x i1> %mask, <4 x float> %passthru) + ret <4 x float> %res +} + +declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>) + +%struct.a = type { [4 x i32], [4 x i8], %struct.b, i32 } +%struct.b = type { i32, i32 } +@c = external dso_local global %struct.a, align 4 + +; This tests the function that if prefer-no-gather can disable ScalarizeMaskedGather +define <8 x i32> @gather_v8i32_v8i32(<8 x i32> %trigger) { +; GATHER-LABEL: gather_v8i32_v8i32: +; GATHER: vpgatherdd +; +; NO-GATHER-LABEL: gather_v8i32_v8i32: +; NO-GATHER-NOT: vpgatherdd +; +; NO-SCATTER-GATHER-LABEL: gather_v8i32_v8i32: +; NO-SCATTER-GATHER-NOT: vpgatherdd + %1 = icmp eq <8 x i32> %trigger, zeroinitializer + %2 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> getelementptr (%struct.a, <8 x ptr> <ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c>, <8 x i64> zeroinitializer, i32 0, <8 x i64> <i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3>), i32 4, <8 x i1> %1, <8 x i32> undef) + %3 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> getelementptr (%struct.a, <8 x ptr> <ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c>, <8 x i64> zeroinitializer, i32 3), i32 4, <8 x i1> %1, <8 x i32> undef) + %4 = add <8 x i32> %2, %3 + %5 = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> getelementptr (%struct.a, <8 x ptr> <ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c, ptr @c>, <8 x i64> zeroinitializer, i32 3), i32 4, <8 x i1> %1, <8 x i32> undef) + %6 = add <8 x i32> %4, %5 + ret <8 x i32> %6 +} + +declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>) + +; scatter test cases +define void @scatter_test1(ptr %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) { +; SCATTER-LABEL: scatter_test1: +; SCATTER: vpscatterdd +; +; SCATTER-NO-GATHER-LABEL: scatter_test1: +; SCATTER-NO-GATHER: vpscatterdd +; +; GATHER-NO-SCATTER-LABEL: scatter_test1: +; GATHER-NO-SCATTER-NOT: vpscatterdd +; +; NO-SCATTER-GATHER-LABEL: scatter_test1: +; NO-SCATTER-GATHER-NOT: vpscatterdd + %broadcast.splatinsert = insertelement <16 x ptr> undef, ptr %base, i32 0 + %broadcast.splat = shufflevector <16 
x ptr> %broadcast.splatinsert, <16 x ptr> undef, <16 x i32> zeroinitializer + + %gep.random = getelementptr i32, <16 x ptr> %broadcast.splat, <16 x i32> %ind + %imask = bitcast i16 %mask to <16 x i1> + call void @llvm.masked.scatter.v16i32.v16p0(<16 x i32>%val, <16 x ptr> %gep.random, i32 4, <16 x i1> %imask) + call void @llvm.masked.scatter.v16i32.v16p0(<16 x i32>%val, <16 x ptr> %gep.random, i32 4, <16 x i1> %imask) + ret void +} + +declare void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> , <8 x ptr> , i32 , <8 x i1> ) +declare void @llvm.masked.scatter.v16i32.v16p0(<16 x i32> , <16 x ptr> , i32 , <16 x i1> ) + +define <8 x i32> @scatter_test2(<8 x i32>%a1, <8 x ptr> %ptr) { +; SCATTER-LABEL: scatter_test2: +; SCATTER: vpscatterqd +; +; SCATTER-NO-GATHER-LABEL: scatter_test2: +; SCATTER-NO-GATHER: vpscatterqd +; +; GATHER-NO-SCATTER-LABEL: scatter_test2: +; GATHER-NO-SCATTER-NOT: vpscatterqd +; +; NO-SCATTER-GATHER-LABEL: scatter_test2: +; NO-SCATTER-GATHER-NOT: vpscatterqd + %a = call <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i32> undef) + + call void @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %a1, <8 x ptr> %ptr, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>) + ret <8 x i32>%a +}