diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 57cf66e5275e3..633094df297d6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -45188,6 +45188,7 @@ bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
   case X86ISD::PSHUFD:
   case X86ISD::UNPCKL:
   case X86ISD::UNPCKH:
+  case X86ISD::VPERMILPV:
   case X86ISD::VPERMILPI:
   case X86ISD::VPERMV3: {
     SmallVector<int, 8> Mask;
@@ -45254,6 +45255,7 @@ bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
   case X86ISD::PSHUFD:
   case X86ISD::UNPCKL:
   case X86ISD::UNPCKH:
+  case X86ISD::VPERMILPV:
   case X86ISD::VPERMILPI:
   case X86ISD::VPERMV3:
     return false;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index c9d9db6cc9578..3279a50a1265b 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -373,8 +373,6 @@ define <8 x float> @constant_fold_vpermilvar_ps_256() {
 define <8 x float> @freeze_vpermilvar_ps_256(<8 x float> %a0) {
 ; CHECK-LABEL: freeze_vpermilvar_ps_256:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,3,1,2,7,6,5,4]
-; CHECK-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[0,2,3,1,7,6,5,4]
 ; CHECK-NEXT:    ret{{[l|q]}}
   %s0 = call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> <i32 0, i32 3, i32 1, i32 2, i32 3, i32 2, i32 1, i32 0>)
   %f0 = freeze <8 x float> %s0