diff --git a/llvm/test/CodeGen/X86/fma-fneg-combine.ll b/llvm/test/CodeGen/X86/fma-fneg-combine.ll
index a67595c1e95a2c..9e47548f27f14a 100644
--- a/llvm/test/CodeGen/X86/fma-fneg-combine.ll
+++ b/llvm/test/CodeGen/X86/fma-fneg-combine.ll
@@ -11,7 +11,7 @@ define <16 x float> @test1(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-NEXT:    vfmsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) - zmm2
 ; CHECK-NEXT:    retq
 entry:
-  %sub.i = fsub <16 x float> , %c
+  %sub.i = fsub <16 x float> , %c
   %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 4) #2
   ret <16 x float> %0
 }
@@ -28,7 +28,7 @@ define <16 x float> @test2(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-NEXT:    retq
 entry:
   %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i32 4) #2
-  %sub.i = fsub <16 x float> , %0
+  %sub.i = fsub <16 x float> , %0
   ret <16 x float> %sub.i
 }
@@ -39,7 +39,7 @@ define <16 x float> @test3(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-NEXT:    retq
 entry:
   %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 -1, i32 4) #2
-  %sub.i = fsub <16 x float> , %0
+  %sub.i = fsub <16 x float> , %0
   ret <16 x float> %sub.i
 }
@@ -50,7 +50,7 @@ define <16 x float> @test4(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-NEXT:    retq
 entry:
   %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 -1, i32 4) #2
-  %sub.i = fsub <16 x float> , %0
+  %sub.i = fsub <16 x float> , %0
   ret <16 x float> %sub.i
 }
@@ -60,7 +60,7 @@ define <16 x float> @test5(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-NEXT:    vfmsub213ps {ru-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
-  %sub.i = fsub <16 x float> , %c
+  %sub.i = fsub <16 x float> , %c
   %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 10) #2
   ret <16 x float> %0
 }
@@ -72,7 +72,7 @@ define <16 x float> @test6(<16 x float> %a, <16 x float> %b, <16 x float> %c) {
 ; CHECK-NEXT:    retq
 entry:
   %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 -1, i32 10) #2
-  %sub.i = fsub <16 x float> , %0
+  %sub.i = fsub <16 x float> , %0
   ret <16 x float> %sub.i
 }
@@ -84,7 +84,7 @@ define <8 x float> @test7(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
 ; CHECK-NEXT:    retq
 entry:
   %0 = tail call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2
-  %sub.i = fsub <8 x float> , %0
+  %sub.i = fsub <8 x float> , %0
   ret <8 x float> %sub.i
 }
@@ -94,7 +94,7 @@ define <8 x float> @test8(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
 ; CHECK-NEXT:    vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
 ; CHECK-NEXT:    retq
 entry:
-  %sub.c = fsub <8 x float> , %c
+  %sub.c = fsub <8 x float> , %c
   %0 = tail call <8 x float> @llvm.x86.fma.vfmsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %sub.c) #2
   ret <8 x float> %0
 }
@@ -109,7 +109,7 @@ define <8 x double> @test9(<8 x double> %a, <8 x double> %b, <8 x double> %c) {
 ; CHECK-NEXT:    retq
 entry:
   %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i32 4) #2
-  %sub.i = fsub <8 x double> , %0
+  %sub.i = fsub <8 x double> , %0
   ret <8 x double> %sub.i
 }
@@ -123,7 +123,7 @@ define <2 x double> @test10(<2 x double> %a, <2 x double> %b, <2 x double> %c) {
 ; CHECK-NEXT:    retq
 entry:
   %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 -1, i32 4) #2
-  %sub.i = fsub <2 x double> , %0
+  %sub.i = fsub <2 x double> , %0
   ret <2 x double> %sub.i
 }
@@ -149,7 +149,7 @@ define <4 x float> @test11(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
 ; KNL-NEXT:    vmovaps %xmm3, %xmm0
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %c
+  %sub.i = fsub <4 x float> , %c
   %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i, i8 %mask, i32 4) #10
   ret <4 x float> %0
 }
@@ -169,7 +169,7 @@ define <4 x float> @test11b(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 z
 ; KNL-NEXT:    vfmsub213ss {{.*#+}} xmm0 {%k1} = (xmm1 * xmm0) - xmm2
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %c
+  %sub.i = fsub <4 x float> , %c
   %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i, i8 %mask, i32 4) #10
   ret <4 x float> %0
 }
@@ -194,7 +194,7 @@ entry:
   %0 = tail call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %c, i32 4) #2
   %bc = bitcast i8 %mask to <8 x i1>
   %sel = select <8 x i1> %bc, <8 x double> %0, <8 x double> %a
-  %sub.i = fsub <8 x double> , %sel
+  %sub.i = fsub <8 x double> , %sel
   ret <8 x double> %sub.i
 }
@@ -218,7 +218,7 @@ define <2 x double> @test13(<2 x double> %a, <2 x double> %b, <2 x double> %c, i
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <2 x double> , %a
+  %sub.i = fsub <2 x double> , %a
   %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %sub.i, <2 x double> %b, <2 x double> %c, i8 %mask, i32 4)
   ret <2 x double> %0
 }
@@ -239,7 +239,7 @@ define <16 x float> @test14(<16 x float> %a, <16 x float> %b, <16 x float> %c, i
 ; KNL-NEXT:    retq
 entry:
   %0 = tail call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 10) #2
-  %sub.i = fsub <16 x float> , %0
+  %sub.i = fsub <16 x float> , %0
   ret <16 x float> %sub.i
 }
@@ -265,7 +265,7 @@ define <16 x float> @test15(<16 x float> %a, <16 x float> %b, <16 x float> %c, i
 ; KNL-NEXT:    retq
 entry:
   %bc = bitcast i16 %mask to <16 x i1>
-  %sub.i = fsub <16 x float> , %a
+  %sub.i = fsub <16 x float> , %a
   %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sub.i, <16 x float> %b, <16 x float> %c, i32 10)
   %sel = select <16 x i1> %bc, <16 x float> %0, <16 x float> %sub.i
   %1 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %sel, <16 x float> %sub.i, <16 x float> %c, i32 9)
@@ -285,7 +285,7 @@ define <16 x float> @test16(<16 x float> %a, <16 x float> %b, <16 x float> %c, i
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vfmsubadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
 ; KNL-NEXT:    retq
-  %sub.i = fsub <16 x float> , %c
+  %sub.i = fsub <16 x float> , %c
   %res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %sub.i, i32 9)
   %bc = bitcast i16 %mask to <16 x i1>
   %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %a
@@ -305,7 +305,7 @@ define <8 x double> @test17(<8 x double> %a, <8 x double> %b, <8 x double> %c, i
 ; KNL-NEXT:    kmovw %edi, %k1
 ; KNL-NEXT:    vfmsubadd132pd {{.*#+}} zmm0 {%k1} = (zmm0 * zmm1) -/+ zmm2
 ; KNL-NEXT:    retq
-  %sub.i = fsub <8 x double> , %c
+  %sub.i = fsub <8 x double> , %c
   %res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %a, <8 x double> %b, <8 x double> %sub.i, i32 4)
   %bc = bitcast i8 %mask to <8 x i1>
   %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %a
@@ -326,7 +326,7 @@ define <4 x float> @test18(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
 ; KNL-NEXT:    vfnmadd213ss {{.*#+}} xmm0 {%k1} = -(xmm1 * xmm0) + xmm2
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %b
+  %sub.i = fsub <4 x float> , %b
   %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %sub.i, <4 x float> %c, i8 %mask, i32 4) #10
   ret <4 x float> %0
 }
@@ -344,8 +344,8 @@ define <4 x float> @test19(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
 ; KNL-NEXT:    vfnmsub213ss {{.*#+}} xmm0 {%k1} = -(xmm1 * xmm0) - xmm2
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %b
-  %sub.i.2 = fsub <4 x float> , %c
+  %sub.i = fsub <4 x float> , %b
+  %sub.i.2 = fsub <4 x float> , %c
   %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %sub.i, <4 x float> %sub.i.2, i8 %mask, i32 4) #10
   ret <4 x float> %0
 }
@@ -365,7 +365,7 @@ define <4 x float> @test20(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
 ; KNL-NEXT:    vmovaps %xmm2, %xmm0
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %b
+  %sub.i = fsub <4 x float> , %b
   %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %a, <4 x float> %sub.i, <4 x float> %c, i8 %mask, i32 4) #10
   ret <4 x float> %0
 }
@@ -383,7 +383,7 @@ define <4 x float> @test21(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
 ; KNL-NEXT:    vfnmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %b
+  %sub.i = fsub <4 x float> , %b
   %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %sub.i, <4 x float> %c, i8 %mask, i32 8) #10
   ret <4 x float> %0
 }
@@ -401,8 +401,8 @@ define <4 x float> @test22(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
 ; KNL-NEXT:    vfnmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %b
-  %sub.i.2 = fsub <4 x float> , %c
+  %sub.i = fsub <4 x float> , %b
+  %sub.i.2 = fsub <4 x float> , %c
   %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %sub.i, <4 x float> %sub.i.2, i8 %mask, i32 8) #10
   ret <4 x float> %0
 }
@@ -422,7 +422,7 @@ define <4 x float> @test23(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
 ; KNL-NEXT:    vmovaps %xmm2, %xmm0
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %b
+  %sub.i = fsub <4 x float> , %b
   %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %a, <4 x float> %sub.i, <4 x float> %c, i8 %mask, i32 8) #10
   ret <4 x float> %0
 }
@@ -440,7 +440,7 @@ define <4 x float> @test24(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 ze
 ; KNL-NEXT:    vfmsub213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; KNL-NEXT:    retq
 entry:
-  %sub.i = fsub <4 x float> , %c
+  %sub.i = fsub <4 x float> , %c
   %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %a, <4 x float> %b, <4 x float> %sub.i, i8 %mask, i32 8) #10
   ret <4 x float> %0
 }
@@ -451,8 +451,8 @@ define <16 x float> @test25(<16 x float> %a, <16 x float> %b, <16 x float> %c)
 ; CHECK-NEXT:    vfnmsub213ps {rn-sae}, %zmm2, %zmm1, %zmm0
 ; CHECK-NEXT:    retq
 entry:
-  %sub.i = fsub <16 x float> , %b
-  %sub.i.2 = fsub <16 x float> , %c
+  %sub.i = fsub <16 x float> , %b
+  %sub.i.2 = fsub <16 x float> , %c
   %0 = tail call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %a, <16 x float> %sub.i, <16 x float> %sub.i.2, i32 8) #2
   ret <16 x float> %0
 }
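
For reference, every hunk above touches the same pattern: an fsub whose first operand is a negative-zero splat (the classic vector fneg idiom), feeding an FMA intrinsic so the backend can fold the sign flip into the fused multiply-add (vfmsub/vfnmadd/vfnmsub/vfmsubadd in the CHECK lines). A minimal sketch, not part of the patch, with a hypothetical function name and the generic llvm.fma intrinsic instead of the AVX-512 intrinsics the tests exercise:

; Sketch only: fsub from a -0.0 splat negates %c; with FMA available the X86
; backend is expected to fold that negation into the FMA and select vfmsub.
define <4 x float> @fneg_fma_sketch(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
entry:
  %neg.c = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %c
  %r = call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %neg.c)
  ret <4 x float> %r
}

declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)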