From 04347d834cf9c330378074573d55c5817cbb183d Mon Sep 17 00:00:00 2001 From: Simon Pilgrim Date: Mon, 1 Dec 2025 15:46:52 +0000 Subject: [PATCH] [X86] combineConcatVectorOps - add handling to concat fp rounding intrinsics together --- llvm/lib/Target/X86/X86ISelLowering.cpp | 16 ++++ llvm/test/CodeGen/X86/combine-fceil.ll | 90 ++++++++++++--------- llvm/test/CodeGen/X86/combine-fnearbyint.ll | 90 ++++++++++++--------- llvm/test/CodeGen/X86/combine-frint.ll | 90 ++++++++++++--------- llvm/test/CodeGen/X86/combine-froundeven.ll | 90 ++++++++++++--------- llvm/test/CodeGen/X86/combine-ftrunc.ll | 90 ++++++++++++--------- llvm/test/CodeGen/X86/combine-rndscale.ll | 90 ++++++++++++--------- 7 files changed, 340 insertions(+), 216 deletions(-) diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 74a02711bd98a..539b238d5043f 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -59459,6 +59459,11 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, } break; case ISD::FSQRT: + case ISD::FCEIL: + case ISD::FTRUNC: + case ISD::FRINT: + case ISD::FNEARBYINT: + case ISD::FROUNDEVEN: if (!IsSplat && (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.useAVX512Regs()))) { return DAG.getNode(Opcode, DL, VT, ConcatSubOperand(VT, Ops, 0)); @@ -59470,6 +59475,17 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT, return DAG.getNode(Opcode, DL, VT, ConcatSubOperand(VT, Ops, 0)); } break; + case X86ISD::VRNDSCALE: + if (!IsSplat && + (VT.is256BitVector() || + (VT.is512BitVector() && Subtarget.useAVX512Regs())) && + llvm::all_of(Ops, [Op0](SDValue Op) { + return Op0.getOperand(1) == Op.getOperand(1); + })) { + return DAG.getNode(Opcode, DL, VT, ConcatSubOperand(VT, Ops, 0), + Op0.getOperand(1)); + } + break; case X86ISD::HADD: case X86ISD::HSUB: case X86ISD::FHADD: diff --git a/llvm/test/CodeGen/X86/combine-fceil.ll b/llvm/test/CodeGen/X86/combine-fceil.ll index 78f1476a49152..a3f55e8f64b80 100644 --- a/llvm/test/CodeGen/X86/combine-fceil.ll +++ b/llvm/test/CodeGen/X86/combine-fceil.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512 define <4 x double> @concat_ceil_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) { @@ -13,9 +13,9 @@ define <4 x double> @concat_ceil_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) ; ; AVX-LABEL: concat_ceil_v4f64_v2f64: ; AVX: # %bb.0: -; AVX-NEXT: vroundpd $10, %xmm0, %xmm0 -; AVX-NEXT: vroundpd $10, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundpd $10, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a1) @@ -32,9 +32,9 @@ define <8 x float> @concat_ceil_v8f32_v4f32(<4 x float> %a0, 
<4 x float> %a1) { ; ; AVX-LABEL: concat_ceil_v8f32_v4f32: ; AVX: # %bb.0: -; AVX-NEXT: vroundps $10, %xmm0, %xmm0 -; AVX-NEXT: vroundps $10, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundps $10, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a1) @@ -51,25 +51,34 @@ define <8 x double> @concat_ceil_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, ; SSE-NEXT: roundpd $10, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_ceil_v8f64_v2f64: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundpd $10, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundpd $10, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundpd $10, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundpd $10, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_ceil_v8f64_v2f64: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundpd $10, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundpd $10, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_ceil_v8f64_v2f64: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundpd $10, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundpd $10, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_ceil_v8f64_v2f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $10, %xmm0, %xmm0 -; AVX512-NEXT: vroundpd $10, %xmm1, %xmm1 -; AVX512-NEXT: vroundpd $10, %xmm2, %xmm2 -; AVX512-NEXT: vroundpd $10, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $10, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.ceil.v2f64(<2 x double> %a1) @@ -90,25 +99,34 @@ define <16 x float> @concat_ceil_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, ; SSE-NEXT: roundps $10, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_ceil_v16f32_v4f32: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundps $10, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundps $10, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundps $10, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundps $10, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_ceil_v16f32_v4f32: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundps $10, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundps $10, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_ceil_v16f32_v4f32: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundps $10, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: 
vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundps $10, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_ceil_v16f32_v4f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $10, %xmm0, %xmm0 -; AVX512-NEXT: vroundps $10, %xmm1, %xmm1 -; AVX512-NEXT: vroundps $10, %xmm2, %xmm2 -; AVX512-NEXT: vroundps $10, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $10, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a1) @@ -137,9 +155,9 @@ define <8 x double> @concat_ceil_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) ; ; AVX512-LABEL: concat_ceil_v8f64_v4f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $10, %ymm0, %ymm0 -; AVX512-NEXT: vroundpd $10, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $10, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x double> @llvm.ceil.v4f64(<4 x double> %a0) %v1 = call <4 x double> @llvm.ceil.v4f64(<4 x double> %a1) @@ -164,9 +182,9 @@ define <16 x float> @concat_ceil_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) ; ; AVX512-LABEL: concat_ceil_v16f32_v8f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $10, %ymm0, %ymm0 -; AVX512-NEXT: vroundps $10, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $10, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <8 x float> @llvm.ceil.v8f32(<8 x float> %a0) %v1 = call <8 x float> @llvm.ceil.v8f32(<8 x float> %a1) diff --git a/llvm/test/CodeGen/X86/combine-fnearbyint.ll b/llvm/test/CodeGen/X86/combine-fnearbyint.ll index 14d1017aec630..fde136af7c4c2 100644 --- a/llvm/test/CodeGen/X86/combine-fnearbyint.ll +++ b/llvm/test/CodeGen/X86/combine-fnearbyint.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512 define <4 x double> @concat_nearbyint_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) { @@ -13,9 +13,9 @@ define <4 x double> @concat_nearbyint_v4f64_v2f64(<2 x double> %a0, <2 x double> ; ; AVX-LABEL: concat_nearbyint_v4f64_v2f64: ; AVX: # %bb.0: -; AVX-NEXT: vroundpd $12, %xmm0, %xmm0 -; AVX-NEXT: vroundpd $12, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundpd $12, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a1) @@ -32,9 +32,9 @@ define <8 x float> @concat_nearbyint_v8f32_v4f32(<4 x float> %a0, 
<4 x float> %a ; ; AVX-LABEL: concat_nearbyint_v8f32_v4f32: ; AVX: # %bb.0: -; AVX-NEXT: vroundps $12, %xmm0, %xmm0 -; AVX-NEXT: vroundps $12, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundps $12, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a1) @@ -51,25 +51,34 @@ define <8 x double> @concat_nearbyint_v8f64_v2f64(<2 x double> %a0, <2 x double> ; SSE-NEXT: roundpd $12, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_nearbyint_v8f64_v2f64: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundpd $12, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundpd $12, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundpd $12, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundpd $12, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_nearbyint_v8f64_v2f64: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundpd $12, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundpd $12, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_nearbyint_v8f64_v2f64: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundpd $12, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundpd $12, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_nearbyint_v8f64_v2f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $12, %xmm0, %xmm0 -; AVX512-NEXT: vroundpd $12, %xmm1, %xmm1 -; AVX512-NEXT: vroundpd $12, %xmm2, %xmm2 -; AVX512-NEXT: vroundpd $12, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $12, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.nearbyint.v2f64(<2 x double> %a1) @@ -90,25 +99,34 @@ define <16 x float> @concat_nearbyint_v16f32_v4f32(<4 x float> %a0, <4 x float> ; SSE-NEXT: roundps $12, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_nearbyint_v16f32_v4f32: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundps $12, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundps $12, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundps $12, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundps $12, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_nearbyint_v16f32_v4f32: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundps $12, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundps $12, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_nearbyint_v16f32_v4f32: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundps $12, %ymm0, %ymm0 +; AVX2-NEXT: 
# kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundps $12, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_nearbyint_v16f32_v4f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $12, %xmm0, %xmm0 -; AVX512-NEXT: vroundps $12, %xmm1, %xmm1 -; AVX512-NEXT: vroundps $12, %xmm2, %xmm2 -; AVX512-NEXT: vroundps $12, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $12, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %a1) @@ -137,9 +155,9 @@ define <8 x double> @concat_nearbyint_v8f64_v4f64(<4 x double> %a0, <4 x double> ; ; AVX512-LABEL: concat_nearbyint_v8f64_v4f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $12, %ymm0, %ymm0 -; AVX512-NEXT: vroundpd $12, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $12, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %a0) %v1 = call <4 x double> @llvm.nearbyint.v4f64(<4 x double> %a1) @@ -164,9 +182,9 @@ define <16 x float> @concat_nearbyint_v16f32_v8f32(<8 x float> %a0, <8 x float> ; ; AVX512-LABEL: concat_nearbyint_v16f32_v8f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $12, %ymm0, %ymm0 -; AVX512-NEXT: vroundps $12, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $12, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %a0) %v1 = call <8 x float> @llvm.nearbyint.v8f32(<8 x float> %a1) diff --git a/llvm/test/CodeGen/X86/combine-frint.ll b/llvm/test/CodeGen/X86/combine-frint.ll index 901ce2c1f0d82..1c52529e8386c 100644 --- a/llvm/test/CodeGen/X86/combine-frint.ll +++ b/llvm/test/CodeGen/X86/combine-frint.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512 define <4 x double> @concat_rint_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) { @@ -13,9 +13,9 @@ define <4 x double> @concat_rint_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) ; ; AVX-LABEL: concat_rint_v4f64_v2f64: ; AVX: # %bb.0: -; AVX-NEXT: vroundpd $4, %xmm0, %xmm0 -; AVX-NEXT: vroundpd $4, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundpd $4, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a1) @@ -32,9 +32,9 @@ define <8 
x float> @concat_rint_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) { ; ; AVX-LABEL: concat_rint_v8f32_v4f32: ; AVX: # %bb.0: -; AVX-NEXT: vroundps $4, %xmm0, %xmm0 -; AVX-NEXT: vroundps $4, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundps $4, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a1) @@ -51,25 +51,34 @@ define <8 x double> @concat_rint_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, ; SSE-NEXT: roundpd $4, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_rint_v8f64_v2f64: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundpd $4, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundpd $4, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundpd $4, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundpd $4, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_rint_v8f64_v2f64: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundpd $4, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundpd $4, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_rint_v8f64_v2f64: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundpd $4, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundpd $4, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_rint_v8f64_v2f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $4, %xmm0, %xmm0 -; AVX512-NEXT: vroundpd $4, %xmm1, %xmm1 -; AVX512-NEXT: vroundpd $4, %xmm2, %xmm2 -; AVX512-NEXT: vroundpd $4, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $4, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.rint.v2f64(<2 x double> %a1) @@ -90,25 +99,34 @@ define <16 x float> @concat_rint_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, ; SSE-NEXT: roundps $4, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_rint_v16f32_v4f32: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundps $4, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundps $4, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundps $4, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundps $4, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_rint_v16f32_v4f32: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundps $4, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundps $4, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_rint_v16f32_v4f32: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundps $4, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed 
$xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundps $4, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_rint_v16f32_v4f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $4, %xmm0, %xmm0 -; AVX512-NEXT: vroundps $4, %xmm1, %xmm1 -; AVX512-NEXT: vroundps $4, %xmm2, %xmm2 -; AVX512-NEXT: vroundps $4, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $4, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.rint.v4f32(<4 x float> %a1) @@ -137,9 +155,9 @@ define <8 x double> @concat_rint_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1) ; ; AVX512-LABEL: concat_rint_v8f64_v4f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $4, %ymm0, %ymm0 -; AVX512-NEXT: vroundpd $4, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $4, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x double> @llvm.rint.v4f64(<4 x double> %a0) %v1 = call <4 x double> @llvm.rint.v4f64(<4 x double> %a1) @@ -164,9 +182,9 @@ define <16 x float> @concat_rint_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) ; ; AVX512-LABEL: concat_rint_v16f32_v8f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $4, %ymm0, %ymm0 -; AVX512-NEXT: vroundps $4, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $4, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <8 x float> @llvm.rint.v8f32(<8 x float> %a0) %v1 = call <8 x float> @llvm.rint.v8f32(<8 x float> %a1) diff --git a/llvm/test/CodeGen/X86/combine-froundeven.ll b/llvm/test/CodeGen/X86/combine-froundeven.ll index 484e3a9680450..4bf1e86d887ae 100644 --- a/llvm/test/CodeGen/X86/combine-froundeven.ll +++ b/llvm/test/CodeGen/X86/combine-froundeven.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512 define <4 x double> @concat_roundeven_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) { @@ -13,9 +13,9 @@ define <4 x double> @concat_roundeven_v4f64_v2f64(<2 x double> %a0, <2 x double> ; ; AVX-LABEL: concat_roundeven_v4f64_v2f64: ; AVX: # %bb.0: -; AVX-NEXT: vroundpd $8, %xmm0, %xmm0 -; AVX-NEXT: vroundpd $8, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundpd $8, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a1) @@ -32,9 +32,9 @@ define <8 x float> @concat_roundeven_v8f32_v4f32(<4 
x float> %a0, <4 x float> %a ; ; AVX-LABEL: concat_roundeven_v8f32_v4f32: ; AVX: # %bb.0: -; AVX-NEXT: vroundps $8, %xmm0, %xmm0 -; AVX-NEXT: vroundps $8, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundps $8, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a1) @@ -51,25 +51,34 @@ define <8 x double> @concat_roundeven_v8f64_v2f64(<2 x double> %a0, <2 x double> ; SSE-NEXT: roundpd $8, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_roundeven_v8f64_v2f64: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundpd $8, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundpd $8, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundpd $8, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundpd $8, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_roundeven_v8f64_v2f64: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundpd $8, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundpd $8, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_roundeven_v8f64_v2f64: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundpd $8, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundpd $8, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_roundeven_v8f64_v2f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $8, %xmm0, %xmm0 -; AVX512-NEXT: vroundpd $8, %xmm1, %xmm1 -; AVX512-NEXT: vroundpd $8, %xmm2, %xmm2 -; AVX512-NEXT: vroundpd $8, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $8, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.roundeven.v2f64(<2 x double> %a1) @@ -90,25 +99,34 @@ define <16 x float> @concat_roundeven_v16f32_v4f32(<4 x float> %a0, <4 x float> ; SSE-NEXT: roundps $8, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_roundeven_v16f32_v4f32: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundps $8, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundps $8, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundps $8, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundps $8, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_roundeven_v16f32_v4f32: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundps $8, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundps $8, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_roundeven_v16f32_v4f32: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundps $8, %ymm0, %ymm0 +; AVX2-NEXT: # kill: 
def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundps $8, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_roundeven_v16f32_v4f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $8, %xmm0, %xmm0 -; AVX512-NEXT: vroundps $8, %xmm1, %xmm1 -; AVX512-NEXT: vroundps $8, %xmm2, %xmm2 -; AVX512-NEXT: vroundps $8, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $8, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.roundeven.v4f32(<4 x float> %a1) @@ -137,9 +155,9 @@ define <8 x double> @concat_roundeven_v8f64_v4f64(<4 x double> %a0, <4 x double> ; ; AVX512-LABEL: concat_roundeven_v8f64_v4f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $8, %ymm0, %ymm0 -; AVX512-NEXT: vroundpd $8, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $8, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %a0) %v1 = call <4 x double> @llvm.roundeven.v4f64(<4 x double> %a1) @@ -164,9 +182,9 @@ define <16 x float> @concat_roundeven_v16f32_v8f32(<8 x float> %a0, <8 x float> ; ; AVX512-LABEL: concat_roundeven_v16f32_v8f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $8, %ymm0, %ymm0 -; AVX512-NEXT: vroundps $8, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $8, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %a0) %v1 = call <8 x float> @llvm.roundeven.v8f32(<8 x float> %a1) diff --git a/llvm/test/CodeGen/X86/combine-ftrunc.ll b/llvm/test/CodeGen/X86/combine-ftrunc.ll index a6c703a1cbeae..3dde226db73df 100644 --- a/llvm/test/CodeGen/X86/combine-ftrunc.ll +++ b/llvm/test/CodeGen/X86/combine-ftrunc.ll @@ -1,7 +1,7 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v2 | FileCheck %s --check-prefixes=SSE -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512 define <4 x double> @concat_trunc_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) { @@ -13,9 +13,9 @@ define <4 x double> @concat_trunc_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1 ; ; AVX-LABEL: concat_trunc_v4f64_v2f64: ; AVX: # %bb.0: -; AVX-NEXT: vroundpd $11, %xmm0, %xmm0 -; AVX-NEXT: vroundpd $11, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundpd $11, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a1) @@ -32,9 +32,9 @@ define <8 x float> 
@concat_trunc_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) { ; ; AVX-LABEL: concat_trunc_v8f32_v4f32: ; AVX: # %bb.0: -; AVX-NEXT: vroundps $11, %xmm0, %xmm0 -; AVX-NEXT: vroundps $11, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundps $11, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a1) @@ -51,25 +51,34 @@ define <8 x double> @concat_trunc_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1 ; SSE-NEXT: roundpd $11, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_trunc_v8f64_v2f64: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundpd $11, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundpd $11, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundpd $11, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundpd $11, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_trunc_v8f64_v2f64: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundpd $11, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundpd $11, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_trunc_v8f64_v2f64: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundpd $11, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundpd $11, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_trunc_v8f64_v2f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $11, %xmm0, %xmm0 -; AVX512-NEXT: vroundpd $11, %xmm1, %xmm1 -; AVX512-NEXT: vroundpd $11, %xmm2, %xmm2 -; AVX512-NEXT: vroundpd $11, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $11, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a0) %v1 = call <2 x double> @llvm.trunc.v2f64(<2 x double> %a1) @@ -90,25 +99,34 @@ define <16 x float> @concat_trunc_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, ; SSE-NEXT: roundps $11, %xmm3, %xmm3 ; SSE-NEXT: retq ; -; AVX1OR2-LABEL: concat_trunc_v16f32_v4f32: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundps $11, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundps $11, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundps $11, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundps $11, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_trunc_v16f32_v4f32: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundps $11, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundps $11, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_trunc_v16f32_v4f32: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundps $11, %ymm0, %ymm0 +; AVX2-NEXT: 
# kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundps $11, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_trunc_v16f32_v4f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $11, %xmm0, %xmm0 -; AVX512-NEXT: vroundps $11, %xmm1, %xmm1 -; AVX512-NEXT: vroundps $11, %xmm2, %xmm2 -; AVX512-NEXT: vroundps $11, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $11, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a0) %v1 = call <4 x float> @llvm.trunc.v4f32(<4 x float> %a1) @@ -137,9 +155,9 @@ define <8 x double> @concat_trunc_v8f64_v4f64(<4 x double> %a0, <4 x double> %a1 ; ; AVX512-LABEL: concat_trunc_v8f64_v4f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $11, %ymm0, %ymm0 -; AVX512-NEXT: vroundpd $11, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $11, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x double> @llvm.trunc.v4f64(<4 x double> %a0) %v1 = call <4 x double> @llvm.trunc.v4f64(<4 x double> %a1) @@ -164,9 +182,9 @@ define <16 x float> @concat_trunc_v16f32_v8f32(<8 x float> %a0, <8 x float> %a1) ; ; AVX512-LABEL: concat_trunc_v16f32_v8f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $11, %ymm0, %ymm0 -; AVX512-NEXT: vroundps $11, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $11, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <8 x float> @llvm.trunc.v8f32(<8 x float> %a0) %v1 = call <8 x float> @llvm.trunc.v8f32(<8 x float> %a1) diff --git a/llvm/test/CodeGen/X86/combine-rndscale.ll b/llvm/test/CodeGen/X86/combine-rndscale.ll index 25117e864b512..b557dd8106d8e 100644 --- a/llvm/test/CodeGen/X86/combine-rndscale.ll +++ b/llvm/test/CodeGen/X86/combine-rndscale.ll @@ -1,14 +1,14 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2 -; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=sandybridge | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX1 +; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v3 | FileCheck %s --check-prefixes=AVX,AVX1OR2,AVX2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=AVX,AVX512 define <4 x double> @concat_roundpd_v4f64_v2f64(<2 x double> %a0, <2 x double> %a1) { ; AVX-LABEL: concat_roundpd_v4f64_v2f64: ; AVX: # %bb.0: -; AVX-NEXT: vroundpd $4, %xmm0, %xmm0 -; AVX-NEXT: vroundpd $4, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundpd $4, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 4) %v1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a1, i32 4) @@ -19,9 +19,9 @@ define <4 x double> @concat_roundpd_v4f64_v2f64(<2 x double> %a0, <2 x double> % define <8 x float> @concat_roundps_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) { ; AVX-LABEL: 
concat_roundps_v8f32_v4f32: ; AVX: # %bb.0: -; AVX-NEXT: vroundps $4, %xmm0, %xmm0 -; AVX-NEXT: vroundps $4, %xmm1, %xmm1 +; AVX-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX-NEXT: vroundps $4, %ymm0, %ymm0 ; AVX-NEXT: retq %v0 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 4) %v1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a1, i32 4) @@ -30,25 +30,34 @@ define <8 x float> @concat_roundps_v8f32_v4f32(<4 x float> %a0, <4 x float> %a1) } define <8 x double> @concat_roundpd_v8f64_v2f64(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, <2 x double> %a3) { -; AVX1OR2-LABEL: concat_roundpd_v8f64_v2f64: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundpd $4, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundpd $4, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundpd $4, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundpd $4, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_roundpd_v8f64_v2f64: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundpd $4, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundpd $4, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_roundpd_v8f64_v2f64: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundpd $4, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundpd $4, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_roundpd_v8f64_v2f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $4, %xmm0, %xmm0 -; AVX512-NEXT: vroundpd $4, %xmm1, %xmm1 -; AVX512-NEXT: vroundpd $4, %xmm2, %xmm2 -; AVX512-NEXT: vroundpd $4, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $4, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a0, i32 4) %v1 = call <2 x double> @llvm.x86.sse41.round.pd(<2 x double> %a1, i32 4) @@ -61,25 +70,34 @@ define <8 x double> @concat_roundpd_v8f64_v2f64(<2 x double> %a0, <2 x double> % } define <16 x float> @concat_roundps_v16f32_v4f32(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, <4 x float> %a3) { -; AVX1OR2-LABEL: concat_roundps_v16f32_v4f32: -; AVX1OR2: # %bb.0: -; AVX1OR2-NEXT: vroundps $4, %xmm0, %xmm0 -; AVX1OR2-NEXT: vroundps $4, %xmm1, %xmm1 -; AVX1OR2-NEXT: vroundps $4, %xmm2, %xmm2 -; AVX1OR2-NEXT: vroundps $4, %xmm3, %xmm3 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 -; AVX1OR2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 -; AVX1OR2-NEXT: retq +; AVX1-LABEL: concat_roundps_v16f32_v4f32: +; AVX1: # %bb.0: +; AVX1-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX1-NEXT: vroundps $4, %ymm0, %ymm0 +; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX1-NEXT: vroundps $4, %ymm1, %ymm1 +; AVX1-NEXT: retq +; +; AVX2-LABEL: concat_roundps_v16f32_v4f32: +; AVX2: # %bb.0: +; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 
+; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 +; AVX2-NEXT: vroundps $4, %ymm0, %ymm0 +; AVX2-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX2-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm1 +; AVX2-NEXT: vroundps $4, %ymm1, %ymm1 +; AVX2-NEXT: retq ; ; AVX512-LABEL: concat_roundps_v16f32_v4f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $4, %xmm0, %xmm0 -; AVX512-NEXT: vroundps $4, %xmm1, %xmm1 -; AVX512-NEXT: vroundps $4, %xmm2, %xmm2 -; AVX512-NEXT: vroundps $4, %xmm3, %xmm3 +; AVX512-NEXT: # kill: def $xmm2 killed $xmm2 def $ymm2 +; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0 ; AVX512-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $4, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 4) %v1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a1, i32 4) @@ -100,9 +118,9 @@ define <8 x double> @concat_roundpd_v8f64_v4f64(<4 x double> %a0, <4 x double> % ; ; AVX512-LABEL: concat_roundpd_v8f64_v4f64: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundpd $4, %ymm0, %ymm0 -; AVX512-NEXT: vroundpd $4, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscalepd $4, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a0, i32 4) %v1 = call <4 x double> @llvm.x86.avx.round.pd.256(<4 x double> %a1, i32 4) @@ -119,9 +137,9 @@ define <16 x float> @concat_roundps_v16f32_v8f32(<8 x float> %a0, <8 x float> %a ; ; AVX512-LABEL: concat_roundps_v16f32_v8f32: ; AVX512: # %bb.0: -; AVX512-NEXT: vroundps $4, %ymm0, %ymm0 -; AVX512-NEXT: vroundps $4, %ymm1, %ymm1 +; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0 ; AVX512-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0 +; AVX512-NEXT: vrndscaleps $4, %zmm0, %zmm0 ; AVX512-NEXT: retq %v0 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a0, i32 4) %v1 = call <8 x float> @llvm.x86.avx.round.ps.256(<8 x float> %a1, i32 4)
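
For reference, the IR pattern the new ISD::FCEIL/FTRUNC/FRINT/FNEARBYINT/FROUNDEVEN handling targets is the one exercised by the tests above: round each subvector, then concatenate the results. A minimal sketch (the function name @concat_ceil_sketch is illustrative, not taken from the test suite):

declare <4 x float> @llvm.ceil.v4f32(<4 x float>)

; Illustrative example mirroring combine-fceil.ll, not part of the patch.
define <8 x float> @concat_ceil_sketch(<4 x float> %a0, <4 x float> %a1) {
  ; Each 128-bit half is rounded separately.
  %v0 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a0)
  %v1 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %a1)
  ; The shuffle becomes a concat_vectors node in the DAG; with this patch,
  ; AVX targets round the concatenated 256-bit value with a single
  ; vroundps $10 instead of two 128-bit vroundps plus a vinsertf128.
  %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %res
}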
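
The X86ISD::VRNDSCALE case carries an extra guard, llvm::all_of(Ops, ...) with Op0.getOperand(1) == Op.getOperand(1): every concatenated node must use the same rounding-control immediate, since the widened node takes a single immediate operand. A minimal sketch of a foldable pair (illustrative function name; immediate 4 selects the current rounding mode, as in the rint tests):

declare <4 x float> @llvm.x86.sse41.round.ps(<4 x float>, i32)

; Illustrative example mirroring combine-rndscale.ll, not part of the patch.
define <8 x float> @concat_roundps_sketch(<4 x float> %a0, <4 x float> %a1) {
  ; Both halves use rounding-control immediate 4, so the all_of check
  ; passes and the combine folds them into one 256-bit rounding operation.
  %v0 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a0, i32 4)
  %v1 = call <4 x float> @llvm.x86.sse41.round.ps(<4 x float> %a1, i32 4)
  %res = shufflevector <4 x float> %v0, <4 x float> %v1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x float> %res
}

Had the two calls used different immediates (say i32 4 and i32 10), the all_of check would fail and each half would keep its own 128-bit round.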