From 7968b981bf094f7bb2942a8389de4bf496676cc4 Mon Sep 17 00:00:00 2001 From: Charlie Turner Date: Tue, 17 Nov 2015 17:25:15 +0000 Subject: [PATCH] [ARM] Don't pessimize i32 vselect. The underlying issues surrounding codegen for 32-bit vselects have been resolved. The pessimistic costs for 64-bit vselects remain due to the bad scalarization that is still happening there. I tested this on A57 in T32, A32 and A64 modes. I saw no regressions, and some improvements. From my benchmarks, I saw these improvements in A57 (T32) spec.cpu2000.ref.177_mesa 5.95% lnt.SingleSource/Benchmarks/Shootout/strcat 12.93% lnt.MultiSource/Benchmarks/MiBench/telecomm-CRC32/telecomm-CRC32 11.89% I also measured A57 A32, A53 T32 and A9 T32 and found no performance regressions. I see much bigger wins in third-party benchmarks with this change. Differential Revision: http://reviews.llvm.org/D14743 llvm-svn: 253349 --- llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp | 3 --- llvm/test/Analysis/CostModel/ARM/select.ll | 6 +++--- llvm/test/CodeGen/ARM/vselect_imax.ll | 11 ++++++----- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp index 45a45a7013c1e..582a057e9234f 100644 --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -274,9 +274,6 @@ int ARMTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) { if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) { // Lowering of some vector selects is currently far from perfect. 
static const TypeConversionCostTblEntry NEONVectorSelectTbl[] = { - { ISD::SELECT, MVT::v16i1, MVT::v16i16, 2*16 + 1 + 3*1 + 4*1 }, - { ISD::SELECT, MVT::v8i1, MVT::v8i32, 4*8 + 1*3 + 1*4 + 1*2 }, - { ISD::SELECT, MVT::v16i1, MVT::v16i32, 4*16 + 1*6 + 1*8 + 1*4 }, { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 }, { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 }, { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 } diff --git a/llvm/test/Analysis/CostModel/ARM/select.ll b/llvm/test/Analysis/CostModel/ARM/select.ll index 21eef83c4bbea..57e1418a3f288 100644 --- a/llvm/test/Analysis/CostModel/ARM/select.ll +++ b/llvm/test/Analysis/CostModel/ARM/select.ll @@ -34,16 +34,16 @@ define void @casts() { %v12 = select <4 x i1> undef, <4 x i16> undef, <4 x i16> undef ; CHECK: cost of 1 {{.*}} select %v13 = select <8 x i1> undef, <8 x i16> undef, <8 x i16> undef - ; CHECK: cost of 40 {{.*}} select + ; CHECK: cost of 2 {{.*}} select %v13b = select <16 x i1> undef, <16 x i16> undef, <16 x i16> undef ; CHECK: cost of 1 {{.*}} select %v14 = select <2 x i1> undef, <2 x i32> undef, <2 x i32> undef ; CHECK: cost of 1 {{.*}} select %v15 = select <4 x i1> undef, <4 x i32> undef, <4 x i32> undef - ; CHECK: cost of 41 {{.*}} select + ; CHECK: cost of 2 {{.*}} select %v15b = select <8 x i1> undef, <8 x i32> undef, <8 x i32> undef - ; CHECK: cost of 82 {{.*}} select + ; CHECK: cost of 4 {{.*}} select %v15c = select <16 x i1> undef, <16 x i32> undef, <16 x i32> undef ; CHECK: cost of 1 {{.*}} select diff --git a/llvm/test/CodeGen/ARM/vselect_imax.ll b/llvm/test/CodeGen/ARM/vselect_imax.ll index 3f52ac2db87b0..85c8c5cfcda14 100644 --- a/llvm/test/CodeGen/ARM/vselect_imax.ll +++ b/llvm/test/CodeGen/ARM/vselect_imax.ll @@ -10,8 +10,6 @@ define void @vmax_v4i32(<4 x i32>* %m, <4 x i32> %a, <4 x i32> %b) { ret void } -; We adjusted the cost model of the following selects. When we improve code -; lowering we also need to adjust the cost. 
%T0_10 = type <16 x i16> %T1_10 = type <16 x i1> ; CHECK-LABEL: func_blend10: @@ -23,7 +21,7 @@ define void @func_blend10(%T0_10* %loadaddr, %T0_10* %loadaddr2, ; CHECK: vmin.s16 ; CHECK: vmin.s16 ; COST: func_blend10 -; COST: cost of 40 {{.*}} select +; COST: cost of 2 {{.*}} select %r = select %T1_10 %c, %T0_10 %v0, %T0_10 %v1 store %T0_10 %r, %T0_10* %storeaddr ret void @@ -39,7 +37,7 @@ define void @func_blend14(%T0_14* %loadaddr, %T0_14* %loadaddr2, ; CHECK: vmin.s32 ; CHECK: vmin.s32 ; COST: func_blend14 -; COST: cost of 41 {{.*}} select +; COST: cost of 2 {{.*}} select %r = select %T1_14 %c, %T0_14 %v0, %T0_14 %v1 store %T0_14 %r, %T0_14* %storeaddr ret void @@ -55,11 +53,14 @@ define void @func_blend15(%T0_15* %loadaddr, %T0_15* %loadaddr2, %v1 = load %T0_15, %T0_15* %loadaddr2 %c = icmp slt %T0_15 %v0, %v1 ; COST: func_blend15 -; COST: cost of 82 {{.*}} select +; COST: cost of 4 {{.*}} select %r = select %T1_15 %c, %T0_15 %v0, %T0_15 %v1 store %T0_15 %r, %T0_15* %storeaddr ret void } + +; We adjusted the cost model of the following selects. When we improve code +; lowering we also need to adjust the cost. %T0_18 = type <4 x i64> %T1_18 = type <4 x i1> ; CHECK-LABEL: func_blend18: