31 changes: 21 additions & 10 deletions llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -9899,17 +9899,28 @@ SDValue TargetLowering::expandBSWAP(SDNode *N, SelectionDAG &DAG) const {
// Use a rotate by 8. This can be further expanded if necessary.
return DAG.getNode(ISD::ROTL, dl, VT, Op, DAG.getConstant(8, dl, SHVT));
case MVT::i32:
// This is meant for ARM speficially, which has ROTR but no ROTL.
// This is meant for ARM specifically, which has ROTR but no ROTL.
if (isOperationLegalOrCustom(ISD::ROTR, VT)) {
SDValue Mask = DAG.getConstant(0x00FF00FF, dl, VT);
// (x & 0x00FF00FF) rotr 8 | (x rotl 8) & 0x00FF00FF
SDValue And = DAG.getNode(ISD::AND, dl, VT, Op, Mask);
SDValue Rotr =
DAG.getNode(ISD::ROTR, dl, VT, And, DAG.getConstant(8, dl, SHVT));
SDValue Rotl =
DAG.getNode(ISD::ROTR, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
SDValue And2 = DAG.getNode(ISD::AND, dl, VT, Rotl, Mask);
return DAG.getNode(ISD::OR, dl, VT, Rotr, And2);
// eor r3, r0, r0, ror #16
SDValue Ror16 =
DAG.getNode(ISD::ROTR, dl, VT, Op, DAG.getConstant(16, dl, SHVT));
SDValue Xor1 = DAG.getNode(ISD::XOR, dl, VT, Op, Ror16);

// lsr r3, r3, #8
SDValue Lsr8 = DAG.getNode(ISD::SRL, dl, VT, Xor1,
DAG.getConstant(8, dl, SHVT));

// bic r3, r3, #65280 (0xFF00)
// So we need the negated value: ~0x0000FF00 = 0xFFFF00FF
SDValue Mask = DAG.getConstant(0xFFFF00FFu, dl, VT);
SDValue BicResult = DAG.getNode(ISD::AND, dl, VT, Lsr8, Mask);

// ror r0, r0, #8
SDValue Ror8 =
DAG.getNode(ISD::ROTR, dl, VT, Op, DAG.getConstant(8, dl, SHVT));

// eor r0, r3, r0, ror #8
return DAG.getNode(ISD::XOR, dl, VT, BicResult, Ror8);
}
Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, dl, SHVT));
Tmp3 = DAG.getNode(ISD::AND, dl, VT, Op,
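For reference, a minimal C++ sketch of the four-operation sequence this expansion now emits; `ror32`, `bswap32`, and the test value are illustrative stand-ins, not part of the patch:

```cpp
#include <cstdint>
#include <cstdio>

// Stand-in for ARM's "ror" shifted operand.
static uint32_t ror32(uint32_t x, unsigned n) {
  return (x >> n) | (x << (32 - n));
}

// Mirrors the emitted sequence:
//   eor r3, r0, r0, ror #16
//   lsr r3, r3, #8
//   bic r3, r3, #0xFF00
//   eor r0, r3, r0, ror #8
static uint32_t bswap32(uint32_t x) {
  uint32_t t = x ^ ror32(x, 16); // bytes: b3^b1, b2^b0, b1^b3, b0^b2
  t >>= 8;                       // move the cross terms down one byte lane
  t &= 0xFFFF00FFu;              // bic #0xFF00: drop the unwanted b2^b0 term
  return t ^ ror32(x, 8);        // remaining XOR terms cancel, leaving the swap
}

int main() {
  printf("%08x\n", bswap32(0x11223344u)); // expect 44332211
}
```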
99 changes: 73 additions & 26 deletions llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -118,6 +118,7 @@ using namespace llvm;
#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
@@ -142,6 +143,12 @@ static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
cl::desc("Maximum size of ALL constants to promote into a constant pool"),
cl::init(128));

static cl::opt<bool>
EnableOptimizeLogicalImm("arm-enable-logical-imm", cl::Hidden,
cl::desc("Enable ARM logical imm instruction "
"optimization"),
cl::init(true));

cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
cl::desc("Maximum interleave factor for MVE VLDn to generate."),
@@ -20138,6 +20145,16 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
}
}

static bool isLegalLogicalImmediate(unsigned Imm,
const ARMSubtarget *Subtarget) {
if (!Subtarget->isThumb())
return ARM_AM::getSOImmVal(Imm) != -1;
if (Subtarget->isThumb2())
return ARM_AM::getT2SOImmVal(Imm) != -1;
// Thumb1 only has an 8-bit unsigned immediate.
return Imm <= 255;
}

bool ARMTargetLowering::targetShrinkDemandedConstant(
SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
TargetLoweringOpt &TLO) const {
@@ -20146,8 +20163,7 @@ bool ARMTargetLowering::targetShrinkDemandedConstant(
if (!TLO.LegalOps)
return false;

// Only optimize AND for now.
if (Op.getOpcode() != ISD::AND)
if (!EnableOptimizeLogicalImm)
return false;

EVT VT = Op.getValueType();
@@ -20158,28 +20174,28 @@

assert(VT == MVT::i32 && "Unexpected integer type");

// Exit early if we demand all bits.
if (DemandedBits.popcount() == 32)
return false;

// Only optimize AND for now.
if (Op.getOpcode() != ISD::AND)
return false;

// Make sure the RHS really is a constant.
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
if (!C)
return false;

unsigned Mask = C->getZExtValue();

if (Mask == 0 || Mask == ~0U)
return false;

unsigned Demanded = DemandedBits.getZExtValue();
unsigned ShrunkMask = Mask & Demanded;
unsigned ExpandedMask = Mask | ~Demanded;

// If the mask is all zeros, let the target-independent code replace the
// result with zero.
if (ShrunkMask == 0)
return false;

// If the mask is all ones, erase the AND. (Currently, the target-independent
// code won't do this, so we have to do it explicitly to avoid an infinite
// loop in obscure cases.)
if (ExpandedMask == ~0U)
return TLO.CombineTo(Op, Op.getOperand(0));

auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
};
@@ -20192,30 +20208,61 @@
return TLO.CombineTo(Op, NewOp);
};

// Prefer uxtb mask.
if (IsLegalMask(0xFF))
return UseMask(0xFF);
// If the mask is all zeros, let the target-independent code replace the
// result with zero.
if (ShrunkMask == 0) {
++NumOptimizedImms;
return UseMask(ShrunkMask);
}

// Prefer uxth mask.
if (IsLegalMask(0xFFFF))
return UseMask(0xFFFF);
// If the mask is all ones, erase the AND. (Currently, the target-independent
// code won't do this, so we have to do it explicitly to avoid an infinite
// loop in obscure cases.)
if (ExpandedMask == ~0U) {
++NumOptimizedImms;
return UseMask(ExpandedMask);
}

// [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
// FIXME: Prefer a contiguous sequence of bits for other optimizations.
if (ShrunkMask < 256)
// If Thumb1 with v6, check for the uxtb and uxth masks first and foremost.
if (Subtarget->isThumb1Only() && Subtarget->hasV6Ops()) {
if (IsLegalMask(0xFF)) {
++NumOptimizedImms;
return UseMask(0xFF);
}

if (IsLegalMask(0xFFFF)) {
++NumOptimizedImms;
return UseMask(0xFFFF);
}
}

// Don't optimize if it is legal already.
if (isLegalLogicalImmediate(Mask, Subtarget))
return false;

if (isLegalLogicalImmediate(ShrunkMask, Subtarget)) {
++NumOptimizedImms;
return UseMask(ShrunkMask);
}

// [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
// FIXME: Prefer a contiguous sequence of bits for other optimizations.
if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
// FIXME: The check for v6 is because this interferes with some ubfx
// optimizations.
if (!Subtarget->hasV6Ops() &&
isLegalLogicalImmediate(~ExpandedMask, Subtarget)) {
++NumOptimizedImms;
return UseMask(ExpandedMask);
}

if ((~ExpandedMask) < 256) {
++NumOptimizedImms;
return UseMask(ExpandedMask);
}

// Potential improvements:
//
// We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
// We could try to prefer Thumb1 immediates which can be lowered to a
// two-instruction sequence.
// We could try to recognize more legal ARM/Thumb2 immediates here.

return false;
}
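For context, here is a self-contained sketch of the encoding rule that `isLegalLogicalImmediate` defers to in ARM mode: `ARM_AM::getSOImmVal` accepts an 8-bit value rotated right by an even amount. The helper names and test values below are illustrative, not LLVM API:

```cpp
#include <cstdint>

// Rotate left, guarding the n == 0 case.
static uint32_t rol32(uint32_t x, unsigned n) {
  n &= 31;
  return n == 0 ? x : (x << n) | (x >> (32 - n));
}

// An A32 data-processing immediate is imm8 ror rot with rot even, so a
// value is encodable iff rotating it left by some even amount fits in
// 8 bits.
static bool isARMSOImm(uint32_t v) {
  for (unsigned rot = 0; rot < 32; rot += 2)
    if (rol32(v, rot) <= 0xFFu)
      return true;
  return false;
}

int main() {
  // 0xFF00 is encodable (0xFF ror 24); 0xFF0000FF spans a 16-bit cyclic
  // run, so no single 8-bit window covers it.
  return isARMSOImm(0xFF00u) && !isARMSOImm(0xFF0000FFu) ? 0 : 1;
}
```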
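And a standalone illustration of the shrink/expand bounds computed above: any constant that keeps every bit of `ShrunkMask` and stays within `ExpandedMask` produces the same demanded bits, so the lowering may pick whichever such constant encodes most cheaply. The numbers mirror the `and-cmpz.ll` change further down; the driver is a made-up sanity check, not part of the patch:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Mirrors and-cmpz.ll: x & 0xFFFF (uxth) whose result is only consumed
  // by lsr #9, so just bits 31:9 of the AND are demanded.
  uint32_t Mask = 0xFFFFu, Demanded = 0xFFFFFE00u;
  uint32_t ShrunkMask = Mask & Demanded;    // 0xFE00: bits that must stay set
  uint32_t ExpandedMask = Mask | ~Demanded; // 0xFFFF: bits allowed to be set

  auto IsUsable = [&](uint32_t M) {
    return (ShrunkMask & M) == ShrunkMask && (~ExpandedMask & M) == 0;
  };
  assert(IsUsable(0xFE00u)); // legal Thumb2 immediate: and r0, r0, #65024

  // Exhaustive-ish check that the shrunk mask agrees on the demanded bits.
  for (uint64_t x = 0; x <= 0xFFFFFFFFull; x += 0x1234u)
    assert(((x & Mask) & Demanded) == ((x & 0xFE00u) & Demanded));
  return 0;
}
```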
26 changes: 0 additions & 26 deletions llvm/lib/Target/ARM/README.txt
@@ -606,32 +606,6 @@ constant which was already loaded). Not sure what's necessary to do that.

//===---------------------------------------------------------------------===//

The code generated for bswap on armv4/5 (CPUs without rev) is less than ideal:

int a(int x) { return __builtin_bswap32(x); }

a:
mov r1, #255, 24
mov r2, #255, 16
and r1, r1, r0, lsr #8
and r2, r2, r0, lsl #8
orr r1, r1, r0, lsr #24
orr r0, r2, r0, lsl #24
orr r0, r0, r1
bx lr

Something like the following would be better (fewer instructions/registers):
eor r1, r0, r0, ror #16
bic r1, r1, #0xff0000
mov r1, r1, lsr #8
eor r0, r1, r0, ror #8
bx lr

A custom Thumb version would also be a slight improvement over the generic
version.

//===---------------------------------------------------------------------===//

Consider the following simple C code:

void foo(unsigned char *a, unsigned char *b, int *c) {
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/ARM/and-cmpz.ll
@@ -92,7 +92,7 @@ false:
; T1: uxth r0, r0
; T1-NEXT: lsrs r0, r0, #9
; T1-NEXT: bne
; T2: uxth r0, r0
; T2: and r0, r0, #65024
; T2-NEXT: movs r2, #0
; T2-NEXT: cmp.w r2, r0, lsr #9
define void @i16_cmpz(i16 %x, ptr %foo) {
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/ARM/fpenv.ll
@@ -41,8 +41,8 @@ define void @func_05() {
; CHECK-LABEL: func_05:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmrs r0, fpscr
; CHECK-NEXT: bic r0, r0, #12582912
; CHECK-NEXT: orr r0, r0, #4194304
; CHECK-NEXT: bic r0, r0, #8388608
; CHECK-NEXT: vmsr fpscr, r0
; CHECK-NEXT: mov pc, lr
call void @llvm.set.rounding(i32 2)
@@ -53,8 +53,8 @@ define void @func_06() {
; CHECK-LABEL: func_06:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmrs r0, fpscr
; CHECK-NEXT: bic r0, r0, #12582912
; CHECK-NEXT: orr r0, r0, #8388608
; CHECK-NEXT: bic r0, r0, #4194304
; CHECK-NEXT: vmsr fpscr, r0
; CHECK-NEXT: mov pc, lr
call void @llvm.set.rounding(i32 3)
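The two rounding-mode sequences above remain equivalent: the `orr`/`bic` pair was reordered, and the `bic` constant shrank from the full two-bit RMode mask (0xC00000, FPSCR bits 23:22) to the single bit the `orr` does not already pin. A quick standalone check for the `func_05` case (constants taken from the test; the loop stride is arbitrary):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // func_05 sets rounding upward: RMode (FPSCR bits 23:22) = 0b01.
  for (uint64_t v = 0; v <= 0xFFFFFFFFull; v += 0x2467u) {
    uint32_t fpscr = (uint32_t)v;
    uint32_t before = (fpscr & ~0xC00000u) | 0x400000u; // bic #12582912; orr #4194304
    uint32_t after  = (fpscr | 0x400000u) & ~0x800000u; // orr #4194304; bic #8388608
    assert(before == after);
  }
  return 0;
}
```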
5 changes: 2 additions & 3 deletions llvm/test/CodeGen/ARM/funnel-shift-rot.ll
@@ -19,7 +19,7 @@ declare <4 x i32> @llvm.fshr.v4i32(<4 x i32>, <4 x i32>, <4 x i32>)
define i8 @rotl_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotl_i8_const_shift:
; CHECK: @ %bb.0:
; CHECK-NEXT: uxtb r1, r0
; CHECK-NEXT: and r1, r0, #224
; CHECK-NEXT: lsl r0, r0, #3
; CHECK-NEXT: orr r0, r0, r1, lsr #5
; CHECK-NEXT: bx lr
@@ -161,8 +161,7 @@ define <4 x i32> @rotl_v4i32_rotl_const_shift(<4 x i32> %x) {
define i8 @rotr_i8_const_shift(i8 %x) {
; CHECK-LABEL: rotr_i8_const_shift:
; CHECK: @ %bb.0:
; CHECK-NEXT: uxtb r1, r0
; CHECK-NEXT: lsr r1, r1, #3
; CHECK-NEXT: ubfx r1, r0, #3, #5
; CHECK-NEXT: orr r0, r1, r0, lsl #5
; CHECK-NEXT: bx lr
%f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
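The `ubfx` above computes the same value as the old `uxtb`+`lsr` pair: `(x & 0xFF) >> 3` is exactly a five-bit bitfield extract starting at bit 3. A tiny illustrative check, not part of the test suite:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x = 0; x <= 0xFFFFu; ++x)
    assert(((x & 0xFFu) >> 3) == ((x >> 3) & 0x1Fu)); // ubfx r1, r0, #3, #5
  return 0;
}
```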
56 changes: 36 additions & 20 deletions llvm/test/CodeGen/ARM/hoist-and-by-const-from-lshr-in-eqcmp-zero.ll
@@ -21,9 +21,9 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
; ARM-LABEL: scalar_i8_signbit_eq:
; ARM: @ %bb.0:
; ARM-NEXT: uxtb r1, r1
; ARM-NEXT: lsl r0, r0, r1
; ARM-NEXT: mov r2, #128
; ARM-NEXT: and r0, r2, r0, lsl r1
; ARM-NEXT: mov r1, #1
; ARM-NEXT: uxtb r0, r0
; ARM-NEXT: eor r0, r1, r0, lsr #7
; ARM-NEXT: bx lr
;
@@ -42,7 +42,7 @@ define i1 @scalar_i8_signbit_eq(i8 %x, i8 %y) nounwind {
; THUMB78-NEXT: uxtb r1, r1
; THUMB78-NEXT: lsls r0, r1
; THUMB78-NEXT: movs r1, #1
; THUMB78-NEXT: uxtb r0, r0
; THUMB78-NEXT: and r0, r0, #128
; THUMB78-NEXT: eor.w r0, r1, r0, lsr #7
; THUMB78-NEXT: bx lr
%t0 = lshr i8 128, %y
@@ -122,9 +122,9 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
; ARM-LABEL: scalar_i16_signbit_eq:
; ARM: @ %bb.0:
; ARM-NEXT: uxth r1, r1
; ARM-NEXT: lsl r0, r0, r1
; ARM-NEXT: mov r2, #32768
; ARM-NEXT: and r0, r2, r0, lsl r1
; ARM-NEXT: mov r1, #1
; ARM-NEXT: uxth r0, r0
; ARM-NEXT: eor r0, r1, r0, lsr #15
; ARM-NEXT: bx lr
;
@@ -144,7 +144,7 @@ define i1 @scalar_i16_signbit_eq(i16 %x, i16 %y) nounwind {
; THUMB78-NEXT: uxth r1, r1
; THUMB78-NEXT: lsls r0, r1
; THUMB78-NEXT: movs r1, #1
; THUMB78-NEXT: uxth r0, r0
; THUMB78-NEXT: and r0, r0, #32768
; THUMB78-NEXT: eor.w r0, r1, r0, lsr #15
; THUMB78-NEXT: bx lr
%t0 = lshr i16 32768, %y
@@ -862,21 +862,35 @@ define <4 x i1> @vec_4xi32_nonsplat_undef2_eq(<4 x i32> %x, <4 x i32> %y) nounwind {
;------------------------------------------------------------------------------;

define i1 @scalar_i8_signbit_ne(i8 %x, i8 %y) nounwind {
; ARM-LABEL: scalar_i8_signbit_ne:
; ARM: @ %bb.0:
; ARM-NEXT: uxtb r1, r1
; ARM-NEXT: lsl r0, r0, r1
; ARM-NEXT: uxtb r0, r0
; ARM-NEXT: lsr r0, r0, #7
; ARM-NEXT: bx lr
; ARM6-LABEL: scalar_i8_signbit_ne:
; ARM6: @ %bb.0:
; ARM6-NEXT: uxtb r1, r1
; ARM6-NEXT: mov r2, #128
; ARM6-NEXT: and r0, r2, r0, lsl r1
; ARM6-NEXT: lsr r0, r0, #7
; ARM6-NEXT: bx lr
;
; THUMB-LABEL: scalar_i8_signbit_ne:
; THUMB: @ %bb.0:
; THUMB-NEXT: uxtb r1, r1
; THUMB-NEXT: lsls r0, r1
; THUMB-NEXT: uxtb r0, r0
; THUMB-NEXT: lsrs r0, r0, #7
; THUMB-NEXT: bx lr
; ARM78-LABEL: scalar_i8_signbit_ne:
; ARM78: @ %bb.0:
; ARM78-NEXT: uxtb r1, r1
; ARM78-NEXT: lsl r0, r0, r1
; ARM78-NEXT: ubfx r0, r0, #7, #1
; ARM78-NEXT: bx lr
;
; THUMB6-LABEL: scalar_i8_signbit_ne:
; THUMB6: @ %bb.0:
; THUMB6-NEXT: uxtb r1, r1
; THUMB6-NEXT: lsls r0, r1
; THUMB6-NEXT: uxtb r0, r0
; THUMB6-NEXT: lsrs r0, r0, #7
; THUMB6-NEXT: bx lr
;
; THUMB78-LABEL: scalar_i8_signbit_ne:
; THUMB78: @ %bb.0:
; THUMB78-NEXT: uxtb r1, r1
; THUMB78-NEXT: lsls r0, r1
; THUMB78-NEXT: ubfx r0, r0, #7, #1
; THUMB78-NEXT: bx lr
%t0 = lshr i8 128, %y
%t1 = and i8 %t0, %x
%res = icmp ne i8 %t1, 0 ; we are perfectly happy with 'ne' predicate
@@ -1051,3 +1065,5 @@ define i1 @scalar_i8_signbit_eq_with_nonzero(i8 %x, i8 %y) nounwind {
%res = icmp eq i8 %t1, 1 ; should be comparing with 0
ret i1 %res
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; THUMB: {{.*}}