381 changes: 273 additions & 108 deletions llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion llvm/lib/Support/LowLevelType.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ void LLT::print(raw_ostream &OS) const {
assert(isScalar() && "unexpected type");
OS << "s" << getScalarSizeInBits();
} else
llvm_unreachable("trying to print an invalid type");
OS << "LLT_invalid";
}

const constexpr LLT::BitFieldInfo LLT::ScalarSizeFieldInfo;
Expand Down
169 changes: 128 additions & 41 deletions llvm/lib/Target/AArch64/AArch64LegalizerInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,110 @@

using namespace llvm;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible, as only legalizing the types that are exactly a power
/// of 2 times the size of the legal types would require specifying all those
/// sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add support for legalizing non-power-of-2
/// sized types further.
// Append every entry of \p v to \p result; whenever the next entry in \p v
// does not begin immediately after the current entry's size, insert an
// explicit Unsupported marker one bit past the current size so the size
// range stays contiguously described.
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned Idx = 0; Idx < v.size(); ++Idx) {
    result.push_back(v[Idx]);
    // Check the successor only when it exists, to avoid reading past the end.
    const bool NextExists = Idx + 1 < v.size();
    if (Idx + 1 < v[Idx].first && NextExists &&
        v[Idx + 1].first != v[Idx].first + 1)
      result.push_back({v[Idx].first + 1, LegalizerInfo::Unsupported});
  }
}

// Size-change strategy: widen s1, leave a gap of Unsupported sizes up to the
// first explicitly specified size, then narrow s128 down to the largest
// specified size; anything wider than 128 bits stays Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_1_narrow_128_ToLargest(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 2);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::WidenScalar});
  result.push_back({2, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  const auto LargestSpecified = result.back().first;
  // There must be room between the largest specified size and 128 bits for
  // the Unsupported gap inserted below.
  assert(LargestSpecified + 1 < 128);
  result.push_back({LargestSpecified + 1, LegalizerInfo::Unsupported});
  result.push_back({128, LegalizerInfo::NarrowScalar});
  result.push_back({129, LegalizerInfo::Unsupported});
  return result;
}

// Size-change strategy: only s16 is widened below the first explicitly
// specified size; every other small size, and everything above the largest
// specified size, is Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::Unsupported});
  result.push_back({16, LegalizerInfo::WidenScalar});
  result.push_back({17, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  result.push_back({result.back().first + 1, LegalizerInfo::Unsupported});
  return result;
}

// Size-change strategy: widen s1 and s8; the gaps in between (2..7 and
// 9..first-specified-size) and everything above the largest specified size
// are Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_1_8(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 9);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::WidenScalar});
  result.push_back({2, LegalizerInfo::Unsupported});
  result.push_back({8, LegalizerInfo::WidenScalar});
  result.push_back({9, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  const auto Biggest = result.back().first;
  result.push_back({Biggest + 1, LegalizerInfo::Unsupported});
  return result;
}

// Size-change strategy: widen the narrow power-of-2 scalars s1, s8 and s16;
// all sizes in between, and everything above the largest specified size,
// are Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_1_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::WidenScalar});
  result.push_back({2, LegalizerInfo::Unsupported});
  result.push_back({8, LegalizerInfo::WidenScalar});
  result.push_back({9, LegalizerInfo::Unsupported});
  result.push_back({16, LegalizerInfo::WidenScalar});
  result.push_back({17, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  result.push_back({result.back().first + 1, LegalizerInfo::Unsupported});
  return result;
}

// Size-change strategy: like widen_1_8_16 for the small sizes, but anything
// above the largest explicitly specified size is narrowed back down to it
// instead of being Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_1_8_16_narrowToLargest(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::WidenScalar});
  result.push_back({2, LegalizerInfo::Unsupported});
  result.push_back({8, LegalizerInfo::WidenScalar});
  result.push_back({9, LegalizerInfo::Unsupported});
  result.push_back({16, LegalizerInfo::WidenScalar});
  result.push_back({17, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  const auto Biggest = result.back().first;
  result.push_back({Biggest + 1, LegalizerInfo::NarrowScalar});
  return result;
}

// Size-change strategy: widen s1, s8, s16 and s32; all sizes in between, and
// everything above the largest explicitly specified size, are Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_1_8_16_32(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 33);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::WidenScalar});
  result.push_back({2, LegalizerInfo::Unsupported});
  result.push_back({8, LegalizerInfo::WidenScalar});
  result.push_back({9, LegalizerInfo::Unsupported});
  result.push_back({16, LegalizerInfo::WidenScalar});
  result.push_back({17, LegalizerInfo::Unsupported});
  result.push_back({32, LegalizerInfo::WidenScalar});
  result.push_back({33, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  const auto Biggest = result.back().first;
  result.push_back({Biggest + 1, LegalizerInfo::Unsupported});
  return result;
}

AArch64LegalizerInfo::AArch64LegalizerInfo() {
using namespace TargetOpcode;
const LLT p0 = LLT::pointer(0, 64);
Expand All @@ -42,8 +146,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo() {
for (auto Ty : {s16, s32, s64, p0})
setAction({G_PHI, Ty}, Legal);

for (auto Ty : {s1, s8})
setAction({G_PHI, Ty}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0, widen_1_8);

for (auto Ty : { s32, s64 })
setAction({G_BSWAP, Ty}, Legal);
Expand All @@ -54,32 +157,33 @@ AArch64LegalizerInfo::AArch64LegalizerInfo() {
for (auto Ty : {s32, s64, v2s32, v4s32, v2s64})
setAction({BinOp, Ty}, Legal);

for (auto Ty : {s1, s8, s16})
setAction({BinOp, Ty}, WidenScalar);
if (BinOp != G_ADD)
setLegalizeScalarToDifferentSizeStrategy(BinOp, 0,
widen_1_8_16_narrowToLargest);
}

setAction({G_GEP, p0}, Legal);
setAction({G_GEP, 1, s64}, Legal);

for (auto Ty : {s1, s8, s16, s32})
setAction({G_GEP, 1, Ty}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(G_GEP, 1, widen_1_8_16_32);

setAction({G_PTR_MASK, p0}, Legal);

for (unsigned BinOp : {G_LSHR, G_ASHR, G_SDIV, G_UDIV}) {
for (auto Ty : {s32, s64})
setAction({BinOp, Ty}, Legal);

for (auto Ty : {s1, s8, s16})
setAction({BinOp, Ty}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(BinOp, 0, widen_1_8_16);
}

for (unsigned BinOp : {G_SREM, G_UREM})
for (auto Ty : { s1, s8, s16, s32, s64 })
setAction({BinOp, Ty}, Lower);

for (unsigned Op : {G_SMULO, G_UMULO})
setAction({Op, s64}, Lower);
for (unsigned Op : {G_SMULO, G_UMULO}) {
setAction({Op, 0, s64}, Lower);
setAction({Op, 1, s1}, Legal);
}

for (unsigned Op : {G_UADDE, G_USUBE, G_SADDO, G_SSUBO, G_SMULH, G_UMULH}) {
for (auto Ty : { s32, s64 })
Expand All @@ -101,8 +205,9 @@ AArch64LegalizerInfo::AArch64LegalizerInfo() {
setAction({G_INSERT, Ty}, Legal);
setAction({G_INSERT, 1, Ty}, Legal);
}
setLegalizeScalarToDifferentSizeStrategy(G_INSERT, 0,
widen_1_8_16_narrowToLargest);
for (auto Ty : {s1, s8, s16}) {
setAction({G_INSERT, Ty}, WidenScalar);
setAction({G_INSERT, 1, Ty}, Legal);
// FIXME: Can't widen the sources because that violates the constraints on
// G_INSERT (It seems entirely reasonable that inputs shouldn't overlap).
Expand All @@ -118,7 +223,8 @@ AArch64LegalizerInfo::AArch64LegalizerInfo() {
for (auto Ty : {s8, s16, s32, s64, p0, v2s32})
setAction({MemOp, Ty}, Legal);

setAction({MemOp, s1}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(MemOp, 0,
widen_1_narrow_128_ToLargest);

// And everything's fine in addrspace 0.
setAction({MemOp, 1, p0}, Legal);
Expand All @@ -132,20 +238,16 @@ AArch64LegalizerInfo::AArch64LegalizerInfo() {

setAction({G_CONSTANT, p0}, Legal);

for (auto Ty : {s1, s8, s16})
setAction({TargetOpcode::G_CONSTANT, Ty}, WidenScalar);

setAction({TargetOpcode::G_FCONSTANT, s16}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(G_CONSTANT, 0, widen_1_8_16);
setLegalizeScalarToDifferentSizeStrategy(G_FCONSTANT, 0, widen_16);

setAction({G_ICMP, 1, s32}, Legal);
setAction({G_ICMP, 1, s64}, Legal);
setAction({G_ICMP, 1, p0}, Legal);

for (auto Ty : {s1, s8, s16}) {
setAction({G_ICMP, Ty}, WidenScalar);
setAction({G_FCMP, Ty}, WidenScalar);
setAction({G_ICMP, 1, Ty}, WidenScalar);
}
setLegalizeScalarToDifferentSizeStrategy(G_ICMP, 0, widen_1_8_16);
setLegalizeScalarToDifferentSizeStrategy(G_FCMP, 0, widen_1_8_16);
setLegalizeScalarToDifferentSizeStrategy(G_ICMP, 1, widen_1_8_16);

setAction({G_ICMP, s32}, Legal);
setAction({G_FCMP, s32}, Legal);
Expand All @@ -159,12 +261,6 @@ AArch64LegalizerInfo::AArch64LegalizerInfo() {
setAction({G_ANYEXT, Ty}, Legal);
}

for (auto Ty : { s1, s8, s16, s32 }) {
setAction({G_ZEXT, 1, Ty}, Legal);
setAction({G_SEXT, 1, Ty}, Legal);
setAction({G_ANYEXT, 1, Ty}, Legal);
}

// FP conversions
for (auto Ty : { s16, s32 }) {
setAction({G_FPTRUNC, Ty}, Legal);
Expand All @@ -176,25 +272,17 @@ AArch64LegalizerInfo::AArch64LegalizerInfo() {
setAction({G_FPEXT, Ty}, Legal);
}

for (auto Ty : { s1, s8, s16, s32 })
setAction({G_TRUNC, Ty}, Legal);

for (auto Ty : { s8, s16, s32, s64 })
setAction({G_TRUNC, 1, Ty}, Legal);

// Conversions
for (auto Ty : { s32, s64 }) {
setAction({G_FPTOSI, 0, Ty}, Legal);
setAction({G_FPTOUI, 0, Ty}, Legal);
setAction({G_SITOFP, 1, Ty}, Legal);
setAction({G_UITOFP, 1, Ty}, Legal);
}
for (auto Ty : { s1, s8, s16 }) {
setAction({G_FPTOSI, 0, Ty}, WidenScalar);
setAction({G_FPTOUI, 0, Ty}, WidenScalar);
setAction({G_SITOFP, 1, Ty}, WidenScalar);
setAction({G_UITOFP, 1, Ty}, WidenScalar);
}
setLegalizeScalarToDifferentSizeStrategy(G_FPTOSI, 0, widen_1_8_16);
setLegalizeScalarToDifferentSizeStrategy(G_FPTOUI, 0, widen_1_8_16);
setLegalizeScalarToDifferentSizeStrategy(G_SITOFP, 1, widen_1_8_16);
setLegalizeScalarToDifferentSizeStrategy(G_UITOFP, 1, widen_1_8_16);

for (auto Ty : { s32, s64 }) {
setAction({G_FPTOSI, 1, Ty}, Legal);
Expand All @@ -209,8 +297,7 @@ AArch64LegalizerInfo::AArch64LegalizerInfo() {
setAction({G_BRINDIRECT, p0}, Legal);

// Select
for (auto Ty : {s1, s8, s16})
setAction({G_SELECT, Ty}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(G_SELECT, 0, widen_1_8_16);

for (auto Ty : {s32, s64, p0})
setAction({G_SELECT, Ty}, Legal);
Expand Down
71 changes: 58 additions & 13 deletions llvm/lib/Target/ARM/ARMLegalizerInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,54 @@

using namespace llvm;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible, as only legalizing the types that are exactly a power
/// of 2 times the size of the legal types would require specifying all those
/// sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add support for legalizing non-power-of-2
/// sized types further.
/// Appends each entry of \p v to \p result, inserting an explicit Unsupported
/// entry whenever a following entry exists whose size does not start
/// immediately after the current entry's size, so the resulting vector
/// describes the size range without implicit gaps.
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    // NOTE(review): the `i + 1 < v[i].first` guard also suppresses
    // gap-filling for entries whose size is very small — confirm intent.
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, LegalizerInfo::Unsupported});
  }
}

// Size-change strategy: s1 is Unsupported; s8 and s16 get widened; the gaps
// up to the first explicitly specified size, and everything above the
// largest specified size, are Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::Unsupported});
  result.push_back({8, LegalizerInfo::WidenScalar});
  result.push_back({9, LegalizerInfo::Unsupported});
  result.push_back({16, LegalizerInfo::WidenScalar});
  result.push_back({17, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  const auto Biggest = result.back().first;
  result.push_back({Biggest + 1, LegalizerInfo::Unsupported});
  return result;
}

// Size-change strategy: widen the narrow power-of-2 scalars s1, s8 and s16;
// all sizes in between, and everything above the largest explicitly
// specified size, are Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_1_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::WidenScalar});
  result.push_back({2, LegalizerInfo::Unsupported});
  result.push_back({8, LegalizerInfo::WidenScalar});
  result.push_back({9, LegalizerInfo::Unsupported});
  result.push_back({16, LegalizerInfo::WidenScalar});
  result.push_back({17, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  result.push_back({result.back().first + 1, LegalizerInfo::Unsupported});
  return result;
}

/// Returns true when the subtarget targets one of the ARM embedded ABI
/// variants (bare-metal AEABI, GNU AEABI, or musl AEABI).
static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}
Expand All @@ -49,23 +97,23 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
}

for (unsigned Op : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR}) {
for (auto Ty : {s1, s8, s16})
setAction({Op, Ty}, WidenScalar);
if (Op != G_ADD)
setLegalizeScalarToDifferentSizeStrategy(
Op, 0, widenToLargerTypesUnsupportedOtherwise);
setAction({Op, s32}, Legal);
}

for (unsigned Op : {G_SDIV, G_UDIV}) {
for (auto Ty : {s8, s16})
setAction({Op, Ty}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(Op, 0,
widenToLargerTypesUnsupportedOtherwise);
if (ST.hasDivideInARMMode())
setAction({Op, s32}, Legal);
else
setAction({Op, s32}, Libcall);
}

for (unsigned Op : {G_SREM, G_UREM}) {
for (auto Ty : {s8, s16})
setAction({Op, Ty}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
if (ST.hasDivideInARMMode())
setAction({Op, s32}, Lower);
else if (AEABI(ST))
Expand All @@ -74,10 +122,8 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
setAction({Op, s32}, Libcall);
}

for (unsigned Op : {G_SEXT, G_ZEXT}) {
for (unsigned Op : {G_SEXT, G_ZEXT, G_ANYEXT}) {
setAction({Op, s32}, Legal);
for (auto Ty : {s1, s8, s16})
setAction({Op, 1, Ty}, Legal);
}

for (unsigned Op : {G_ASHR, G_LSHR, G_SHL})
Expand All @@ -93,12 +139,11 @@ ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
setAction({G_BRCOND, s1}, Legal);

setAction({G_CONSTANT, s32}, Legal);
for (auto Ty : {s1, s8, s16})
setAction({G_CONSTANT, Ty}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(G_CONSTANT, 0, widen_1_8_16);

setAction({G_ICMP, s1}, Legal);
for (auto Ty : {s8, s16})
setAction({G_ICMP, 1, Ty}, WidenScalar);
setLegalizeScalarToDifferentSizeStrategy(G_ICMP, 1,
widenToLargerTypesUnsupportedOtherwise);
for (auto Ty : {s32, p0})
setAction({G_ICMP, 1, Ty}, Legal);

Expand Down
66 changes: 44 additions & 22 deletions llvm/lib/Target/X86/X86LegalizerInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,38 @@
using namespace llvm;
using namespace TargetOpcode;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible, as only legalizing the types that are exactly a power
/// of 2 times the size of the legal types would require specifying all those
/// sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add support for legalizing non-power-of-2
/// sized types further.
// Append all entries of \p v to \p result, inserting an Unsupported marker
// one bit past the current entry's size whenever the next entry does not
// start immediately after it.
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  unsigned I = 0;
  while (I < v.size()) {
    result.push_back(v[I]);
    if (I + 1 < v[I].first && I + 1 < v.size() &&
        v[I + 1].first != v[I].first + 1)
      result.push_back({v[I].first + 1, LegalizerInfo::Unsupported});
    ++I;
  }
}

// Size-change strategy: widen s1; everything from 2 bits up to the first
// explicitly specified size, and everything above the largest specified
// size, is Unsupported.
static LegalizerInfo::SizeAndActionsVec
widen_1(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(!v.empty());
  assert(v[0].first > 1);
  LegalizerInfo::SizeAndActionsVec result;
  result.push_back({1, LegalizerInfo::WidenScalar});
  result.push_back({2, LegalizerInfo::Unsupported});
  addAndInterleaveWithUnsupported(result, v);
  result.push_back({result.back().first + 1, LegalizerInfo::Unsupported});
  return result;
}

X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
const X86TargetMachine &TM)
: Subtarget(STI), TM(TM) {
Expand All @@ -37,6 +69,17 @@ X86LegalizerInfo::X86LegalizerInfo(const X86Subtarget &STI,
setLegalizerInfoAVX512DQ();
setLegalizerInfoAVX512BW();

setLegalizeScalarToDifferentSizeStrategy(G_PHI, 0, widen_1);
for (unsigned BinOp : {G_SUB, G_MUL, G_AND, G_OR, G_XOR})
setLegalizeScalarToDifferentSizeStrategy(BinOp, 0, widen_1);
for (unsigned MemOp : {G_LOAD, G_STORE})
setLegalizeScalarToDifferentSizeStrategy(MemOp, 0,
narrowToSmallerAndWidenToSmallest);
setLegalizeScalarToDifferentSizeStrategy(
G_GEP, 1, widenToLargerTypesUnsupportedOtherwise);
setLegalizeScalarToDifferentSizeStrategy(
G_CONSTANT, 0, widenToLargerTypesAndNarrowToLargest);

computeTables();
}

Expand All @@ -47,23 +90,17 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
const LLT s8 = LLT::scalar(8);
const LLT s16 = LLT::scalar(16);
const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);

for (auto Ty : {p0, s1, s8, s16, s32})
setAction({G_IMPLICIT_DEF, Ty}, Legal);

for (auto Ty : {s8, s16, s32, p0})
setAction({G_PHI, Ty}, Legal);

setAction({G_PHI, s1}, WidenScalar);

for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR}) {
for (unsigned BinOp : {G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
for (auto Ty : {s8, s16, s32})
setAction({BinOp, Ty}, Legal);

setAction({BinOp, s1}, WidenScalar);
}

for (unsigned Op : {G_UADDE}) {
setAction({Op, s32}, Legal);
setAction({Op, 1, s1}, Legal);
Expand All @@ -73,7 +110,6 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
for (auto Ty : {s8, s16, s32, p0})
setAction({MemOp, Ty}, Legal);

setAction({MemOp, s1}, WidenScalar);
// And everything's fine in addrspace 0.
setAction({MemOp, 1, p0}, Legal);
}
Expand All @@ -85,32 +121,20 @@ void X86LegalizerInfo::setLegalizerInfo32bit() {
setAction({G_GEP, p0}, Legal);
setAction({G_GEP, 1, s32}, Legal);

for (auto Ty : {s1, s8, s16})
setAction({G_GEP, 1, Ty}, WidenScalar);

// Control-flow
setAction({G_BRCOND, s1}, Legal);

// Constants
for (auto Ty : {s8, s16, s32, p0})
setAction({TargetOpcode::G_CONSTANT, Ty}, Legal);

setAction({TargetOpcode::G_CONSTANT, s1}, WidenScalar);
setAction({TargetOpcode::G_CONSTANT, s64}, NarrowScalar);

// Extensions
for (auto Ty : {s8, s16, s32}) {
setAction({G_ZEXT, Ty}, Legal);
setAction({G_SEXT, Ty}, Legal);
setAction({G_ANYEXT, Ty}, Legal);
}

for (auto Ty : {s1, s8, s16}) {
setAction({G_ZEXT, 1, Ty}, Legal);
setAction({G_SEXT, 1, Ty}, Legal);
setAction({G_ANYEXT, 1, Ty}, Legal);
}

// Comparison
setAction({G_ICMP, s1}, Legal);

Expand All @@ -123,7 +147,6 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
if (!Subtarget.is64Bit())
return;

const LLT s32 = LLT::scalar(32);
const LLT s64 = LLT::scalar(64);

setAction({G_IMPLICIT_DEF, s64}, Legal);
Expand All @@ -145,7 +168,6 @@ void X86LegalizerInfo::setLegalizerInfo64bit() {
// Extensions
for (unsigned extOp : {G_ZEXT, G_SEXT, G_ANYEXT}) {
setAction({extOp, s64}, Legal);
setAction({extOp, 1, s32}, Legal);
}

// Comparison
Expand Down
67 changes: 67 additions & 0 deletions llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
Original file line number Diff line number Diff line change
Expand Up @@ -167,3 +167,70 @@ end:
%vec = load <2 x i16*>, <2 x i16*>* undef
br label %block
}

; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %vreg1<def>(s96) = G_INSERT %vreg2, %vreg0, 0; (in function: nonpow2_insertvalue_narrowing
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg2<def>(s96) = G_IMPLICIT_DEF; (in function: nonpow2_insertvalue_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_insertvalue_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_insertvalue_narrowing:
%struct96 = type { float, float, float }
define void @nonpow2_insertvalue_narrowing(float %a) {
%dummy = insertvalue %struct96 undef, float %a, 0
ret void
}

; FALLBACK-WITH-REPORT-ERR remark: <unknown>:0:0: unable to legalize instruction: %vreg3<def>(s96) = G_ADD %vreg2, %vreg2; (in function: nonpow2_add_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_add_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_add_narrowing:
define void @nonpow2_add_narrowing() {
%a = add i128 undef, undef
%b = trunc i128 %a to i96
%dummy = add i96 %b, %b
ret void
}

; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg3<def>(s96) = G_OR %vreg2, %vreg2; (in function: nonpow2_or_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_or_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_or_narrowing:
define void @nonpow2_or_narrowing() {
%a = add i128 undef, undef
%b = trunc i128 %a to i96
%dummy = or i96 %b, %b
ret void
}

; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(s96) = G_LOAD %vreg1; mem:LD12[undef](align=16) (in function: nonpow2_load_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_load_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_load_narrowing:
define void @nonpow2_load_narrowing() {
%dummy = load i96, i96* undef
ret void
}

; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: G_STORE %vreg3, %vreg0; mem:ST12[%c](align=16) (in function: nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_store_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_store_narrowing:
define void @nonpow2_store_narrowing(i96* %c) {
%a = add i128 undef, undef
%b = trunc i128 %a to i96
store i96 %b, i96* %c
ret void
}

; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(s96) = G_CONSTANT 0; (in function: nonpow2_constant_narrowing
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_constant_narrowing
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_constant_narrowing:
define void @nonpow2_constant_narrowing() {
store i96 0, i96* undef
ret void
}

; Currently can't handle vector lengths that aren't an exact multiple of
; natively supported vector lengths. Test that the fall-back works for those.
; FALLBACK-WITH-REPORT-ERR-G_IMPLICIT_DEF-LEGALIZABLE: (FIXME: this is what is expected once we can legalize non-pow-of-2 G_IMPLICIT_DEF) remark: <unknown>:0:0: unable to legalize instruction: %vreg1<def>(<7 x s64>) = G_ADD %vreg0, %vreg0; (in function: nonpow2_vector_add_fewerelements
; FALLBACK-WITH-REPORT-ERR: remark: <unknown>:0:0: unable to legalize instruction: %vreg0<def>(<7 x s64>) = G_IMPLICIT_DEF; (in function: nonpow2_vector_add_fewerelements
; FALLBACK-WITH-REPORT-ERR: warning: Instruction selection used fallback path for nonpow2_vector_add_fewerelements
; FALLBACK-WITH-REPORT-OUT-LABEL: nonpow2_vector_add_fewerelements:
define void @nonpow2_vector_add_fewerelements() {
%dummy = add <7 x i64> undef, undef
ret void
}
91 changes: 91 additions & 0 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-add.mir
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@
entry:
ret void
}
define void @test_scalar_add_big_nonpow2() {
entry:
ret void
}
define void @test_scalar_add_small() {
entry:
ret void
Expand All @@ -16,6 +20,10 @@
entry:
ret void
}
define void @test_vector_add_nonpow2() {
entry:
ret void
}
...

---
Expand Down Expand Up @@ -57,6 +65,49 @@ body: |
%x1 = COPY %8
...

---
name: test_scalar_add_big_nonpow2
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
- { id: 3, class: _ }
- { id: 4, class: _ }
- { id: 5, class: _ }
- { id: 6, class: _ }
- { id: 7, class: _ }
- { id: 8, class: _ }
- { id: 9, class: _ }
body: |
bb.0.entry:
liveins: %x0, %x1, %x2, %x3
; CHECK-LABEL: name: test_scalar_add_big_nonpow2
; CHECK-NOT: G_MERGE_VALUES
; CHECK-NOT: G_UNMERGE_VALUES
; CHECK-DAG: [[CARRY0_32:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK-DAG: [[CARRY0:%[0-9]+]]:_(s1) = G_TRUNC [[CARRY0_32]]
; CHECK: [[RES_LO:%[0-9]+]]:_(s64), [[CARRY1:%[0-9]+]]:_(s1) = G_UADDE %0, %1, [[CARRY0]]
; CHECK: [[RES_MI:%[0-9]+]]:_(s64), [[CARRY2:%[0-9]+]]:_(s1) = G_UADDE %1, %2, [[CARRY1]]
; CHECK: [[RES_HI:%[0-9]+]]:_(s64), {{%.*}}(s1) = G_UADDE %2, %3, [[CARRY2]]
; CHECK-NOT: G_MERGE_VALUES
; CHECK-NOT: G_UNMERGE_VALUES
; CHECK: %x0 = COPY [[RES_LO]]
; CHECK: %x1 = COPY [[RES_MI]]
; CHECK: %x2 = COPY [[RES_HI]]
%0(s64) = COPY %x0
%1(s64) = COPY %x1
%2(s64) = COPY %x2
%3(s64) = COPY %x3
%4(s192) = G_MERGE_VALUES %0, %1, %2
%5(s192) = G_MERGE_VALUES %1, %2, %3
%6(s192) = G_ADD %4, %5
%7(s64), %8(s64), %9(s64) = G_UNMERGE_VALUES %6
%x0 = COPY %7
%x1 = COPY %8
%x2 = COPY %9
...

---
name: test_scalar_add_small
registers:
Expand Down Expand Up @@ -124,3 +175,43 @@ body: |
%q0 = COPY %7
%q1 = COPY %8
...
---
name: test_vector_add_nonpow2
registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
- { id: 3, class: _ }
- { id: 4, class: _ }
- { id: 5, class: _ }
- { id: 6, class: _ }
- { id: 7, class: _ }
- { id: 8, class: _ }
- { id: 9, class: _ }
body: |
bb.0.entry:
liveins: %q0, %q1, %q2, %q3
; CHECK-LABEL: name: test_vector_add_nonpow2
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
; CHECK: [[RES_LO:%[0-9]+]]:_(<2 x s64>) = G_ADD %0, %1
; CHECK: [[RES_MI:%[0-9]+]]:_(<2 x s64>) = G_ADD %1, %2
; CHECK: [[RES_HI:%[0-9]+]]:_(<2 x s64>) = G_ADD %2, %3
; CHECK-NOT: G_EXTRACT
; CHECK-NOT: G_SEQUENCE
; CHECK: %q0 = COPY [[RES_LO]]
; CHECK: %q1 = COPY [[RES_MI]]
; CHECK: %q2 = COPY [[RES_HI]]
%0(<2 x s64>) = COPY %q0
%1(<2 x s64>) = COPY %q1
%2(<2 x s64>) = COPY %q2
%3(<2 x s64>) = COPY %q3
%4(<6 x s64>) = G_MERGE_VALUES %0, %1, %2
%5(<6 x s64>) = G_MERGE_VALUES %1, %2, %3
%6(<6 x s64>) = G_ADD %4, %5
%7(<2 x s64>), %8(<2 x s64>), %9(<2 x s64>) = G_UNMERGE_VALUES %6
%q0 = COPY %7
%q1 = COPY %8
%q2 = COPY %9
...
19 changes: 19 additions & 0 deletions llvm/test/CodeGen/AArch64/GlobalISel/legalize-inserts.mir
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@
define void @test_inserts_4() { ret void }
define void @test_inserts_5() { ret void }
define void @test_inserts_6() { ret void }
define void @test_inserts_nonpow2() { ret void }
...

---
Expand Down Expand Up @@ -141,3 +142,21 @@ body: |
%4:_(s128) = G_INSERT %3, %2, 32
RET_ReallyLR
...

---
name: test_inserts_nonpow2
body: |
bb.0:
liveins: %x0, %x1, %x2
; CHECK-LABEL: name: test_inserts_nonpow2
; CHECK: %5:_(s192) = G_MERGE_VALUES %3(s64), %1(s64), %2(s64)
%0:_(s64) = COPY %x0
%1:_(s64) = COPY %x1
%2:_(s64) = COPY %x2
%3:_(s64) = COPY %x3
%4:_(s192) = G_MERGE_VALUES %0, %1, %2
%5:_(s192) = G_INSERT %4, %3, 0
RET_ReallyLR
...
16 changes: 10 additions & 6 deletions llvm/test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir
Original file line number Diff line number Diff line change
Expand Up @@ -970,24 +970,28 @@ registers:
- { id: 1, class: gprb }
- { id: 2, class: gprb }
- { id: 3, class: gprb }
- { id: 4, class: gprb }
body: |
bb.0:
liveins: %r0, %r1
liveins: %r0, %r1, %r2
%0(p0) = COPY %r0
; CHECK: [[VREGX:%[0-9]+]]:gpr = COPY %r0
%1(p0) = COPY %r1
; CHECK: [[VREGY:%[0-9]+]]:gpr = COPY %r1
%2(s1) = G_TRUNC %1(p0)
; CHECK: [[VREGC:%[0-9]+]]:gpr = COPY [[VREGY]]
%2(s32) = COPY %r2
; CHECK: [[VREGC:%[0-9]+]]:gpr = COPY %r2
%3(p0) = G_SELECT %2(s1), %0, %1
; CHECK: CMPri [[VREGC]], 0, 14, _, implicit-def %cpsr
%3(s1) = G_TRUNC %2(s32)
; CHECK: [[VREGD:%[0-9]+]]:gpr = COPY [[VREGC]]
%4(p0) = G_SELECT %3(s1), %0, %1
; CHECK: CMPri [[VREGD]], 0, 14, _, implicit-def %cpsr
; CHECK: [[RES:%[0-9]+]]:gpr = MOVCCr [[VREGX]], [[VREGY]], 0, %cpsr
%r0 = COPY %3(p0)
%r0 = COPY %4(p0)
; CHECK: %r0 = COPY [[RES]]
BX_RET 14, _, implicit %r0
Expand Down
119 changes: 89 additions & 30 deletions llvm/unittests/CodeGen/GlobalISel/LegalizerInfoTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,83 +49,113 @@ TEST(LegalizerInfoTest, ScalarRISC) {
using namespace TargetOpcode;
LegalizerInfo L;
// Typical RISCy set of operations based on AArch64.
L.setAction({G_ADD, LLT::scalar(8)}, LegalizerInfo::WidenScalar);
L.setAction({G_ADD, LLT::scalar(16)}, LegalizerInfo::WidenScalar);
L.setAction({G_ADD, LLT::scalar(32)}, LegalizerInfo::Legal);
L.setAction({G_ADD, LLT::scalar(64)}, LegalizerInfo::Legal);
for (auto Op : {G_ADD, G_SUB}) {
for (unsigned Size : {32, 64})
L.setAction({Op, 0, LLT::scalar(Size)}, LegalizerInfo::Legal);
L.setLegalizeScalarToDifferentSizeStrategy(
Op, 0, LegalizerInfo::widenToLargerTypesAndNarrowToLargest);
}

L.computeTables();

// Check we infer the correct types and actually do what we're told.
ASSERT_EQ(L.getAction({G_ADD, LLT::scalar(8)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({G_ADD, LLT::scalar(16)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({G_ADD, LLT::scalar(32)}),
std::make_pair(LegalizerInfo::Legal, LLT::scalar(32)));
ASSERT_EQ(L.getAction({G_ADD, LLT::scalar(64)}),
std::make_pair(LegalizerInfo::Legal, LLT::scalar(64)));

// Make sure the default for over-sized types applies.
ASSERT_EQ(L.getAction({G_ADD, LLT::scalar(128)}),
std::make_pair(LegalizerInfo::NarrowScalar, LLT::scalar(64)));
for (auto &opcode : {G_ADD, G_SUB}) {
// Check we infer the correct types and actually do what we're told.
ASSERT_EQ(L.getAction({opcode, LLT::scalar(8)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(16)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(32)}),
std::make_pair(LegalizerInfo::Legal, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(64)}),
std::make_pair(LegalizerInfo::Legal, LLT::scalar(64)));

// Make sure the default for over-sized types applies.
ASSERT_EQ(L.getAction({opcode, LLT::scalar(128)}),
std::make_pair(LegalizerInfo::NarrowScalar, LLT::scalar(64)));
// Make sure we also handle unusual sizes
ASSERT_EQ(L.getAction({opcode, LLT::scalar(1)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(31)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(33)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(64)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(63)}),
std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(64)));
ASSERT_EQ(L.getAction({opcode, LLT::scalar(65)}),
std::make_pair(LegalizerInfo::NarrowScalar, LLT::scalar(64)));
}
}

TEST(LegalizerInfoTest, VectorRISC) {
  using namespace TargetOpcode;
  LegalizerInfo L;
  // Typical RISCy set of operations based on ARM.
  L.setScalarInVectorAction(G_ADD, LLT::scalar(8), LegalizerInfo::Legal);
  L.setScalarInVectorAction(G_ADD, LLT::scalar(16), LegalizerInfo::Legal);
  L.setScalarInVectorAction(G_ADD, LLT::scalar(32), LegalizerInfo::Legal);

  L.setAction({G_ADD, LLT::vector(8, 8)}, LegalizerInfo::Legal);
  L.setAction({G_ADD, LLT::vector(16, 8)}, LegalizerInfo::Legal);
  L.setAction({G_ADD, LLT::vector(4, 16)}, LegalizerInfo::Legal);
  L.setAction({G_ADD, LLT::vector(8, 16)}, LegalizerInfo::Legal);
  L.setAction({G_ADD, LLT::vector(2, 32)}, LegalizerInfo::Legal);
  L.setAction({G_ADD, LLT::vector(4, 32)}, LegalizerInfo::Legal);

  L.setLegalizeVectorElementToDifferentSizeStrategy(
      G_ADD, 0, LegalizerInfo::widenToLargerTypesUnsupportedOtherwise);

  L.setAction({G_ADD, 0, LLT::scalar(32)}, LegalizerInfo::Legal);

  L.computeTables();

  // Check we infer the correct types and actually do what we're told for some
  // simple cases.
  // A vector that is too short gets more elements added.
  ASSERT_EQ(L.getAction({G_ADD, LLT::vector(2, 8)}),
            std::make_pair(LegalizerInfo::MoreElements, LLT::vector(8, 8)));
  // An exactly-legal vector is left untouched.
  ASSERT_EQ(L.getAction({G_ADD, LLT::vector(8, 8)}),
            std::make_pair(LegalizerInfo::Legal, LLT::vector(8, 8)));
  // A vector with too many elements is split.
  ASSERT_EQ(
      L.getAction({G_ADD, LLT::vector(8, 32)}),
      std::make_pair(LegalizerInfo::FewerElements, LLT::vector(4, 32)));
  // An illegal element size is widened before the element count is adjusted.
  ASSERT_EQ(L.getAction({G_ADD, LLT::vector(8, 7)}),
            std::make_pair(LegalizerInfo::WidenScalar, LLT::vector(8, 8)));
  // Check a few non-power-of-2 sizes:
  ASSERT_EQ(L.getAction({G_ADD, LLT::vector(3, 3)}),
            std::make_pair(LegalizerInfo::WidenScalar, LLT::vector(3, 8)));
  ASSERT_EQ(L.getAction({G_ADD, LLT::vector(3, 8)}),
            std::make_pair(LegalizerInfo::MoreElements, LLT::vector(8, 8)));
}

TEST(LegalizerInfoTest, MultipleTypes) {
  using namespace TargetOpcode;
  LegalizerInfo L;
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT PtrTy = LLT::pointer(0, 64);

  // Typical RISCy set of operations based on AArch64: a 64-bit pointer can be
  // converted to a 64-bit integer directly.
  L.setAction({G_PTRTOINT, 0, S64}, LegalizerInfo::Legal);
  L.setAction({G_PTRTOINT, 1, PtrTy}, LegalizerInfo::Legal);

  L.setAction({G_PTRTOINT, 0, S32}, LegalizerInfo::WidenScalar);
  L.setLegalizeScalarToDifferentSizeStrategy(
      G_PTRTOINT, 0, LegalizerInfo::widenToLargerTypesAndNarrowToLargest);

  L.computeTables();

  // Check we infer the correct types and actually do what we're told.
  ASSERT_EQ(L.getAction({G_PTRTOINT, 0, S64}),
            std::make_pair(LegalizerInfo::Legal, S64));
  ASSERT_EQ(L.getAction({G_PTRTOINT, 1, PtrTy}),
            std::make_pair(LegalizerInfo::Legal, PtrTy));
  // Make sure we also handle unusual sizes: an over-wide integer result is
  // narrowed, while a mismatched pointer width has no rule at all.
  ASSERT_EQ(L.getAction({G_PTRTOINT, 0, LLT::scalar(65)}),
            std::make_pair(LegalizerInfo::NarrowScalar, S64));
  ASSERT_EQ(L.getAction({G_PTRTOINT, 1, LLT::pointer(0, 32)}),
            std::make_pair(LegalizerInfo::Unsupported, LLT::pointer(0, 32)));
}

TEST(LegalizerInfoTest, MultipleSteps) {
using namespace TargetOpcode;
LegalizerInfo L;
LLT s16 = LLT::scalar(16);
LLT s32 = LLT::scalar(32);
LLT s64 = LLT::scalar(64);

L.setAction({G_UREM, 0, s16}, LegalizerInfo::WidenScalar);
L.setLegalizeScalarToDifferentSizeStrategy(
G_UREM, 0, LegalizerInfo::widenToLargerTypesUnsupportedOtherwise);
L.setAction({G_UREM, 0, s32}, LegalizerInfo::Lower);
L.setAction({G_UREM, 0, s64}, LegalizerInfo::Lower);

Expand All @@ -136,4 +166,33 @@ TEST(LegalizerInfoTest, MultipleSteps) {
ASSERT_EQ(L.getAction({G_UREM, LLT::scalar(32)}),
std::make_pair(LegalizerInfo::Lower, LLT::scalar(32)));
}

TEST(LegalizerInfoTest, SizeChangeStrategy) {
  using namespace TargetOpcode;
  LegalizerInfo L;
  const unsigned LegalSizes[] = {1, 8, 16, 32};
  for (unsigned S : LegalSizes)
    L.setAction({G_UREM, 0, LLT::scalar(S)}, LegalizerInfo::Legal);

  L.setLegalizeScalarToDifferentSizeStrategy(
      G_UREM, 0, LegalizerInfo::widenToLargerTypesUnsupportedOtherwise);
  L.computeTables();

  // Convenience wrapper: query the action for a G_UREM of the given width.
  const auto QueryScalar = [&](unsigned Size) {
    return L.getAction({G_UREM, LLT::scalar(Size)});
  };

  // Check we infer the correct types and actually do what we're told.
  // The explicitly registered sizes stay legal.
  for (unsigned S : LegalSizes)
    ASSERT_EQ(QueryScalar(S),
              std::make_pair(LegalizerInfo::Legal, LLT::scalar(S)));
  // In-between sizes are widened up to the next registered size...
  ASSERT_EQ(QueryScalar(2),
            std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(8)));
  ASSERT_EQ(QueryScalar(7),
            std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(8)));
  ASSERT_EQ(QueryScalar(9),
            std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(16)));
  ASSERT_EQ(QueryScalar(17),
            std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
  ASSERT_EQ(QueryScalar(31),
            std::make_pair(LegalizerInfo::WidenScalar, LLT::scalar(32)));
  // ...but anything wider than the largest registered size is unsupported.
  ASSERT_EQ(QueryScalar(33),
            std::make_pair(LegalizerInfo::Unsupported, LLT::scalar(33)));
}
}
78 changes: 8 additions & 70 deletions llvm/unittests/CodeGen/LowLevelTypeTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,36 +36,22 @@ TEST(LowLevelTypeTest, Scalar) {

for (unsigned S : {1U, 17U, 32U, 64U, 0xfffffU}) {
const LLT Ty = LLT::scalar(S);
const LLT HalfTy = (S % 2) == 0 ? Ty.halfScalarSize() : Ty;
const LLT DoubleTy = Ty.doubleScalarSize();

// Test kind.
for (const LLT TestTy : {Ty, HalfTy, DoubleTy}) {
ASSERT_TRUE(TestTy.isValid());
ASSERT_TRUE(TestTy.isScalar());
ASSERT_TRUE(Ty.isValid());
ASSERT_TRUE(Ty.isScalar());

ASSERT_FALSE(TestTy.isPointer());
ASSERT_FALSE(TestTy.isVector());
}
ASSERT_FALSE(Ty.isPointer());
ASSERT_FALSE(Ty.isVector());

// Test sizes.
EXPECT_EQ(S, Ty.getSizeInBits());
EXPECT_EQ(S, Ty.getScalarSizeInBits());

EXPECT_EQ(S*2, DoubleTy.getSizeInBits());
EXPECT_EQ(S*2, DoubleTy.getScalarSizeInBits());

if ((S % 2) == 0) {
EXPECT_EQ(S/2, HalfTy.getSizeInBits());
EXPECT_EQ(S/2, HalfTy.getScalarSizeInBits());
}

// Test equality operators.
EXPECT_TRUE(Ty == Ty);
EXPECT_FALSE(Ty != Ty);

EXPECT_NE(Ty, DoubleTy);

// Test Type->LLT conversion.
Type *IRTy = IntegerType::get(C, S);
EXPECT_EQ(Ty, getLLTForType(*IRTy, DL));
Expand All @@ -90,73 +76,25 @@ TEST(LowLevelTypeTest, Vector) {
// Test getElementType().
EXPECT_EQ(STy, VTy.getElementType());

const LLT HalfSzTy = ((S % 2) == 0) ? VTy.halfScalarSize() : VTy;
const LLT DoubleSzTy = VTy.doubleScalarSize();

// halfElements requires an even number of elements.
const LLT HalfEltIfEvenTy = ((Elts % 2) == 0) ? VTy.halfElements() : VTy;
const LLT DoubleEltTy = VTy.doubleElements();

// Test kind.
for (const LLT TestTy : {VTy, HalfSzTy, DoubleSzTy, DoubleEltTy}) {
ASSERT_TRUE(TestTy.isValid());
ASSERT_TRUE(TestTy.isVector());

ASSERT_FALSE(TestTy.isScalar());
ASSERT_FALSE(TestTy.isPointer());
}

// Test halving elements to a scalar.
{
ASSERT_TRUE(HalfEltIfEvenTy.isValid());
ASSERT_FALSE(HalfEltIfEvenTy.isPointer());
if (Elts > 2) {
ASSERT_TRUE(HalfEltIfEvenTy.isVector());
} else {
ASSERT_FALSE(HalfEltIfEvenTy.isVector());
EXPECT_EQ(STy, HalfEltIfEvenTy);
}
}
ASSERT_TRUE(VTy.isValid());
ASSERT_TRUE(VTy.isVector());

ASSERT_FALSE(VTy.isScalar());
ASSERT_FALSE(VTy.isPointer());

// Test sizes.
EXPECT_EQ(S * Elts, VTy.getSizeInBits());
EXPECT_EQ(S, VTy.getScalarSizeInBits());
EXPECT_EQ(Elts, VTy.getNumElements());

if ((S % 2) == 0) {
EXPECT_EQ((S / 2) * Elts, HalfSzTy.getSizeInBits());
EXPECT_EQ(S / 2, HalfSzTy.getScalarSizeInBits());
EXPECT_EQ(Elts, HalfSzTy.getNumElements());
}

EXPECT_EQ((S * 2) * Elts, DoubleSzTy.getSizeInBits());
EXPECT_EQ(S * 2, DoubleSzTy.getScalarSizeInBits());
EXPECT_EQ(Elts, DoubleSzTy.getNumElements());

if ((Elts % 2) == 0) {
EXPECT_EQ(S * (Elts / 2), HalfEltIfEvenTy.getSizeInBits());
EXPECT_EQ(S, HalfEltIfEvenTy.getScalarSizeInBits());
if (Elts > 2) {
EXPECT_EQ(Elts / 2, HalfEltIfEvenTy.getNumElements());
}
}

EXPECT_EQ(S * (Elts * 2), DoubleEltTy.getSizeInBits());
EXPECT_EQ(S, DoubleEltTy.getScalarSizeInBits());
EXPECT_EQ(Elts * 2, DoubleEltTy.getNumElements());

// Test equality operators.
EXPECT_TRUE(VTy == VTy);
EXPECT_FALSE(VTy != VTy);

// Test inequality operators on..
// ..different kind.
EXPECT_NE(VTy, STy);
// ..different #elts.
EXPECT_NE(VTy, DoubleEltTy);
// ..different scalar size.
EXPECT_NE(VTy, DoubleSzTy);

// Test Type->LLT conversion.
Type *IRSTy = IntegerType::get(C, S);
Expand Down