diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 032b41010fa18..561e457dacda2 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2340,9 +2340,9 @@ bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
   if (!MOP.isReg())
     return false;
-  // MIPatternMatch doesn't let us look through G_ZEXT etc.
-  auto ValAndVReg = getIConstantVRegValWithLookThrough(MOP.getReg(), MRI);
-  return ValAndVReg && ValAndVReg->Value == C;
+  auto *MI = MRI.getVRegDef(MOP.getReg());
+  auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI);
+  return MaybeCst.hasValue() && MaybeCst->getSExtValue() == C;
 }
 
 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul.mir
index 5c163f75f47c3..9812ea7708df2 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-mul.mir
@@ -31,15 +31,12 @@ machineFunctionInfo: {}
 body:             |
   bb.0:
     liveins: $q0
-    ; Currently not implemented.
     ; CHECK-LABEL: name: mul_vector_by_zero
     ; CHECK: liveins: $q0
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[COPY]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: $q0 = COPY [[MUL]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[BUILD_VECTOR]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 0
     %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
@@ -76,15 +73,11 @@ machineFunctionInfo: {}
 body:             |
   bb.0:
     liveins: $q0
-    ; Currently not implemented.
     ; CHECK-LABEL: name: mul_vector_by_one
     ; CHECK: liveins: $q0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[COPY]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: $q0 = COPY [[MUL]](<4 x s32>)
+    ; CHECK-NEXT: $q0 = COPY [[COPY]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 1
     %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
@@ -123,15 +116,14 @@ machineFunctionInfo: {}
 body:             |
   bb.0:
     liveins: $q0
-    ; Currently not implemented.
     ; CHECK-LABEL: name: mul_vector_by_neg_one
     ; CHECK: liveins: $q0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $q0
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
-    ; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<4 x s32>) = G_MUL [[COPY]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: $q0 = COPY [[MUL]](<4 x s32>)
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(<4 x s32>) = G_SUB [[BUILD_VECTOR]], [[COPY]]
+    ; CHECK-NEXT: $q0 = COPY [[SUB]](<4 x s32>)
     %0:_(<4 x s32>) = COPY $q0
     %1:_(s32) = G_CONSTANT i32 -1
     %2:_(<4 x s32>) = G_BUILD_VECTOR %1(s32), %1(s32), %1(s32), %1(s32)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir
index bea0328d3d088..7bd325b2a5e62 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-trivial-arith.mir
@@ -378,3 +378,35 @@ body: |
     $w0 = COPY %rot(s32)
     RET_ReallyLR implicit $w0
 ...
+---
+name: lshr_of_vec_zero
+body: |
+  bb.1:
+    liveins: $q0
+    ; CHECK-LABEL: name: lshr_of_vec_zero
+    ; CHECK: [[COPY:%[0-9]+]]:_(<8 x s16>) = COPY $q0
+    ; CHECK-NEXT: $q0 = COPY [[COPY]](<8 x s16>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    %0:_(<8 x s16>) = COPY $q0
+    %5:_(s16) = G_CONSTANT i16 0
+    %zero_vec:_(<8 x s16>) = G_BUILD_VECTOR %5(s16), %5(s16), %5(s16), %5(s16), %5(s16), %5(s16), %5(s16), %5(s16)
+    %shift:_(<8 x s16>) = G_LSHR %0, %zero_vec(<8 x s16>)
+    $q0 = COPY %shift(<8 x s16>)
+    RET_ReallyLR implicit $q0
+...
+---
+name: ptradd_of_vec_zero
+body: |
+  bb.1:
+    liveins: $q0
+    ; CHECK-LABEL: name: ptradd_of_vec_zero
+    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x p0>) = COPY $q0
+    ; CHECK-NEXT: $q0 = COPY [[COPY]](<2 x p0>)
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
+    %0:_(<2 x p0>) = COPY $q0
+    %5:_(s64) = G_CONSTANT i64 0
+    %zero_vec:_(<2 x s64>) = G_BUILD_VECTOR %5(s64), %5(s64)
+    %ptr:_(<2 x p0>) = G_PTR_ADD %0, %zero_vec(<2 x s64>)
+    $q0 = COPY %ptr(<2 x p0>)
+    RET_ReallyLR implicit $q0
+...