diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
index 4ea75409252bd..b6b3a95f35c76 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp
@@ -4611,5 +4611,15 @@ Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
       return replaceOperand(SI, 2, ConstantInt::get(FalseVal->getType(), 0));
   }
 
+  Value *MaskedLoadPtr;
+  const APInt *MaskedLoadAlignment;
+  if (match(TrueVal, m_OneUse(m_MaskedLoad(m_Value(MaskedLoadPtr),
+                                           m_APInt(MaskedLoadAlignment),
+                                           m_Specific(CondVal), m_Value()))))
+    return replaceInstUsesWith(
+        SI, Builder.CreateMaskedLoad(TrueVal->getType(), MaskedLoadPtr,
+                                     Align(MaskedLoadAlignment->getZExtValue()),
+                                     CondVal, FalseVal));
+
   return nullptr;
 }
diff --git a/llvm/test/Transforms/InstCombine/select-masked_load.ll b/llvm/test/Transforms/InstCombine/select-masked_load.ll
index b6bac612d6f9b..22e30ac019a5d 100644
--- a/llvm/test/Transforms/InstCombine/select-masked_load.ll
+++ b/llvm/test/Transforms/InstCombine/select-masked_load.ll
@@ -26,8 +26,7 @@ define <4 x i32> @masked_load_and_zero_inactive_2(ptr %ptr, <4 x i1> %mask) {
 ; No transform when the load's passthrough cannot be reused or altered.
 define <4 x i32> @masked_load_and_zero_inactive_3(ptr %ptr, <4 x i1> %mask, <4 x i32> %passthrough) {
 ; CHECK-LABEL: @masked_load_and_zero_inactive_3(
-; CHECK-NEXT:    [[LOAD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK:%.*]], <4 x i32> [[PASSTHROUGH:%.*]])
-; CHECK-NEXT:    [[MASKED:%.*]] = select <4 x i1> [[MASK]], <4 x i32> [[LOAD]], <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[MASKED:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[MASK:%.*]], <4 x i32> zeroinitializer)
 ; CHECK-NEXT:    ret <4 x i32> [[MASKED]]
 ;
   %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %ptr, i32 4, <4 x i1> %mask, <4 x i32> %passthrough)
@@ -116,6 +115,40 @@ entry:
   ret <8 x float> %1
 }
 
+define <vscale x 4 x float> @fold_sel_into_masked_load_scalable(ptr %loc, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough) {
+; CHECK-LABEL: @fold_sel_into_masked_load_scalable(
+; CHECK-NEXT:    [[SEL:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[LOC:%.*]], i32 1, <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[PASSTHROUGH:%.*]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[SEL]]
+;
+  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %loc, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer)
+  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %load, <vscale x 4 x float> %passthrough
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 4 x float> @neg_fold_sel_into_masked_load_mask_mismatch(ptr %loc, <vscale x 4 x i1> %mask, <vscale x 4 x i1> %mask2, <vscale x 4 x float> %passthrough) {
+; CHECK-LABEL: @neg_fold_sel_into_masked_load_mask_mismatch(
+; CHECK-NEXT:    [[LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[LOC:%.*]], i32 1, <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[PASSTHROUGH:%.*]])
+; CHECK-NEXT:    [[SEL:%.*]] = select <vscale x 4 x i1> [[MASK2:%.*]], <vscale x 4 x float> [[LOAD]], <vscale x 4 x float> [[PASSTHROUGH]]
+; CHECK-NEXT:    ret <vscale x 4 x float> [[SEL]]
+;
+  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %loc, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough)
+  %sel = select <vscale x 4 x i1> %mask2, <vscale x 4 x float> %load, <vscale x 4 x float> %passthrough
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 4 x float> @fold_sel_into_masked_load_scalable_one_use_check(ptr %loc1, <vscale x 4 x i1> %mask, <vscale x 4 x float> %passthrough, ptr %loc2) {
+; CHECK-LABEL: @fold_sel_into_masked_load_scalable_one_use_check(
+; CHECK-NEXT:    [[LOAD:%.*]] = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[LOC:%.*]], i32 1, <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> zeroinitializer)
+; CHECK-NEXT:    [[SEL:%.*]] = select <vscale x 4 x i1> [[MASK]], <vscale x 4 x float> [[LOAD]], <vscale x 4 x float> [[PASSTHROUGH:%.*]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> [[LOAD]], ptr [[LOC2:%.*]], i32 1, <vscale x 4 x i1> [[MASK]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[SEL]]
+;
+  %load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr %loc1, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x float> zeroinitializer)
+  %sel = select <vscale x 4 x i1> %mask, <vscale x 4 x float> %load, <vscale x 4 x float> %passthrough
+  call void @llvm.masked.store.nxv4f32.p0(<vscale x 4 x float> %load, ptr %loc2, i32 1, <vscale x 4 x i1> %mask)
+  ret <vscale x 4 x float> %sel
+}
+
 declare <8 x float> @llvm.masked.load.v8f32.p0(ptr, i32 immarg, <8 x i1>, <8 x float>)
 declare <4 x i32> @llvm.masked.load.v4i32.p0(ptr, i32 immarg, <4 x i1>, <4 x i32>)
 declare <4 x float> @llvm.masked.load.v4f32.p0(ptr, i32 immarg, <4 x i1>, <4 x float>)
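
In short, the fold above rewrites select(mask, masked_load(ptr, align, mask, anything), falseval) into masked_load(ptr, align, mask, falseval): the disabled lanes of a masked load yield its passthrough operand, so once the select's false value becomes the passthrough, the select is redundant. The m_OneUse check matters because the rewrite creates a new load rather than mutating the old one; if the original load had other users it would stay live and the access would be duplicated, which the one_use_check test above verifies. A minimal before/after sketch in fixed-width IR (value names here are illustrative, not taken from the patch):

  ; Before: the select condition is the same mask the load executed with.
  %load = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4, <4 x i1> %m, <4 x i32> poison)
  %sel = select <4 x i1> %m, <4 x i32> %load, <4 x i32> %fv

  ; After: the select's false value becomes the new load's passthrough.
  %sel = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4, <4 x i1> %m, <4 x i32> %fv)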