diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 43b8bd41500c0..06db948cafcbf 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2409,7 +2409,31 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
     Value *Vec = II->getArgOperand(0);
     Value *Idx = II->getArgOperand(1);
 
-    auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
+    Type *ReturnType = II->getType();
+    // (extract_vector (insert_vector InsertTuple, InsertValue, InsertIdx),
+    // ExtractIdx)
+    unsigned ExtractIdx = cast<ConstantInt>(Idx)->getZExtValue();
+    Value *InsertTuple, *InsertIdx, *InsertValue;
+    if (match(Vec, m_Intrinsic<Intrinsic::vector_insert>(m_Value(InsertTuple),
+                                                         m_Value(InsertValue),
+                                                         m_Value(InsertIdx))) &&
+        InsertValue->getType() == ReturnType) {
+      unsigned Index = cast<ConstantInt>(InsertIdx)->getZExtValue();
+      // Case where we get the same index right after setting it.
+      // extract.vector(insert.vector(InsertTuple, InsertValue, Idx), Idx) -->
+      // InsertValue
+      if (ExtractIdx == Index)
+        return replaceInstUsesWith(CI, InsertValue);
+      // If we are getting a different index than what was set in the
+      // insert.vector intrinsic, we can just set the input tuple to the one up
+      // in the chain. extract.vector(insert.vector(InsertTuple, InsertValue,
+      // InsertIndex), ExtractIndex)
+      // --> extract.vector(InsertTuple, ExtractIndex)
+      else
+        return replaceOperand(CI, 0, InsertTuple);
+    }
+
+    auto *DstTy = dyn_cast<FixedVectorType>(ReturnType);
     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
 
     // Only canonicalize if the the destination vector and Vec are fixed
diff --git a/llvm/test/Transforms/InstCombine/opts-tuples-extract-intrinsic.ll b/llvm/test/Transforms/InstCombine/opts-tuples-extract-intrinsic.ll
new file mode 100644
index 0000000000000..4303d963ea886
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/opts-tuples-extract-intrinsic.ll
@@ -0,0 +1,44 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+; Check that the redundant sequences of extract/insert are eliminated.
+
+; extract.vector(insert.vector(Tuple, Value, Idx), Idx) --> Value
+define <vscale x 16 x i8> @test_extract_insert_same_idx(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1) {
+; CHECK-LABEL: @test_extract_insert_same_idx(
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[V1:%.*]]
+;
+  %vec.ins = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1, i64 48)
+  %vec.ext = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> %vec.ins, i64 48)
+  ret <vscale x 16 x i8> %vec.ext
+}
+
+; extract.vector(insert.vector(Vector, Value, InsertIndex), ExtractIndex)
+; --> extract.vector(Vector, ExtractIndex)
+define <vscale x 16 x i8> @test_extract_insert_dif_idx(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1) {
+; CHECK-LABEL: @test_extract_insert_dif_idx(
+; CHECK-NEXT:    [[VEC_EXT:%.*]] = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[V0:%.*]], i64 0)
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[VEC_EXT]]
+;
+  %vec.ins = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1, i64 48)
+  %vec.ext = call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> %vec.ins, i64 0)
+  ret <vscale x 16 x i8> %vec.ext
+}
+
+; Negative test
+; The extracted vector-size != inserted vector-size
+define <vscale x 32 x i8> @neg_test_extract_insert_same_idx_dif_ret_size(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1) {
+; CHECK-LABEL: @neg_test_extract_insert_same_idx_dif_ret_size(
+; CHECK-NEXT:    [[VEC_INS:%.*]] = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> [[V0:%.*]], <vscale x 16 x i8> [[V1:%.*]], i64 32)
+; CHECK-NEXT:    [[VEC_EXT:%.*]] = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> [[VEC_INS]], i64 32)
+; CHECK-NEXT:    ret <vscale x 32 x i8> [[VEC_EXT]]
+;
+  %vec.ins = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %v0, <vscale x 16 x i8> %v1, i64 32)
+  %vec.ext = call <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8> %vec.ins, i64 32)
+  ret <vscale x 32 x i8> %vec.ext
+}
+
+
+declare <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8>, <vscale x 16 x i8>, i64)
+declare <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8>, i64)
+declare <vscale x 32 x i8> @llvm.vector.extract.nxv32i8.nxv64i8(<vscale x 64 x i8>, i64)