diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 714d1ae8aaec3..a2b75848ea028 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -2534,7 +2534,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
     }
   }
 
-  if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
+  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
     // Beware: messing with this target-specific oddity may cause trouble.
     if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
       Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
@@ -2563,7 +2563,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
     }
   }
 
-  if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
+  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
     if (SrcVTy->getNumElements() == 1) {
       // If our destination is not a vector, then make this a straight
       // scalar-scalar cast.
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-bitcast.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-bitcast.ll
new file mode 100644
index 0000000000000..8049cad596b50
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-bitcast.ll
@@ -0,0 +1,13 @@
+; RUN: opt -instcombine -mtriple=aarch64-linux-gnu -mattr=+sve -S < %s | FileCheck %s
+
+; We shouldn't fold bitcast(insert <vscale x 8 x i16> undef, iX %val, i32 0)
+; into bitcast(iX %val) for scalable vectors.
+define <vscale x 16 x i8> @bitcast_of_insert_i8_i16(i16 %val) #0 {
+; CHECK-LABEL: @bitcast_of_insert_i8_i16(
+; CHECK-NOT: bitcast i16 %val to <vscale x 16 x i8>
+; CHECK: bitcast <vscale x 8 x i16> %op2 to <vscale x 16 x i8>
+entry:
+  %op2 = insertelement <vscale x 8 x i16> undef, i16 %val, i32 0
+  %0 = bitcast <vscale x 8 x i16> %op2 to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %0
+}