diff --git a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp index 8ca422cfab9fd..ff9cc3e40fd76 100644 --- a/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -4083,7 +4083,9 @@ void BoUpSLP::reorderNodeWithReuses(TreeEntry &TE, ArrayRef Mask) const { // Clear reorder since it is going to be applied to the new mask. TE.ReorderIndices.clear(); // Try to improve gathered nodes with clustered reuses, if possible. - reorderScalars(TE.Scalars, ArrayRef(NewMask).slice(0, Sz)); + SmallVector NewOrder(ArrayRef(NewMask).slice(0, Sz)); + inversePermutation(NewOrder, NewMask); + reorderScalars(TE.Scalars, NewMask); // Fill the reuses mask with the identity submasks. for (auto *It = TE.ReuseShuffleIndices.begin(), *End = TE.ReuseShuffleIndices.end(); diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll b/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll new file mode 100644 index 0000000000000..72faaa93df02e --- /dev/null +++ b/llvm/test/Transforms/SLPVectorizer/X86/reorder-clustered-node.ll @@ -0,0 +1,76 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt -passes=slp-vectorizer -S < %s -mtriple=x86_64 -slp-threshold=-150 | FileCheck %s + +define i1 @test(ptr %arg, ptr %i233, i64 %i241, ptr %i235, ptr %i237, ptr %i227) { +; CHECK-LABEL: @test( +; CHECK-NEXT: bb: +; CHECK-NEXT: [[I226:%.*]] = getelementptr ptr, ptr [[ARG:%.*]], i32 7 +; CHECK-NEXT: [[I242:%.*]] = getelementptr double, ptr [[I233:%.*]], i64 [[I241:%.*]] +; CHECK-NEXT: [[I245:%.*]] = getelementptr double, ptr [[I235:%.*]], i64 [[I241]] +; CHECK-NEXT: [[I248:%.*]] = getelementptr double, ptr [[I237:%.*]], i64 [[I241]] +; CHECK-NEXT: [[I250:%.*]] = getelementptr double, ptr [[I227:%.*]], i64 [[I241]] +; CHECK-NEXT: [[TMP0:%.*]] = load <4 x ptr>, ptr [[I226]], align 8 +; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x ptr> [[TMP0]], <4 x 
ptr> poison, <8 x i32> +; CHECK-NEXT: [[TMP2:%.*]] = insertelement <8 x ptr> , ptr [[I242]], i32 0 +; CHECK-NEXT: [[TMP3:%.*]] = insertelement <8 x ptr> [[TMP2]], ptr [[I250]], i32 2 +; CHECK-NEXT: [[TMP4:%.*]] = icmp ult <8 x ptr> [[TMP3]], [[TMP1]] +; CHECK-NEXT: [[TMP5:%.*]] = insertelement <8 x ptr> poison, ptr [[I250]], i32 0 +; CHECK-NEXT: [[TMP6:%.*]] = insertelement <8 x ptr> [[TMP5]], ptr [[I242]], i32 1 +; CHECK-NEXT: [[TMP7:%.*]] = insertelement <8 x ptr> [[TMP6]], ptr [[I245]], i32 2 +; CHECK-NEXT: [[TMP8:%.*]] = insertelement <8 x ptr> [[TMP7]], ptr [[I248]], i32 3 +; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <8 x ptr> [[TMP8]], <8 x ptr> poison, <8 x i32> +; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <8 x ptr> , <8 x ptr> [[TMP1]], <8 x i32> +; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <8 x ptr> [[TMP9]], [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = shufflevector <8 x i1> [[TMP11]], <8 x i1> poison, <8 x i32> +; CHECK-NEXT: [[TMP13:%.*]] = or <8 x i1> [[TMP4]], [[TMP12]] +; CHECK-NEXT: [[TMP14:%.*]] = call i1 @llvm.vector.reduce.and.v8i1(<8 x i1> [[TMP13]]) +; CHECK-NEXT: [[OP_RDX:%.*]] = and i1 [[TMP14]], false +; CHECK-NEXT: ret i1 [[OP_RDX]] +; +bb: + %i226 = getelementptr ptr, ptr %arg, i32 7 + %i2271 = load ptr, ptr %i226, align 8 + %i232 = getelementptr ptr, ptr %arg, i32 8 + %i2332 = load ptr, ptr %i232, align 8 + %i234 = getelementptr ptr, ptr %arg, i32 9 + %i2353 = load ptr, ptr %i234, align 8 + %i236 = getelementptr ptr, ptr %arg, i32 10 + %i2374 = load ptr, ptr %i236, align 8 + %i240 = icmp ult ptr null, %i2332 + %i242 = getelementptr double, ptr %i233, i64 %i241 + %i243 = icmp ult ptr %i242, null + %i245 = getelementptr double, ptr %i235, i64 %i241 + %i247 = icmp ult ptr null, %i2374 + %i248 = getelementptr double, ptr %i237, i64 %i241 + %i249 = icmp ult ptr %i248, null + %i250 = getelementptr double, ptr %i227, i64 %i241 + %i251 = icmp ult ptr %i250, %i2332 + %i252 = icmp ult ptr %i242, %i2271 + %i253 = icmp ult ptr %i250, %i2353 + %i254 = icmp ult 
ptr %i245, %i2271
+  %i255 = icmp ult ptr %i250, null
+  %i256 = icmp ult ptr null, %i2271
+  %i257 = icmp ult ptr null, %i2353
+  %i258 = icmp ult ptr %i245, null
+  %i259 = icmp ult ptr %i242, null
+  %i260 = icmp ult ptr null, %i2332
+  %i261 = icmp ult ptr null, %i2374
+  %i262 = icmp ult ptr %i248, null
+  %i263 = or i1 %i240, %i243
+  %i265 = and i1 %i263, false
+  %i266 = or i1 %i247, %i249
+  %i267 = and i1 %i265, %i266
+  %i268 = or i1 %i251, %i252
+  %i269 = and i1 %i267, %i268
+  %i270 = or i1 %i253, %i254
+  %i271 = and i1 %i269, %i270
+  %i272 = or i1 %i255, %i256
+  %i273 = and i1 %i271, %i272
+  %i274 = or i1 %i257, %i258
+  %i275 = and i1 %i273, %i274
+  %i276 = or i1 %i259, %i260
+  %i277 = and i1 %i275, %i276
+  %i278 = or i1 %i261, %i262
+  %i279 = and i1 %i277, %i278
+  ret i1 %i279
+}