diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
new file mode 100644
index 00000000000000..1ed72290f2dfac
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vec-load-combine.ll
@@ -0,0 +1,72 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -passes="default<O2>" -S < %s | FileCheck %s --check-prefix=SSE
+; RUN: opt -passes="default<O2>" -S -mattr=avx < %s | FileCheck %s --check-prefix=AVX
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64--"
+
+%union.ElementWiseAccess = type { <4 x float> }
+
+$getAt = comdat any
+
+define dso_local noundef <4 x float> @ConvertVectors_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %0) #0 {
+; SSE-LABEL: @ConvertVectors_ByRef(
+; SSE-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[TMP0:%.*]], align 16
+; SSE-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [4 x float], ptr [[TMP0]], i64 0, i64 1
+; SSE-NEXT:    [[TMP4:%.*]] = load <2 x float>, ptr [[TMP3]], align 4
+; SSE-NEXT:    [[TMP5:%.*]] = shufflevector <2 x float> [[TMP4]], <2 x float> poison, <4 x i32>
+; SSE-NEXT:    [[TMP6:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP5]], <4 x i32>
+; SSE-NEXT:    [[TMP7:%.*]] = shufflevector <4 x float> [[TMP6]], <4 x float> [[TMP5]], <4 x i32>
+; SSE-NEXT:    ret <4 x float> [[TMP7]]
+;
+; AVX-LABEL: @ConvertVectors_ByRef(
+; AVX-NEXT:    [[TMP2:%.*]] = load <4 x float>, ptr [[TMP0:%.*]], align 16
+; AVX-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [4 x float], ptr [[TMP0]], i64 0, i64 2
+; AVX-NEXT:    [[TMP4:%.*]] = load float, ptr [[TMP3]], align 8
+; AVX-NEXT:    [[TMP5:%.*]] = insertelement <4 x float> [[TMP2]], float [[TMP4]], i64 2
+; AVX-NEXT:    [[TMP6:%.*]] = insertelement <4 x float> [[TMP5]], float [[TMP4]], i64 3
+; AVX-NEXT:    ret <4 x float> [[TMP6]]
+;
+  %2 = alloca ptr, align 8
+  %3 = alloca <4 x float>, align 16
+  store ptr %0, ptr %2, align 8
+  %4 = load ptr, ptr %2, align 8
+  %5 = call noundef nonnull align 16 dereferenceable(16) ptr @castToElementWiseAccess_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %4)
+  %6 = call noundef float @getAt(ptr noundef nonnull align 16 dereferenceable(16) %5, i32 noundef 0)
+  %7 = insertelement <4 x float> undef, float %6, i32 0
+  %8 = load ptr, ptr %2, align 8
+  %9 = call noundef nonnull align 16 dereferenceable(16) ptr @castToElementWiseAccess_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %8)
+  %10 = call noundef float @getAt(ptr noundef nonnull align 16 dereferenceable(16) %9, i32 noundef 1)
+  %11 = insertelement <4 x float> %7, float %10, i32 1
+  %12 = load ptr, ptr %2, align 8
+  %13 = call noundef nonnull align 16 dereferenceable(16) ptr @castToElementWiseAccess_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %12)
+  %14 = call noundef float @getAt(ptr noundef nonnull align 16 dereferenceable(16) %13, i32 noundef 2)
+  %15 = insertelement <4 x float> %11, float %14, i32 2
+  %16 = load ptr, ptr %2, align 8
+  %17 = call noundef nonnull align 16 dereferenceable(16) ptr @castToElementWiseAccess_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %16)
+  %18 = call noundef float @getAt(ptr noundef nonnull align 16 dereferenceable(16) %17, i32 noundef 2)
+  %19 = insertelement <4 x float> %15, float %18, i32 3
+  store <4 x float> %19, ptr %3, align 16
+  %20 = load <4 x float>, ptr %3, align 16
+  ret <4 x float> %20
+}
+
+define internal noundef nonnull align 16 dereferenceable(16) ptr @castToElementWiseAccess_ByRef(ptr noundef nonnull align 16 dereferenceable(16) %0) #1 {
+  %2 = alloca ptr, align 8
+  store ptr %0, ptr %2, align 8
+  %3 = load ptr, ptr %2, align 8
+  ret ptr %3
+}
+
+define linkonce_odr dso_local noundef float @getAt(ptr noundef nonnull align 16 dereferenceable(16) %0, i32 noundef %1) #1 comdat align 2 {
+  %3 = alloca ptr, align 8
+  %4 = alloca i32, align 4
+  store ptr %0, ptr %3, align 8
+  store i32 %1, ptr %4, align 4
+  %5 = load ptr, ptr %3, align 8
+  %6 = load i32, ptr %4, align 4
+  %7 = sext i32 %6 to i64
+  %8 = getelementptr inbounds [4 x float], ptr %5, i64 0, i64 %7
+  %9 = load float, ptr %8, align 4
+  ret float %9
+}