diff --git a/llvm/test/Transforms/VectorCombine/X86/load-widening.ll b/llvm/test/Transforms/VectorCombine/X86/load-widening.ll
index 6c14b36e25055..ebd26f82f50b7 100644
--- a/llvm/test/Transforms/VectorCombine/X86/load-widening.ll
+++ b/llvm/test/Transforms/VectorCombine/X86/load-widening.ll
@@ -1,8 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=sse2 --data-layout="e-m:e-i64:64-f80:128-n8:16:32:64-S128" | FileCheck %s --check-prefixes=CHECK
-; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=avx2 --data-layout="e-m:e-i64:64-f80:128-n8:16:32:64-S128" | FileCheck %s --check-prefixes=CHECK
-; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=sse2 --data-layout="E-m:e-i64:64-f80:128-n8:16:32:64-S128" | FileCheck %s --check-prefixes=CHECK
-; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=avx2 --data-layout="E-m:e-i64:64-f80:128-n8:16:32:64-S128" | FileCheck %s --check-prefixes=CHECK
+; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=sse2 --data-layout="e" | FileCheck %s --check-prefixes=CHECK
+; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=avx2 --data-layout="e" | FileCheck %s --check-prefixes=CHECK
+; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=sse2 --data-layout="E" | FileCheck %s --check-prefixes=CHECK
+; RUN: opt < %s -vector-combine -S -mtriple=x86_64-- -mattr=avx2 --data-layout="E" | FileCheck %s --check-prefixes=CHECK
 
 ;-------------------------------------------------------------------------------
 ; Here we know we can load 128 bits as per dereferenceability and alignment.
@@ -222,3 +222,32 @@ define <2 x float> @vec_with_2elts_addressspace(<2 x float> addrspace(2)* align
   %r = load <2 x float>, <2 x float> addrspace(2)* %p, align 16
   ret <2 x float> %r
 }
+
+;-------------------------------------------------------------------------------
+
+; Widening these would change the legalized type, so leave them alone.
+
+define <2 x i1> @vec_with_2elts_128bits_i1(<2 x i1>* align 16 dereferenceable(16) %p) {
+; CHECK-LABEL: @vec_with_2elts_128bits_i1(
+; CHECK-NEXT:    [[R:%.*]] = load <2 x i1>, <2 x i1>* [[P:%.*]], align 16
+; CHECK-NEXT:    ret <2 x i1> [[R]]
+;
+  %r = load <2 x i1>, <2 x i1>* %p, align 16
+  ret <2 x i1> %r
+}
+define <2 x i2> @vec_with_2elts_128bits_i2(<2 x i2>* align 16 dereferenceable(16) %p) {
+; CHECK-LABEL: @vec_with_2elts_128bits_i2(
+; CHECK-NEXT:    [[R:%.*]] = load <2 x i2>, <2 x i2>* [[P:%.*]], align 16
+; CHECK-NEXT:    ret <2 x i2> [[R]]
+;
+  %r = load <2 x i2>, <2 x i2>* %p, align 16
+  ret <2 x i2> %r
+}
+define <2 x i4> @vec_with_2elts_128bits_i4(<2 x i4>* align 16 dereferenceable(16) %p) {
+; CHECK-LABEL: @vec_with_2elts_128bits_i4(
+; CHECK-NEXT:    [[R:%.*]] = load <2 x i4>, <2 x i4>* [[P:%.*]], align 16
+; CHECK-NEXT:    ret <2 x i4> [[R]]
+;
+  %r = load <2 x i4>, <2 x i4>* %p, align 16
+  ret <2 x i4> %r
+}
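
For reference, the transform these new sub-byte-element tests must not trigger is the load widening exercised by the rest of this file: when dereferenceability and alignment guarantee a full 128-bit access and widening keeps the same legalized type, -vector-combine replaces the narrow load with a wide load plus a shuffle. Below is a minimal sketch of that rewrite in the typed-pointer IR style of this file; the function and value names are illustrative, not taken from the patch, and the exact output shape (e.g. poison vs. undef in the shuffle) varies across LLVM versions.

; Illustrative sketch only -- not part of this patch.
; Before: a narrow load known to be dereferenceable and aligned to 128 bits.
define <2 x float> @widenable_example(<2 x float>* align 16 dereferenceable(16) %p) {
  %r = load <2 x float>, <2 x float>* %p, align 16
  ret <2 x float> %r
}
; After -vector-combine (approximately): load the full 128 bits, then
; shuffle out the original two lanes.
define <2 x float> @widenable_example_widened(<2 x float>* align 16 dereferenceable(16) %p) {
  %wide.ptr = bitcast <2 x float>* %p to <4 x float>*
  %wide = load <4 x float>, <4 x float>* %wide.ptr, align 16
  %r = shufflevector <4 x float> %wide, <4 x float> poison, <2 x i32> <i32 0, i32 1>
  ret <2 x float> %r
}
; For <2 x i1>, <2 x i2>, and <2 x i4>, an analogous 128-bit widening would
; change the legalized type, which is why the new tests expect the loads to be
; left untouched.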