
[X86][AVX] suboptimal expansion of 256-bit vector loads #22084


Description

@adibiagio
Bugzilla Link: 21710
Resolution: FIXED
Resolved on: Dec 05, 2014 16:16
Version: trunk
OS: Linux
CC: @rotateright

Extended Description

Per-element loads for 128-bit vectors work fine (they are combined into a single unaligned load instruction):

$ cat test.c
///
#include <x86intrin.h>
__m128 test1(const float* ptr) {
return (__m128){ ptr[0], ptr[1], ptr[2], ptr[3] };
}

__m128d test2(const double* ptr) {
return (__m128d){ ptr[0], ptr[1] };
}
///

$ clang test.c -march=btver2 -O2 -S -emit-llvm -o test.ll
$ cat test.ll

define <4 x float> @test1(float* nocapture readonly %ptr) {
entry:
%0 = load float* %ptr, align 4, !tbaa !1
%vecinit = insertelement <4 x float> undef, float %0, i32 0
%arrayidx1 = getelementptr inbounds float* %ptr, i64 1
%1 = load float* %arrayidx1, align 4, !tbaa !1
%vecinit2 = insertelement <4 x float> %vecinit, float %1, i32 1
%arrayidx3 = getelementptr inbounds float* %ptr, i64 2
%2 = load float* %arrayidx3, align 4, !tbaa !1
%vecinit4 = insertelement <4 x float> %vecinit2, float %2, i32 2
%arrayidx5 = getelementptr inbounds float* %ptr, i64 3
%3 = load float* %arrayidx5, align 4, !tbaa !1
%vecinit6 = insertelement <4 x float> %vecinit4, float %3, i32 3
ret <4 x float> %vecinit6
}

define <2 x double> @test2(double* nocapture readonly %ptr) {
entry:
%0 = load double* %ptr, align 8, !tbaa !5
%vecinit = insertelement <2 x double> undef, double %0, i32 0
%arrayidx1 = getelementptr inbounds double* %ptr, i64 1
%1 = load double* %arrayidx1, align 8, !tbaa !5
%vecinit2 = insertelement <2 x double> %vecinit, double %1, i32 1
ret <2 x double> %vecinit2
}

$ llc -mcpu=btver2 test.ll -o -

test1:
vmovups (%rdi), %xmm0
retq

test2:
vmovups (%rdi), %xmm0
retq


However, 256-bit vector loads are only partially vectorized.

/////
#include <x86intrin.h>
__m256 test1(const float* ptr) {
return (__m256){ ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5], ptr[6], ptr[7] };
}

__m256d test2(const double* ptr) {
return (__m256d){ ptr[0], ptr[1], ptr[2], ptr[3] };
}
/////

$ clang test2.c -march=btver2 -O2 -S -emit-llvm -o test2.ll
$ cat test2.ll

define <8 x float> @test1(float* nocapture readonly %ptr) {
entry:
%0 = load float* %ptr, align 4, !tbaa !1
%vecinit = insertelement <8 x float> undef, float %0, i32 0
%arrayidx1 = getelementptr inbounds float* %ptr, i64 1
%1 = load float* %arrayidx1, align 4, !tbaa !1
%vecinit2 = insertelement <8 x float> %vecinit, float %1, i32 1
%arrayidx3 = getelementptr inbounds float* %ptr, i64 2
%2 = load float* %arrayidx3, align 4, !tbaa !1
%vecinit4 = insertelement <8 x float> %vecinit2, float %2, i32 2
%arrayidx5 = getelementptr inbounds float* %ptr, i64 3
%3 = load float* %arrayidx5, align 4, !tbaa !1
%vecinit6 = insertelement <8 x float> %vecinit4, float %3, i32 3
%arrayidx7 = getelementptr inbounds float* %ptr, i64 4
%4 = load float* %arrayidx7, align 4, !tbaa !1
%vecinit8 = insertelement <8 x float> %vecinit6, float %4, i32 4
%arrayidx9 = getelementptr inbounds float* %ptr, i64 5
%5 = load float* %arrayidx9, align 4, !tbaa !1
%vecinit10 = insertelement <8 x float> %vecinit8, float %5, i32 5
%arrayidx11 = getelementptr inbounds float* %ptr, i64 6
%6 = load float* %arrayidx11, align 4, !tbaa !1
%vecinit12 = insertelement <8 x float> %vecinit10, float %6, i32 6
%arrayidx13 = getelementptr inbounds float* %ptr, i64 7
%7 = load float* %arrayidx13, align 4, !tbaa !1
%vecinit14 = insertelement <8 x float> %vecinit12, float %7, i32 7
ret <8 x float> %vecinit14
}

define <4 x double> @test2(double* nocapture readonly %ptr) {
entry:
%0 = load double* %ptr, align 8, !tbaa !5
%vecinit = insertelement <4 x double> undef, double %0, i32 0
%arrayidx1 = getelementptr inbounds double* %ptr, i64 1
%1 = load double* %arrayidx1, align 8, !tbaa !5
%vecinit2 = insertelement <4 x double> %vecinit, double %1, i32 1
%arrayidx3 = getelementptr inbounds double* %ptr, i64 2
%2 = load double* %arrayidx3, align 8, !tbaa !5
%vecinit4 = insertelement <4 x double> %vecinit2, double %2, i32 2
%arrayidx5 = getelementptr inbounds double* %ptr, i64 3
%3 = load double* %arrayidx5, align 8, !tbaa !5
%vecinit6 = insertelement <4 x double> %vecinit4, double %3, i32 3
ret <4 x double> %vecinit6
}

$ llc -mcpu=btver2 test2.ll -o -

test1:
vmovss 16(%rdi), %xmm1
vmovups (%rdi), %xmm0
vinsertps $16, 20(%rdi), %xmm1, %xmm1
vinsertps $32, 24(%rdi), %xmm1, %xmm1
vinsertps $48, 28(%rdi), %xmm1, %xmm1
vinsertf128 $1, %xmm1, %ymm0, %ymm0
retq

test2:
vmovsd 16(%rdi), %xmm1
vmovupd (%rdi), %xmm0
vmovhpd 24(%rdi), %xmm1, %xmm1
vinsertf128 $1, %xmm1, %ymm0, %ymm0
retq

Ideally, on AVX targets with feature FastUAMem and !SlowUAMem32, we should get:

test1:
vmovups (%rdi), %ymm0

test2:
vmovups (%rdi), %ymm0
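
For comparison (this snippet is not part of the original report, and the function names are illustrative), writing the loads with the unaligned-load intrinsics sidesteps the per-element BUILD_VECTOR path and already yields the single vmovups/vmovupd shown above on any AVX target:

///
#include <x86intrin.h>

/* _mm256_loadu_ps / _mm256_loadu_pd map directly to vmovups / vmovupd,
   so no per-element expansion takes place. */
__m256 test1_loadu(const float* ptr) {
  return _mm256_loadu_ps(ptr);
}

__m256d test2_loadu(const double* ptr) {
  return _mm256_loadu_pd(ptr);
}
///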

The problem seems to be caused by a sub-optimal choice made during BUILD_VECTOR legalization.

The backend knows how to expand a 128-bit BUILD_VECTOR of consecutive scalar loads into a single 128-bit vector load. However, when a BUILD_VECTOR would require a 256-bit vector load, the backend expands it into a 128-bit load for the lower half plus a chain of insert_vector_elt nodes and an insert_subvector for the upper half. That is why, for example, 'test1' generates a sequence of insertps instructions followed by a vinsertf128.
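
As a standalone illustration (hypothetical helper, not the actual LLVM code), the missing piece is essentially the same consecutiveness check the 128-bit path already performs, just allowed to cover a 256-bit range: verify that the scalar loads feeding the BUILD_VECTOR read adjacent elements from one base pointer, and if so replace the whole node with a single wide unaligned load.

///
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical sketch, not LLVM code: given the byte offsets (relative to a
   common base pointer) of the scalar loads feeding a BUILD_VECTOR, return
   true if they form one consecutive run of elt_size-byte elements, i.e. if a
   single wide unaligned load of count * elt_size bytes could replace them. */
static bool loads_are_consecutive(const size_t *offsets, size_t count,
                                  size_t elt_size) {
  for (size_t i = 0; i < count; ++i) {
    if (offsets[i] != i * elt_size)
      return false; /* gap or reordering: cannot merge */
  }
  return true;
}
///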
