
Misc SIMD updates

* Fixed v128_padd_u8 misnaming and incorrect NEON implementation
* Several new intrinsics added allowing cleanup in CDEF
* Added ARMv8 optimisations
* Misc improvements for x86 and ARM
* Build defaults to native architecture
* Added avx2 and core2 build options
* Various (harmless) sanitiser issues fixed
stemidts authored and Thomas Davies committed Jun 1, 2018
1 parent f3eef37 commit b80addef0737cae22f6c7a4a0e055214223c0657
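
The Makefile changes below add an ARCH knob that defaults to the host architecture. Example invocations (assumption: GNU make, run from the repository root):

    make ARCH=avx2      # AVX2 build (adds -mavx2)
    make ARCH=core2     # Core 2 tuning (adds -msse2 -mssse3 -mtune=core2)
    make                # ARCH defaults to native, i.e. -march=native -mtune=native
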
@@ -4,10 +4,16 @@ DECODER_PROGRAM = build/Thordec
CFLAGS += -std=c99 -g -O3 -Wall -pedantic -I common
LDFLAGS = -lm

export ARCH ?= native

ifeq ($(ARCH),neon)
CFLAGS += -mfpu=neon
endif

ifeq ($(ARCH),core2)
CFLAGS += -msse2 -mssse3 -mtune=core2
endif

ifeq ($(ARCH),ssse3)
CFLAGS += -mssse3
endif
@@ -16,6 +22,14 @@ ifeq ($(ARCH),sse4)
CFLAGS += -msse4
endif

ifeq ($(ARCH),avx2)
CFLAGS += -mavx2
endif

ifeq ($(ARCH),native)
CFLAGS += -march=native -mtune=native
endif

COMMON_SOURCES = \
common/common_block.c \
common/common_frame.c \
@@ -1858,7 +1858,7 @@ void TEMPLATE(scale_frame_down2x2_simd)(yuv_frame_t* sin, yuv_frame_t* sout)
v128 a = v128_load_unaligned(&sin->y[(2*i+0)*si+2*j]);
v128 b = v128_load_unaligned(&sin->y[(2*i+1)*si+2*j]);
v128 c = v128_avg_u8(a,b);
v128 d = v128_shr_s16(v128_padd_s8(c),1);
v128 d = v128_shr_s16(v128_padd_u8(c),1);
v64_store_aligned(&sout->y[i*so+j], v128_low_v64(v128_pack_s16_u8(z,d)));
}
for (; j<wo; ++j) {
@@ -1878,7 +1878,7 @@ void TEMPLATE(scale_frame_down2x2_simd)(yuv_frame_t* sin, yuv_frame_t* sout)
v128 a = v128_load_aligned(&sin->u[(2*i+0)*sic+2*j]);
v128 b = v128_load_aligned(&sin->u[(2*i+1)*sic+2*j]);
v128 c = v128_avg_u8(a,b);
v128 d = v128_shr_s16(v128_padd_s8(c),1);
v128 d = v128_shr_s16(v128_padd_u8(c),1);
v64_store_aligned(&sout->u[i*soc+j], v128_low_v64(v128_pack_s16_u8(z,d)));
}
for (; j<wo; ++j) {
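
The padd fix in the loops above swaps a signed pairwise add for an unsigned one. A scalar sketch of the per-sample math as I read the diff (not code from the repository), showing why the signed variant breaks for pixel values of 128 and above:

    #include <stdint.h>

    /* One output sample of the 2x2 downscale: a rounded vertical average of two
       rows (v128_avg_u8), then a pairwise horizontal add of neighbouring bytes
       (v128_padd_u8), halved again (v128_shr_s16 by 1).  The samples are unsigned
       8-bit pixels, so the pairwise add must be unsigned; v128_padd_s8 would treat
       values >= 128 as negative and corrupt the packed result. */
    static uint8_t down2x2_one_sample(const uint8_t *row0, const uint8_t *row1)
    {
        unsigned left  = (row0[0] + row1[0] + 1) >> 1;
        unsigned right = (row0[1] + row1[1] + 1) >> 1;
        return (uint8_t)((left + right) >> 1);
    }
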
@@ -2547,7 +2547,7 @@ int TEMPLATE(cdef_find_dir_simd)(const SAMPLE *img, int stride, int32_t *var,
__m128i t =
_mm_packs_epi32(_mm_cmpeq_epi32(max, dir03), _mm_cmpeq_epi32(max, dir47));
best_dir = _mm_movemask_epi8(_mm_packs_epi16(t, t));
best_dir = get_msb(best_dir ^ (best_dir - 1)); // Count trailing zeros
best_dir = log2i(best_dir ^ (best_dir - 1)); // Count trailing zeros
#else
/* Compute "mostly vertical" directions. */
compute_directions(lines, cost + 4);
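
The same hunk swaps get_msb for log2i in the trailing-zero trick. A minimal scalar sketch of that trick (assuming log2i returns the index of the most significant set bit, consistent with the _BitScanReverse definition further down):

    #include <stdint.h>

    /* best_dir arrives as a bitmask of winning directions and the lowest set bit
       picks the reported one.  For nonzero x with lowest set bit at position k,
       x ^ (x - 1) sets bits 0..k, so the MSB index of that mask equals the number
       of trailing zeros in x. */
    static int count_trailing_zeros(uint32_t x)
    {
        uint32_t mask = x ^ (x - 1);   /* ones up to and including the lowest set bit */
        int k = 0;
        while (mask >>= 1)             /* portable stand-in for log2i(mask) */
            k++;
        return k;
    }
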
@@ -59,7 +59,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#if defined(_WIN32)
#include <intrin.h>

SIMD_INLINE unsigned int log2i(uint32_t x)
SIMD_INLINE int log2i(uint32_t x)
{
unsigned long y;
_BitScanReverse(&y, x);
@@ -78,8 +78,10 @@ SIMD_INLINE void thor_free(void *p)
free(((void**)p)[-1]);
}

#elif defined(__GNUC__) && !defined(__clang__)
#elif (__GNUC__)&&(!__APPLE__)
#include <alloca.h>
#include <byteswap.h>


SIMD_INLINE unsigned int log2i(uint32_t x)
{
@@ -118,16 +120,18 @@ SIMD_INLINE void thor_free(void *p)

#endif


static const int simd_check = 1;

#if defined(__ARM_NEON__) && defined(ALIGN)
#if defined(__ARM_NEON) && defined(ALIGN)
static const int simd_available = 1;
#include "simd/v256_intrinsics_arm.h"
#elif (defined(__SSE2__) || _M_IX86_FP==2 || defined(_M_AMD64) || defined(_M_X64)) && defined(ALIGN)
static const int simd_available = 1;
#include "simd/v256_intrinsics_x86.h"
#else
static const int simd_available = 0;
#define NOSIMD 1
#include "simd/v256_intrinsics.h"
#endif
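
The detection change above, testing __ARM_NEON rather than __ARM_NEON__, is what enables the ARMv8 path: 32-bit ARM compilers define both macros when NEON is enabled, but AArch64 compilers (as far as I can tell) only define the ACLE spelling __ARM_NEON, so the old test silently fell back to the plain-C intrinsics on ARMv8. A minimal sketch of the preference order, not repository code:

    #if defined(__ARM_NEON)         /* ARMv7 with -mfpu=neon, and AArch64 compilers */
      /* NEON implementation */
    #elif defined(__ARM_NEON__)     /* legacy 32-bit-only spelling */
      /* NEON implementation */
    #else
      /* portable C fallback */
    #endif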

@@ -48,17 +48,19 @@ SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {

SIMD_INLINE v128 v128_load_unaligned(const void *p) { return c_v128_load_unaligned(p); }
SIMD_INLINE v128 v128_load_aligned(const void *p) { return c_v128_load_aligned(p); }

SIMD_INLINE v128 v128_load_two_v64(const void *p0, const void *p1) { return c_v128_load_two_v64(p0, p1); }
SIMD_INLINE v128 v128_load_low_v64(const void *p) { return c_v128_load_low_v64(p); }
SIMD_INLINE void v128_store_unaligned(void *p, v128 a) { c_v128_store_unaligned(p, a); }
SIMD_INLINE void v128_store_aligned(void *p, v128 a) { c_v128_store_aligned(p, a); }
SIMD_INLINE void v128_store_low_v64(void *p, v128 a) { c_v64_store_unaligned(p, v128_low_v64(a));}

SIMD_INLINE v128 v128_align(v128 a, v128 b, const unsigned int c) { return c_v128_align(a, b, c); }

SIMD_INLINE v128 v128_zero() { return c_v128_zero(); }
SIMD_INLINE v128 v128_dup_8(uint8_t x) { return c_v128_dup_8(x); }
SIMD_INLINE v128 v128_dup_16(uint16_t x) { return c_v128_dup_16(x); }
SIMD_INLINE v128 v128_dup_32(uint32_t x) { return c_v128_dup_32(x); }
SIMD_INLINE v128 v128_dup_64(uint32_t x) { return c_v128_dup_64(x); }
SIMD_INLINE v128 v128_dup_64(uint64_t x) { return c_v128_dup_64(x); }


typedef uint32_t sad128_internal;
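
A quieter fix in the hunk above is the v128_dup_64 parameter type: with a uint32_t argument, the upper half of any 64-bit constant was truncated before the broadcast. A hypothetical call the old prototype mishandled:

    uint64_t k = 0x0001000200030004ULL;
    v128 x = v128_dup_64(k);   /* old uint32_t prototype truncated k to 0x00030004 before duplicating it */
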
@@ -69,10 +71,7 @@ typedef uint32_t ssd128_internal;
SIMD_INLINE ssd128_internal v128_ssd_u8_init() { return c_v128_ssd_u8_init(); }
SIMD_INLINE ssd128_internal v128_ssd_u8(ssd128_internal s, v128 a, v128 b) { return c_v128_ssd_u8(s, a, b); }
SIMD_INLINE uint32_t v128_ssd_u8_sum(ssd128_internal s) { return c_v128_ssd_u8_sum(s); }
typedef uint32_t ssd128_internal_u16;
SIMD_INLINE ssd128_internal_u16 v128_ssd_u16_init() { return c_v128_ssd_u16_init(); }
SIMD_INLINE ssd128_internal_u16 v128_ssd_u16(ssd128_internal_u16 s, v128 a, v128 b) { return c_v128_ssd_u16(s, a, b); }
SIMD_INLINE uint32_t v128_ssd_u16_sum(ssd128_internal_u16 s) { return c_v128_ssd_u16_sum(s); }
SIMD_INLINE int64_t v128_dotp_su8(v128 a, v128 b) { return c_v128_dotp_su8(a, b); }
SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) { return c_v128_dotp_s16(a, b); }
SIMD_INLINE int64_t v128_dotp_s32(v128 a, v128 b) { return c_v128_dotp_s32(a, b); }
SIMD_INLINE uint64_t v128_hadd_u8(v128 a) { return c_v128_hadd_u8(a); }
@@ -86,10 +85,12 @@ SIMD_INLINE v128 v128_andn(v128 a, v128 b) { return c_v128_andn(a, b); }

SIMD_INLINE v128 v128_add_8(v128 a, v128 b) { return c_v128_add_8(a, b); }
SIMD_INLINE v128 v128_add_16(v128 a, v128 b) { return c_v128_add_16(a, b); }
SIMD_INLINE v128 v128_sadd_u8(v128 a, v128 b) { return c_v128_sadd_u8(a, b); }
SIMD_INLINE v128 v128_sadd_s8(v128 a, v128 b) { return c_v128_sadd_s8(a, b); }
SIMD_INLINE v128 v128_sadd_s16(v128 a, v128 b) { return c_v128_sadd_s16(a, b); }
SIMD_INLINE v128 v128_add_32(v128 a, v128 b) { return c_v128_add_32(a, b); }
SIMD_INLINE v128 v128_add_64(v128 a, v128 b) { return c_v128_add_64(a, b); }
SIMD_INLINE v128 v128_padd_s8(v128 a) { return c_v128_padd_s8(a); }
SIMD_INLINE v128 v128_padd_u8(v128 a) { return c_v128_padd_u8(a); }
SIMD_INLINE v128 v128_padd_s16(v128 a) { return c_v128_padd_s16(a); }
SIMD_INLINE v128 v128_sub_8(v128 a, v128 b) { return c_v128_sub_8(a, b); }
SIMD_INLINE v128 v128_ssub_u8(v128 a, v128 b) { return c_v128_ssub_u8(a, b); }
@@ -110,6 +111,8 @@ SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) { return c_v128_mullo_s32(a, b);
SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) { return c_v128_madd_s16(a, b); }
SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) { return c_v128_madd_us8(a, b); }

SIMD_INLINE uint32_t v128_movemask_8(v128 a) { return c_v128_movemask_8(a); }
SIMD_INLINE v128 v128_blend_8(v128 a, v128 b, v128 c) { return c_v128_blend_8(a, b, c); }

SIMD_INLINE v128 v128_avg_u8(v128 a, v128 b) { return c_v128_avg_u8(a, b); }
SIMD_INLINE v128 v128_rdavg_u8(v128 a, v128 b) { return c_v128_rdavg_u8(a, b); }
@@ -121,7 +124,8 @@ SIMD_INLINE v128 v128_min_s8(v128 a, v128 b) { return c_v128_min_s8(a, b); }
SIMD_INLINE v128 v128_max_s8(v128 a, v128 b) { return c_v128_max_s8(a, b); }
SIMD_INLINE v128 v128_min_s16(v128 a, v128 b) { return c_v128_min_s16(a, b); }
SIMD_INLINE v128 v128_max_s16(v128 a, v128 b) { return c_v128_max_s16(a, b); }

SIMD_INLINE v128 v128_min_s32(v128 a, v128 b) { return c_v128_min_s32(a, b); }
SIMD_INLINE v128 v128_max_s32(v128 a, v128 b) { return c_v128_max_s32(a, b); }

SIMD_INLINE v128 v128_ziplo_8(v128 a, v128 b) { return c_v128_ziplo_8(a, b); }
SIMD_INLINE v128 v128_ziphi_8(v128 a, v128 b) { return c_v128_ziphi_8(a, b); }
@@ -143,6 +147,9 @@ SIMD_INLINE v128 v128_unziphi_32(v128 a, v128 b) { return c_v128_unziphi_32(a, b
SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) { return c_v128_unpack_u8_s16(a); }
SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) { return c_v128_unpacklo_u8_s16(a); }
SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) { return c_v128_unpackhi_u8_s16(a); }
SIMD_INLINE v128 v128_unpack_s8_s16(v64 a) { return c_v128_unpack_s8_s16(a); }
SIMD_INLINE v128 v128_unpacklo_s8_s16(v128 a) { return c_v128_unpacklo_s8_s16(a); }
SIMD_INLINE v128 v128_unpackhi_s8_s16(v128 a) { return c_v128_unpackhi_s8_s16(a); }
SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) { return c_v128_pack_s32_s16(a, b); }
SIMD_INLINE v128 v128_pack_s32_u16(v128 a, v128 b) { return c_v128_pack_s32_u16(a, b); }
SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) { return c_v128_pack_s16_u8(a, b); }
@@ -155,14 +162,15 @@ SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) { return c_v128_unpackhi_u16_s32(
SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) { return c_v128_unpackhi_s16_s32(a); }
SIMD_INLINE v128 v128_shuffle_8(v128 a, v128 pattern) { return c_v128_shuffle_8(a, pattern); }


SIMD_INLINE v128 v128_cmpgt_s8(v128 a, v128 b) { return c_v128_cmpgt_s8(a, b); }
SIMD_INLINE v128 v128_cmplt_s8(v128 a, v128 b) { return c_v128_cmplt_s8(a, b); }
SIMD_INLINE v128 v128_cmpeq_8(v128 a, v128 b) { return c_v128_cmpeq_8(a, b); }
SIMD_INLINE v128 v128_cmpgt_s16(v128 a, v128 b) { return c_v128_cmpgt_s16(a, b); }
SIMD_INLINE v128 v128_cmplt_s16(v128 a, v128 b) { return c_v128_cmplt_s16(a, b); }
SIMD_INLINE v128 v128_cmpeq_16(v128 a, v128 b) { return c_v128_cmpeq_16(a, b); }

SIMD_INLINE v128 v128_cmpgt_s32(v128 a, v128 b) { return c_v128_cmpgt_s32(a, b); }
SIMD_INLINE v128 v128_cmplt_s32(v128 a, v128 b) { return c_v128_cmplt_s32(a, b); }
SIMD_INLINE v128 v128_cmpeq_32(v128 a, v128 b) { return c_v128_cmpeq_32(a, b); }

SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) { return c_v128_shl_8(a, c); }
SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) { return c_v128_shr_u8(a, c); }
@@ -197,4 +205,9 @@ SIMD_INLINE sad128_internal_u16 v128_sad_u16_init() { return c_v128_sad_u16_init
SIMD_INLINE sad128_internal_u16 v128_sad_u16(sad128_internal_u16 s, v128 a, v128 b) { return c_v128_sad_u16(s, a, b); }
SIMD_INLINE uint32_t v128_sad_u16_sum(sad128_internal_u16 s) { return c_v128_sad_u16_sum(s); }

typedef uint64_t ssd128_internal_s16;
SIMD_INLINE ssd128_internal_s16 v128_ssd_s16_init() { return c_v128_ssd_s16_init(); }
SIMD_INLINE ssd128_internal_s16 v128_ssd_s16(ssd128_internal_s16 s, v128 a, v128 b) { return c_v128_ssd_s16(s, a, b); }
SIMD_INLINE uint64_t v128_ssd_s16_sum(ssd128_internal_s16 s) { return c_v128_ssd_s16_sum(s); }

#endif /* _V128_INTRINSICS_H */
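
Two hedged usage sketches for the additions above (illustrations only, not code from the repository): v128_movemask_8 gathers the top bit of each byte into a scalar mask, and the v128_ssd_s16_* trio follows the same init/accumulate/sum pattern as the existing SAD and SSD helpers.

    #include <stdint.h>

    /* Is any byte of a greater than the corresponding byte of b (signed compare)?
       Assumes v128_movemask_8 mirrors _mm_movemask_epi8: bit i of the result is
       the top bit of byte i of the argument. */
    static int any_gt_s8(v128 a, v128 b)
    {
        return v128_movemask_8(v128_cmpgt_s8(a, b)) != 0;
    }

    /* Sum of squared differences over n int16_t samples (n a multiple of 8),
       using the new signed-16-bit SSD accumulator declared above. */
    static uint64_t ssd_s16_span(const int16_t *a, const int16_t *b, int n)
    {
        ssd128_internal_s16 acc = v128_ssd_s16_init();
        for (int i = 0; i < n; i += 8)
            acc = v128_ssd_s16(acc, v128_load_unaligned(a + i), v128_load_unaligned(b + i));
        return v128_ssd_s16_sum(acc);
    }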