From 8413b888ffb5b86ad5143fbfb6957e54e50f807b Mon Sep 17 00:00:00 2001
From: Tom Gall
Date: Wed, 11 Jan 2012 12:58:34 -0600
Subject: [PATCH] Pull in and lightly rework 565 and 8888 optimizations
 previously authored by Qualcomm but not upstreamed

---
 Android.mk                |   44 +-
 Makefile.am               |    4 +-
 asm/armv7/jdcolor-armv7.S | 1742 +++++++++++++++++++++++++++++++++++++
 asm/armv7/jdidct-armv7.S  |  762 ++++++++++++++++
 config.h                  |  131 ---
 jconfig.h                 |    1 -
 jdcolor.c                 |   61 +-
 jdmerge.c                 |   77 +-
 jpegint.h                 |   40 +
 libjpeg.map               |   10 +
 simd/Makefile.am          |    7 +-
 simd/jsimd_arm.c          |  111 +++
 12 files changed, 2832 insertions(+), 158 deletions(-)
 create mode 100644 asm/armv7/jdcolor-armv7.S
 create mode 100644 asm/armv7/jdidct-armv7.S
 delete mode 100644 config.h
 create mode 100644 libjpeg.map

diff --git a/Android.mk b/Android.mk
index f924e71..428963a 100644
--- a/Android.mk
+++ b/Android.mk
@@ -7,6 +7,18 @@ ifneq ($(TARGET_SIMULATOR),true)
 ##################################################
 LOCAL_PATH := $(my-dir)
 include $(CLEAR_VARS)
+
+# Set ANDROID_JPEG_USE_VENUM to true to enable VeNum optimizations
+ANDROID_JPEG_USE_VENUM := true
+
+# Disable VeNum optimizations if they are not supported on the build target
+#ifneq ($(ARCH_ARM_HAVE_VFP),true)
+#ANDROID_JPEG_USE_VENUM := false
+#else
+#ifneq ($(ARCH_ARM_HAVE_NEON),true)
+#ANDROID_JPEG_USE_VENUM := false
+#endif
+#endif

 # From autoconf-generated Makefile
 EXTRA_DIST = simd/nasm_lt.sh simd/jcclrmmx.asm simd/jcclrss2.asm simd/jdclrmmx.asm simd/jdclrss2.asm \
@@ -14,6 +26,7 @@ EXTRA_DIST = simd/nasm_lt.sh simd/jcclrmmx.asm simd/jcclrss2.asm simd/jdclrmmx.a
 	simd/jdmrgss2-64.asm simd/CMakeLists.txt

 libsimd_SOURCES_DIST = simd/jsimd_arm_neon.S \
+	asm/armv7/jdcolor-armv7.S asm/armv7/jdidct-armv7.S \
 	simd/jsimd_arm.c

 # or jsimd_none.c
@@ -21,9 +34,10 @@ libsimd_SOURCES_DIST = simd/jsimd_arm_neon.S \

 LOCAL_SRC_FILES := $(libsimd_SOURCES_DIST)

-LOCAL_C_INCLUDES := $(LOCAL_PATH)/simd
+LOCAL_C_INCLUDES := $(LOCAL_PATH)/simd \
+	$(LOCAL_PATH)/android

-LOCAL_CFLAGS :=
+LOCAL_CFLAGS := -DANDROID_JPEG_USE_VENUM

 AM_CFLAGS := -march=armv7-a -mfpu=neon
 AM_CCASFLAGS := -march=armv7-a -mfpu=neon
@@ -59,10 +73,11 @@ LOCAL_SRC_FILES:= $(libjpeg_SOURCES_DIST)
 LOCAL_SHARED_LIBRARIES := libcutils
 LOCAL_STATIC_LIBRARIES := libsimd

-LOCAL_C_INCLUDES := $(LOCAL_PATH)
+LOCAL_C_INCLUDES := $(LOCAL_PATH) \
+	$(LOCAL_PATH)/android

 LOCAL_CFLAGS := -DAVOID_TABLES -O3 -fstrict-aliasing -fprefetch-loop-arrays -DANDROID \
-                -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT
+                -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT -DANDROID_JPEG_USE_VENUM
 #-DANDROID_TILE_BASED_DECODE -DUSE_ANDROID_ASHMEM
@@ -88,7 +103,8 @@ LOCAL_SRC_FILES:= $(cjpeg_SOURCES)
 LOCAL_SHARED_LIBRARIES := libjpeg

-LOCAL_C_INCLUDES := $(LOCAL_PATH)
+LOCAL_C_INCLUDES := $(LOCAL_PATH) \
+	$(LOCAL_PATH)/android

 LOCAL_CFLAGS := -DBMP_SUPPORTED -DGIF_SUPPORTED -DPPM_SUPPORTED -DTARGA_SUPPORTED \
 	-DANDROID -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT
@@ -115,7 +131,8 @@ LOCAL_SRC_FILES:= $(djpeg_SOURCES)
 LOCAL_SHARED_LIBRARIES := libjpeg

-LOCAL_C_INCLUDES := $(LOCAL_PATH)
+LOCAL_C_INCLUDES := $(LOCAL_PATH) \
+	$(LOCAL_PATH)/android

 LOCAL_CFLAGS := -DBMP_SUPPORTED -DGIF_SUPPORTED -DPPM_SUPPORTED -DTARGA_SUPPORTED \
 	-DANDROID -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT
@@ -141,7 +158,8 @@ LOCAL_SRC_FILES:= $(jpegtran_SOURCES)
 LOCAL_SHARED_LIBRARIES := libjpeg

-LOCAL_C_INCLUDES := $(LOCAL_PATH)
+LOCAL_C_INCLUDES := $(LOCAL_PATH) \
+	$(LOCAL_PATH)/android

 LOCAL_CFLAGS := -DANDROID
-DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT @@ -166,7 +184,8 @@ LOCAL_SRC_FILES:= $(tjunittest_SOURCES) LOCAL_SHARED_LIBRARIES := libjpeg -LOCAL_C_INCLUDES := $(LOCAL_PATH) +LOCAL_C_INCLUDES := $(LOCAL_PATH) \ + $(LOCAL_PATH)/android LOCAL_CFLAGS := -DANDROID -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT @@ -192,7 +211,8 @@ LOCAL_SRC_FILES:= $(tjbench_SOURCES) LOCAL_SHARED_LIBRARIES := libjpeg -LOCAL_C_INCLUDES := $(LOCAL_PATH) +LOCAL_C_INCLUDES := $(LOCAL_PATH) \ + $(LOCAL_PATH)/android LOCAL_CFLAGS := -DBMP_SUPPORTED -DPPM_SUPPORTED \ -DANDROID -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT @@ -218,7 +238,8 @@ LOCAL_SRC_FILES:= $(rdjpgcom_SOURCES) LOCAL_SHARED_LIBRARIES := libjpeg -LOCAL_C_INCLUDES := $(LOCAL_PATH) +LOCAL_C_INCLUDES := $(LOCAL_PATH) \ + $(LOCAL_PATH)/android LOCAL_CFLAGS := -DANDROID -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT @@ -243,7 +264,8 @@ LOCAL_SRC_FILES:= $(wrjpgcom_SOURCES) LOCAL_SHARED_LIBRARIES := libjpeg -LOCAL_C_INCLUDES := $(LOCAL_PATH) +LOCAL_C_INCLUDES := $(LOCAL_PATH) \ + $(LOCAL_PATH)/android LOCAL_CFLAGS := -DANDROID -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT diff --git a/Makefile.am b/Makefile.am index e78342f..be75a92 100644 --- a/Makefile.am +++ b/Makefile.am @@ -2,7 +2,7 @@ lib_LTLIBRARIES = libjpeg.la libturbojpeg.la libjpeg_la_LDFLAGS = -version-info ${SO_MAJOR_VERSION}:${SO_MINOR_VERSION} -no-undefined libturbojpeg_la_LDFLAGS = -avoid-version -no-undefined libturbojpeg_la_CFLAGS = -DAVOID_TABLES -O3 -fstrict-aliasing -fprefetch-loop-arrays -DANDROID \ - -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT + -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT -DANDROID_JPEG_USE_VENUM include_HEADERS = jerror.h jmorecfg.h jpeglib.h turbojpeg.h nodist_include_HEADERS = jconfig.h @@ -16,7 +16,7 @@ libjpeg_la_SOURCES = $(HDRS) jcapimin.c jcapistd.c jccoefct.c jccolor.c \ jddctmgr.c jdhuff.c jdinput.c jdmainct.c jdmarker.c jdmaster.c \ jdmerge.c jdphuff.c jdpostct.c jdsample.c jdtrans.c jerror.c \ jfdctflt.c jfdctfst.c jfdctint.c jidctflt.c jidctfst.c jidctint.c \ - jidctred.c jquant1.c jquant2.c jutils.c jmemmgr.c jmemnobs.c + jidctred.c jquant1.c jquant2.c jutils.c jmemmgr.c jmemnobs.c if WITH_ARITH diff --git a/asm/armv7/jdcolor-armv7.S b/asm/armv7/jdcolor-armv7.S new file mode 100644 index 0000000..09a81f9 --- /dev/null +++ b/asm/armv7/jdcolor-armv7.S @@ -0,0 +1,1742 @@ +/*------------------------------------------------------------------------ +* jdcolor-armv7.s +* +* Copyright (c) 2010, Code Aurora Forum. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of Code Aurora Forum, Inc. nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*--------------------------------------------------------------------------
+
+*--------------------------------------------------------------------------
+* FUNCTION LIST
+*--------------------------------------------------------------------------
+*
+* - yvup2rgb565_venum
+* - yyvup2rgb565_venum
+* - yvup2bgr888_venum
+* - yyvup2bgr888_venum
+* - yvup2abgr8888_venum
+* - yyvup2abgr8888_venum
+*
+*--------------------------------------------------------------------------
+*/
+
+    .section yvu_plain_to_rgb565, "x"  @ AREA
+    .text                              @ |.text|, CODE, READONLY
+    .align 2
+    .code 32                           @ CODE32
+
+/*-----------------------------------------------------------------------------
+ * ARM Registers
+ * ---------------------------------------------------------------------------- */
+p_y     .req r0
+p_cr    .req r1
+p_cb    .req r2
+p_rgb   .req r3
+p_bgr   .req r3
+length  .req r12
+
+#if !defined(ANDROID_JPEG_DISABLE_VENUM_YCC_RGB_565)
+    .global yvup2rgb565_venum
+    .global yyvup2rgb565_venum
+#endif
+    .global yvup2bgr888_venum
+    .global yyvup2bgr888_venum
+    .global yvup2abgr8888_venum
+    .global yyvup2abgr8888_venum
+
+@ coefficients in color conversion matrix multiplication
+.equ COEFF_Y,       256  @ contribution of Y
+.equ COEFF_V_RED,   359  @ contribution of V for red
+.equ COEFF_U_GREEN, -88  @ contribution of U for green
+.equ COEFF_V_GREEN, -183 @ contribution of V for green
+.equ COEFF_U_BLUE,  454  @ contribution of U for blue
+
+@ Clamping constants 0x0 and 0xFF
+.equ COEFF_0,   0
+.equ COEFF_255, 255
+
+@ Bias coefficients for red, green and blue
+.equ COEFF_BIAS_R, -45824 @ Red bias =   -359*128 + 128
+.equ COEFF_BIAS_G, 34816  @ Green bias = (88+183)*128 + 128
+.equ COEFF_BIAS_B, -57984 @ Blue bias =  -454*128 + 128
+
+
+#if !defined(ANDROID_JPEG_DISABLE_VENUM_YCC_RGB_565)
+/*--------------------------------------------------------------------------
+* FUNCTION : yvup2rgb565_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION : Perform YVU planar to RGB565 conversion.
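+* The NEON code below implements, per pixel, the Q8 fixed-point
+* equations (coefficients and biases come from the constants
+* table in this file; clamp8 is an illustrative name for
+* clamping to [0..255], not a symbol defined by this patch):
+*
+*   R = clamp8((256*Y + 359*V - 45824) >> 8)
+*   G = clamp8((256*Y -  88*U - 183*V + 34816) >> 8)
+*   B = clamp8((256*Y + 454*U - 57984) >> 8)
+*
+* The clamped 8-bit results are then packed as RGB565, keeping
+* the top 5/6/5 bits of R/G/B respectively.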
+*-------------------------------------------------------------------------- +* C PROTOTYPE : void yvup2rgb565_venum(uint8_t *p_y, +* uint8_t *p_cr, +* uint8_t *p_cb, +* uint8_t *p_rgb565, +* uint32_t length) +*-------------------------------------------------------------------------- +* REG INPUT : R0: uint8_t *p_y +* pointer to the input Y Line +* R1: uint8_t *p_cr +* pointer to the input Cr Line +* R2: uint8_t *p_cb +* pointer to the input Cb Line +* R3: uint8_t *p_rgb565 +* pointer to the output RGB Line +* R12: uint32_t length +* width of Line +*-------------------------------------------------------------------------- +* STACK ARG : None +*-------------------------------------------------------------------------- +* REG OUTPUT : None +*-------------------------------------------------------------------------- +* MEM INPUT : p_y - a line of Y pixels +* p_cr - a line of Cr pixels +* p_cb - a line of Cb pixels +* length - the width of the input line +*-------------------------------------------------------------------------- +* MEM OUTPUT : p_rgb565 - the converted rgb pixels +*-------------------------------------------------------------------------- +* REG AFFECTED : ARM: R0-R4, R12 +* NEON: Q0-Q15 +*-------------------------------------------------------------------------- +* STACK USAGE : none +*-------------------------------------------------------------------------- +* CYCLES : none +* +*-------------------------------------------------------------------------- +* NOTES : +*-------------------------------------------------------------------------- +*/ +.type yvup2rgb565_venum, %function +yvup2rgb565_venum: + /*------------------------------------------------------------------------- + * Store stack registers + * ------------------------------------------------------------------------ */ + STMFD SP!, {LR} + + PLD [R0, R3] @ preload luma line + + ADR R12, constants + + VLD1.S16 {D6, D7}, [R12]! @ D6, D7: 359 | -88 | -183 | 454 | 256 | 0 | 255 | 0 + VLD1.S32 {D30, D31}, [R12] @ Q15 : -45824 | 34816 | -57984 | X + /*------------------------------------------------------------------------- + * Load the 5th parameter via stack + * R0 ~ R3 are used to pass the first 4 parameters, the 5th and above + * parameters are passed via stack + * ------------------------------------------------------------------------ */ + LDR R12, [SP, #4] @ LR is the only one that has been pushed + @ into stack, increment SP by 4 to + @ get the parameter. + @ LDMIB SP, {R12} is an equivalent + @ instruction in this case, where only + @ one register was pushed into stack. 
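+
+    /*-------------------------------------------------------------------------
+     * For reference, a row-by-row C caller might look as follows. This is
+     * only a sketch: the wrapper name and the assumption of fully-sampled
+     * Cr/Cb lines (this variant consumes one chroma sample per Y sample)
+     * are illustrative, not part of this patch.
+     *
+     *   void rows_to_rgb565(uint8_t *y, uint8_t *cr, uint8_t *cb,
+     *                       uint8_t *out, uint32_t width, uint32_t height)
+     *   {
+     *       for (uint32_t row = 0; row < height; row++)
+     *           yvup2rgb565_venum(y   + row * width,      // Y line
+     *                             cr  + row * width,      // Cr line
+     *                             cb  + row * width,      // Cb line
+     *                             out + row * width * 2,  // 2 bytes/pixel
+     *                             width);                 // passed on stack
+     *   }
+     * ------------------------------------------------------------------------ */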
+
+    /*-------------------------------------------------------------------------
+     * Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4, D7[1]     @ Q4: 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
+    VDUP.S16  Q5, D7[2]     @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     * Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0, D30[0]    @ Q0: -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1, D30[1]    @ Q1: 34816 | 34816 | 34816 | 34816
+    VDUP.S32  Q2, D31[0]    @ Q2: -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     * The main loop
+     * ------------------------------------------------------------------------ */
+loop_yvup2rgb565:
+
+    /*-------------------------------------------------------------------------
+     * Load input from Y, V and U
+     * D12 : Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7
+     * D14 : V0 V1 V2 V3 V4 V5 V6 V7
+     * D15 : U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VLD1.U8  {D12}, [p_y]!   @ Load 8 Y elements (uint8) to D12
+    VLD1.U8  {D14}, [p_cr]!  @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15}, [p_cb]!  @ Load 8 Cb elements (uint8) to D15
+
+    /*-------------------------------------------------------------------------
+     * Expand uint8 value to uint16
+     * D18, D19: Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7
+     * D20, D21: V0 V1 V2 V3 V4 V5 V6 V7
+     * D22, D23: U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8 Q9, D12
+    VMOVL.U8 Q10, D14
+    VMOVL.U8 Q11, D15
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16 Q12, D20, D6[0]  @ Q12: 359*(V0,V1,V2,V3) Red
+    VMULL.S16 Q13, D22, D6[1]  @ Q13: -88*(U0,U1,U2,U3) Green
+    VMLAL.S16 Q13, D20, D6[2]  @ Q13: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16 Q14, D22, D6[3]  @ Q14: 454*(U0,U1,U2,U3) Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0          @ Q12 add Red bias -45824
+    VADD.S32  Q13, Q1          @ Q13 add Green bias 34816
+    VADD.S32  Q14, Q2          @ Q14 add Blue bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16 Q12, D18, D7[0]  @ Q12: R0, R1, R2, R3 in 32-bit Q8 format
+    VMLAL.S16 Q13, D18, D7[0]  @ Q13: G0, G1, G2, G3 in 32-bit Q8 format
+    VMLAL.S16 Q14, D18, D7[0]  @ Q14: B0, B1, B2, B3 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32 D18, Q12, #8     @ D18: R0, R1, R2, R3 in 16-bit Q0 format
+    VSHRN.S32 D20, Q13, #8     @ D20: G0, G1, G2, G3 in 16-bit Q0 format
+    VSHRN.S32 D22, Q14, #8     @ D22: B0, B1, B2, B3 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     * Done with the first 4 elements, continue on the next 4 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16 Q12, D21, D6[0]  @ Q12: 359*(V4,V5,V6,V7) Red
+    VMULL.S16 Q13, D23, D6[1]  @ Q13: -88*(U4,U5,U6,U7) Green
+    VMLAL.S16 Q13, D21, D6[2]  @ Q13: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16 Q14, D23, D6[3]  @ Q14: 454*(U4,U5,U6,U7) Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0          @ Q12 add Red bias -45824
+    VADD.S32  Q13, Q1          @ Q13 add Green bias 34816
+    VADD.S32  Q14, Q2          @ Q14 add Blue bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16 Q12, D19, D7[0]  @ Q12: R4, R5, R6, R7 in 32-bit Q8 format
+    VMLAL.S16 Q13, D19, D7[0]  @ Q13: G4, G5, G6, G7 in 32-bit Q8 format
+    VMLAL.S16 Q14, D19, D7[0]  @ Q14: B4, B5, B6, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32 D19, Q12, #8     @ D19: R4, R5, R6, R7 in 16-bit Q0 format
+    VSHRN.S32 D21, Q13, #8     @ D21: G4, G5, G6, G7 in 16-bit Q0 format
+    VSHRN.S32 D23, Q14, #8     @ D23: B4, B5, B6, B7 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     * Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q9, Q9, Q4       @ if Q9 < 0, Q9 = 0
+    VMIN.S16  Q9, Q9, Q5       @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16 D28, Q9        @ store Red to D28, narrow the value from int16 to int8
+
+    VMAX.S16  Q10, Q10, Q4     @ if Q10 < 0, Q10 = 0
+    VMIN.S16  Q10, Q10, Q5     @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16 D27, Q10       @ store Green to D27, narrow the value from int16 to int8
+
+    VMAX.S16  Q11, Q11, Q4     @ if Q11 < 0, Q11 = 0
+    VMIN.S16  Q11, Q11, Q5     @ if Q11 > 255, Q11 = 255
+    VQMOVUN.S16 D26, Q11       @ store Blue to D26, narrow the value from int16 to int8.
+
+    /*-------------------------------------------------------------------------
+     * D27: 3 bits of Green + 5 bits of Blue
+     * D28: 5 bits of Red + 3 bits of Green
+     * ------------------------------------------------------------------------ */
+    VSRI.8   D28, D27, #5      @ right shift G by 5 and insert to R
+    VSHL.U8  D27, D27, #3      @ left shift G by 3
+    VSRI.8   D27, D26, #3      @ right shift B by 3 and insert to G
+
+    SUBS length, length, #8    @ check if the length is less than 8
+
+    BMI trailing_yvup2rgb565   @ jump to trailing processing if remaining length is less than 8
+
+    VST2.U8  {D27, D28}, [p_rgb]! @ vector store Red, Green, Blue to destination
+                                  @ Blue at LSB
+
+    BHI loop_yvup2rgb565       @ loop if more than 8 pixels left
+
+    BEQ end_yvup2rgb565        @ done if exactly 8 pixel processed in the loop
+
+
+trailing_yvup2rgb565:
+    /*-------------------------------------------------------------------------
+     * There are from 1 ~ 7 pixels left in the trailing part.
+     * First adding 7 to the length so the length would be from 0 ~ 6.
+     * eg: 1 pixel left in the trailing part, so 1-8+7 = 0.
+ * Then save 1 pixel unconditionally since at least 1 pixels left in the + * trailing part. + * ------------------------------------------------------------------------ */ + ADDS length, length, #7 @ there are 7 or less in the trailing part + + VST2.U8 {D27[0], D28[0]}, [p_rgb]! @ at least 1 pixel left in the trailing part + BEQ end_yvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D27[1], D28[1]}, [p_rgb]! @ store one more pixel + BEQ end_yvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D27[2], D28[2]}, [p_rgb]! @ store one more pixel + BEQ end_yvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D27[3], D28[3]}, [p_rgb]! @ store one more pixel + BEQ end_yvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D27[4], D28[4]}, [p_rgb]! @ store one more pixel + BEQ end_yvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D27[5], D28[5]}, [p_rgb]! @ store one more pixel + BEQ end_yvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D27[6], D28[6]}, [p_rgb]! @ store one more pixel + +end_yvup2rgb565: + LDMFD SP!, {PC} + + @ end of yvup2rgb565 + + +/*-------------------------------------------------------------------------- +* FUNCTION : yyvup2rgb565_venum +*-------------------------------------------------------------------------- +* DESCRIPTION : Perform YYVU planar to RGB565 conversion. +*-------------------------------------------------------------------------- +* C PROTOTYPE : void yyvup2rgb565_venum(uint8_t *p_y, +* uint8_t *p_cr, +* uint8_t *p_cb, +* uint8_t *p_rgb565, +* uint32_t length) +*-------------------------------------------------------------------------- +* REG INPUT : R0: uint8_t *p_y +* pointer to the input Y Line +* R1: uint8_t *p_cr +* pointer to the input Cr Line +* R2: uint8_t *p_cb +* pointer to the input Cb Line +* R3: uint8_t *p_rgb565 +* pointer to the output RGB Line +* R12: uint32_t length +* width of Line +*-------------------------------------------------------------------------- +* STACK ARG : None +*-------------------------------------------------------------------------- +* REG OUTPUT : None +*-------------------------------------------------------------------------- +* MEM INPUT : p_y - a line of Y pixels +* p_cr - a line of Cr pixels +* p_cb - a line of Cb pixels +* length - the width of the input line +*-------------------------------------------------------------------------- +* MEM OUTPUT : p_rgb565 - the converted rgb pixels +*-------------------------------------------------------------------------- +* REG AFFECTED : ARM: R0-R4, R12 +* NEON: Q0-Q15 +*-------------------------------------------------------------------------- +* STACK USAGE : none +*-------------------------------------------------------------------------- +* CYCLES : none +* +*-------------------------------------------------------------------------- +* NOTES : +*-------------------------------------------------------------------------- +*/ +.type yyvup2rgb565_venum, %function +yyvup2rgb565_venum: + /*------------------------------------------------------------------------- + * Store stack registers + * ------------------------------------------------------------------------ */ + STMFD SP!, {LR} + + PLD [R0, R3] @ preload luma line + + ADR R12, constants + + VLD1.S16 {D6, D7}, [R12]! 
@ D6, D7: 359 | -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]  @ Q15 : -45824 | 34816 | -57984 | X
+
+    /*-------------------------------------------------------------------------
+     * Load the 5th parameter via stack
+     * R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     * parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #4]            @ LR is the only one that has been pushed
+                                 @ into stack, increment SP by 4 to
+                                 @ get the parameter.
+                                 @ LDMIB SP, {R12} is an equivalent
+                                 @ instruction in this case, where only
+                                 @ one register was pushed into stack.
+
+    /*-------------------------------------------------------------------------
+     * Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4, D7[1]     @ Q4: 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
+    VDUP.S16  Q5, D7[2]     @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     * Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0, D30[0]    @ Q0: -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1, D30[1]    @ Q1: 34816 | 34816 | 34816 | 34816
+    VDUP.S32  Q2, D31[0]    @ Q2: -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     * The main loop
+     * ------------------------------------------------------------------------ */
+loop_yyvup2rgb565:
+
+    /*-------------------------------------------------------------------------
+     * Load input from Y, V and U
+     * D12, D13: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14, Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     * D14 : V0 V1 V2 V3 V4 V5 V6 V7
+     * D15 : U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VLD2.U8  {D12,D13}, [p_y]!  @ Load 16 Luma elements (uint8) to D12, D13
+    VLD1.U8  {D14}, [p_cr]!     @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15}, [p_cb]!
@ Load 8 Cb elements (uint8) to D15 + + /*------------------------------------------------------------------------- + * Expand uint8 value to uint16 + * D24, D25: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14 + * D26, D27: Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15 + * D28, D29: V0 V1 V2 V3 V4 V5 V6 V7 + * D30, D31: U0 U1 U2 U3 U4 U5 U6 U7 + * ------------------------------------------------------------------------ */ + VMOVL.U8 Q12, D12 + VMOVL.U8 Q13, D13 + VMOVL.U8 Q14, D14 + VMOVL.U8 Q15, D15 + + /*------------------------------------------------------------------------- + * Multiply contribution from chrominance, results are in 32-bit + * ------------------------------------------------------------------------ */ + VMULL.S16 Q6, D28, D6[0] @ Q6: 359*(V0,V1,V2,V3) Red + VMULL.S16 Q7, D30, D6[1] @ Q7: -88*(U0,U1,U2,U3) Green + VMLAL.S16 Q7, D28, D6[2] @ q7: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3) + VMULL.S16 Q8, D30, D6[3] @ q8: 454*(U0,U1,U2,U3) Blue + + /*------------------------------------------------------------------------- + * Add bias + * ------------------------------------------------------------------------ */ + VADD.S32 Q6, Q0 @ Q6 add Red bias -45824 + VADD.S32 Q7, Q1 @ Q7 add Green bias 34816 + VADD.S32 Q8, Q2 @ Q8 add Blue bias -57984 + + /*------------------------------------------------------------------------- + * Calculate Red, Green, Blue + * ------------------------------------------------------------------------ */ + VMOV.S32 Q9, Q6 + VMLAL.S16 Q6, D24, D7[0] @ Q6: R0, R2, R4, R6 in 32-bit Q8 format + VMLAL.S16 Q9, D26, D7[0] @ Q9: R1, R3, R5, R7 in 32-bit Q8 format + + VMOV.S32 Q10, Q7 + VMLAL.S16 Q7, D24, D7[0] @ Q7: G0, G2, G4, G6 in 32-bit Q8 format + VMLAL.S16 Q10, D26, D7[0] @ Q10: G1, G3, G5, G7 in 32-bit Q8 format + + VMOV.S32 Q11, Q8 + VMLAL.S16 Q8, D24, D7[0] @ Q8: B0, B2, B4, B6 in 32-bit Q8 format + VMLAL.S16 Q11, D26, D7[0] @ Q11: B1, B3, B5, B7 in 32-bit Q8 format + + /*------------------------------------------------------------------------- + * Right shift eight bits with rounding + * ------------------------------------------------------------------------ */ + VSHRN.S32 D12, Q6, #8 @ D12: R0 R2 R4 R6 in 16-bit Q0 format + VSHRN.S32 D13, Q9, #8 @ D13: R1 R3 R5 R7 in 16-bit Q0 format + VZIP.16 D12, D13 @ Q6 : R0 R1 R2 R3 R4 R5 R6 R7 + + VSHRN.S32 D18, Q7, #8 @ D18: G0 G2 G4 G6 in 16-bit Q0 format + VSHRN.S32 D19, Q10, #8 @ D19: G1 G3 G5 G7 in 16-bit Q0 format + VZIP.16 D18, D19 @ Q9 : G0 G1 G2 G3 G4 G5 G6 G7 + + VSHRN.S32 D20, Q8, #8 @ D20: B0 B2 B4 B6 in 16-bit Q0 format + VSHRN.S32 D21, Q11, #8 @ D21: B1 B3 B5 B7 in 16-bit Q0 format + VZIP.16 D20, D21 @ Q10: B0 B1 B2 B3 B4 B5 B6 B7 + + /*------------------------------------------------------------------------- + * Clamp the value to be within [0~255] + * ------------------------------------------------------------------------ */ + VMAX.S16 Q6, Q6, Q4 @ if Q6 < 0, Q6 = 0 + VMIN.S16 Q6, Q6, Q5 @ if Q6 > 255, Q6 = 255 + VQMOVUN.S16 D23, Q6 @ store Red to D23, narrow the value from int16 to int8 + + VMAX.S16 Q9, Q9, Q4 @ if Q9 < 0, Q9 = 0 + VMIN.S16 Q9, Q9, Q5 @ if Q9 > 255, Q9 = 255 + VQMOVUN.S16 D22, Q9 @ store Green to D22, narrow the value from int16 to int8 + + VMAX.S16 Q10, Q10, Q4 @ if Q10 < 0, Q10 = 0 + VMIN.S16 Q10, Q10, Q5 @ if Q10 > 255, Q10 = 255 + VQMOVUN.S16 D21, Q10 @ store Blue to D21, narrow the value from int16 to int8 + + /*------------------------------------------------------------------------- + * D22: 3 bits of Green + 5 bits of Blue + * D23: 5 bits of Red + 3 bits of Green + * 
------------------------------------------------------------------------ */
+    VSRI.8   D23, D22, #5      @ right shift G by 5 and insert to R
+    VSHL.U8  D22, D22, #3      @ left shift G by 3
+    VSRI.8   D22, D21, #3      @ right shift B by 3 and insert to G
+
+    SUBS length, length, #8    @ check if the length is less than 8
+
+    BMI trailing_yyvup2rgb565  @ jump to trailing processing if remaining length is less than 8
+
+    VST2.U8  {D22,D23}, [p_rgb]! @ vector store Red, Green, Blue to destination
+                                 @ Blue at LSB
+
+    BEQ end_yyvup2rgb565       @ done if exactly 8 pixel processed in the loop
+
+
+    /*-------------------------------------------------------------------------
+     * Done with the first 8 elements, continue on the next 8 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16 Q6, D29, D6[0]   @ Q6: 359*(V4,V5,V6,V7) Red
+    VMULL.S16 Q7, D31, D6[1]   @ Q7: -88*(U4,U5,U6,U7) Green
+    VMLAL.S16 Q7, D29, D6[2]   @ Q7: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16 Q8, D31, D6[3]   @ Q8: 454*(U4,U5,U6,U7) Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0           @ Q6 add Red bias -45824
+    VADD.S32  Q7, Q1           @ Q7 add Green bias 34816
+    VADD.S32  Q8, Q2           @ Q8 add Blue bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32  Q9, Q6
+    VMLAL.S16 Q6, D25, D7[0]   @ Q6: R8 R10 R12 R14 in 32-bit Q8 format
+    VMLAL.S16 Q9, D27, D7[0]   @ Q9: R9 R11 R13 R15 in 32-bit Q8 format
+
+    VMOV.S32  Q10, Q7
+    VMLAL.S16 Q7, D25, D7[0]   @ Q7: G8, G10, G12, G14 in 32-bit Q8 format
+    VMLAL.S16 Q10, D27, D7[0]  @ Q10: G9, G11, G13, G15 in 32-bit Q8 format
+
+    VMOV.S32  Q11, Q8
+    VMLAL.S16 Q8, D25, D7[0]   @ Q8: B8, B10, B12, B14 in 32-bit Q8 format
+    VMLAL.S16 Q11, D27, D7[0]  @ Q11: B9, B11, B13, B15 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32 D12, Q6, #8      @ D12: R8 R10 R12 R14 in 16-bit Q0 format
+    VSHRN.S32 D13, Q9, #8      @ D13: R9 R11 R13 R15 in 16-bit Q0 format
+    VZIP.16   D12, D13         @ Q6: R8 R9 R10 R11 R12 R13 R14 R15
+
+    VSHRN.S32 D18, Q7, #8      @ D18: G8 G10 G12 G14 in 16-bit Q0 format
+    VSHRN.S32 D19, Q10, #8     @ D19: G9 G11 G13 G15 in 16-bit Q0 format
+    VZIP.16   D18, D19         @ Q9: G8 G9 G10 G11 G12 G13 G14 G15
+
+    VSHRN.S32 D20, Q8, #8      @ D20: B8 B10 B12 B14 in 16-bit Q0 format
+    VSHRN.S32 D21, Q11, #8     @ D21: B9 B11 B13 B15 in 16-bit Q0 format
+    VZIP.16   D20, D21         @ Q10: B8 B9 B10 B11 B12 B13 B14 B15
+
+    /*-------------------------------------------------------------------------
+     * Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q6, Q6, Q4       @ if Q6 < 0, Q6 = 0
+    VMIN.S16  Q6, Q6, Q5       @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16 D23, Q6        @ store Red to D23, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4       @ if Q9 < 0, Q9 = 0
+    VMIN.S16  Q9, Q9, Q5       @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16 D22, Q9        @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16  Q10, Q10,
Q4 @ if Q10 < 0, Q10 = 0 + VMIN.S16 Q10, Q10, Q5 @ if Q10 > 255, Q10 = 255 + VQMOVUN.S16 D21, Q10 @ store Blue to D21, narrow the value from int16 to int8 + + /*------------------------------------------------------------------------- + * D22: 3 bits of Green + 5 bits of Blue + * D23: 5 bits of Red + 3 bits of Green + * ------------------------------------------------------------------------ */ + VSRI.8 D23, D22, #5 @ right shift G by 5 and insert to R + VSHL.U8 D22, D22, #3 @ left shift G by 3 + VSRI.8 D22, D21, #3 @ right shift B by 3 and insert to G + + SUBS length, length, #8 @ check if the length is less than 8 + + BMI trailing_yyvup2rgb565 @ jump to trailing processing if remaining length is less than 8 + + VST2.U8 {D22,D23}, [p_rgb]! @ vector store Red, Green, Blue to destination + @ Blue at LSB + + BHI loop_yyvup2rgb565 @ loop if more than 8 pixels left + + BEQ end_yyvup2rgb565 @ done if exactly 8 pixel processed in the loop + + +trailing_yyvup2rgb565: + /*------------------------------------------------------------------------- + * There are from 1 ~ 7 pixels left in the trailing part. + * First adding 7 to the length so the length would be from 0 ~ 6. + * eg: 1 pixel left in the trailing part, so 1-8+7 = 0. + * Then save 1 pixel unconditionally since at least 1 pixels left in the + * trailing part. + * ------------------------------------------------------------------------ */ + ADDS length, length, #7 @ there are 7 or less in the trailing part + + VST2.U8 {D22[0],D23[0]}, [p_rgb]! @ at least 1 pixel left in the trailing part + BEQ end_yyvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D22[1],D23[1]}, [p_rgb]! @ store one more pixel + BEQ end_yyvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D22[2],D23[2]}, [p_rgb]! @ store one more pixel + BEQ end_yyvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D22[3],D23[3]}, [p_rgb]! @ store one more pixel + BEQ end_yyvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D22[4],D23[4]}, [p_rgb]! @ store one more pixel + BEQ end_yyvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D22[5],D23[5]}, [p_rgb]! @ store one more pixel + BEQ end_yyvup2rgb565 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST2.U8 {D22[6],D23[6]}, [p_rgb]! @ store one more pixel + +end_yyvup2rgb565: + LDMFD SP!, {PC} + + @ end of yyvup2rgb565 +#endif + +constants: + .hword (COEFF_V_RED), (COEFF_U_GREEN), (COEFF_V_GREEN), (COEFF_U_BLUE) @ 359 | -88 | -183 | 454 + .hword (COEFF_Y), (COEFF_0), (COEFF_255) , (COEFF_0) @ 256 | 0 | 255 | 0 + .word (COEFF_BIAS_R), (COEFF_BIAS_G), (COEFF_BIAS_B) @ -45824 | 34816 | -57984 | X + +/*-------------------------------------------------------------------------- +* FUNCTION : yvup2bgr888_venum +*-------------------------------------------------------------------------- +* DESCRIPTION : Perform YVU planar to BGR888 conversion. 
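+*               Uses the same Q8 fixed-point math as the RGB565 routines
+*               above, but keeps the full 8-bit components and writes them
+*               with an interleaved VST3.U8 store, producing 3 bytes per
+*               pixel instead of a packed 16-bit value.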
+*-------------------------------------------------------------------------- +* C PROTOTYPE : void yvup2bgr888_venum(uint8_t *p_y, +* uint8_t *p_cr, +* uint8_t *p_cb, +* uint8_t *p_bgr888, +* uint32_t length) +*-------------------------------------------------------------------------- +* REG INPUT : R0: uint8_t *p_y +* pointer to the input Y Line +* R1: uint8_t *p_cr +* pointer to the input Cr Line +* R2: uint8_t *p_cb +* pointer to the input Cb Line +* R3: uint8_t *p_bgr888 +* pointer to the output BGR Line +* R12: uint32_t length +* width of Line +*-------------------------------------------------------------------------- +* STACK ARG : None +*-------------------------------------------------------------------------- +* REG OUTPUT : None +*-------------------------------------------------------------------------- +* MEM INPUT : p_y - a line of Y pixels +* p_cr - a line of Cr pixels +* p_cb - a line of Cb pixels +* length - the width of the input line +*-------------------------------------------------------------------------- +* MEM OUTPUT : p_bgr888 - the converted bgr pixels +*-------------------------------------------------------------------------- +* REG AFFECTED : ARM: R0-R4, R12 +* NEON: Q0-Q15 +*-------------------------------------------------------------------------- +* STACK USAGE : none +*-------------------------------------------------------------------------- +* CYCLES : none +* +*-------------------------------------------------------------------------- +* NOTES : +*-------------------------------------------------------------------------- +*/ +.type yvup2bgr888_venum, %function +yvup2bgr888_venum: + + /*------------------------------------------------------------------------- + * Store stack registers + * ------------------------------------------------------------------------ */ + STMFD SP!, {LR} + + PLD [R0, R3] @ preload luma line + + ADR R12, constants + + VLD1.S16 {D6, D7}, [R12]! @ D6, D7: 359 | -88 | -183 | 454 | 256 | 0 | 255 | 0 + VLD1.S32 {D30, D31}, [R12] @ Q15 : -45824 | 34816 | -57984 | X + + /*------------------------------------------------------------------------- + * Load the 5th parameter via stack + * R0 ~ R3 are used to pass the first 4 parameters, the 5th and above + * parameters are passed via stack + * ------------------------------------------------------------------------ */ + LDR R12, [SP, #4] @ LR is the only one that has been pushed + @ into stack, increment SP by 4 to + @ get the parameter. + @ LDMIB SP, {R12} is an equivalent + @ instruction in this case, where only + @ one register was pushed into stack. 
+ + /*------------------------------------------------------------------------- + * Load clamping parameters to duplicate vector elements + * ------------------------------------------------------------------------ */ + VDUP.S16 Q4, D7[1] @ Q4: 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 + VDUP.S16 Q5, D7[2] @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255 + + /*------------------------------------------------------------------------- + * Read bias + * ------------------------------------------------------------------------ */ + VDUP.S32 Q0, D30[0] @ Q0: -45824 | -45824 | -45824 | -45824 + VDUP.S32 Q1, D30[1] @ Q1: 34816 | 34816 | 34816 | 34816 + VDUP.S32 Q2, D31[0] @ Q2: -57984 | -57984 | -57984 | -57984 + + + /*------------------------------------------------------------------------- + * The main loop + * ------------------------------------------------------------------------ */ +loop_yvup2bgr888: + + /*------------------------------------------------------------------------- + * Load input from Y, V and U + * D12 : Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7 + * D14 : V0 V1 V2 V3 V4 V5 V6 V7 + * D15 : U0 U1 U2 U3 U4 U5 U6 U7 + * ------------------------------------------------------------------------ */ + VLD1.U8 {D12}, [p_y]! @ Load 8 Luma elements (uint8) to D12 + VLD1.U8 {D14}, [p_cr]! @ Load 8 Cr elements (uint8) to D14 + VLD1.U8 {D15}, [p_cb]! @ Load 8 Cb elements (uint8) to D15 + + /*------------------------------------------------------------------------- + * Expand uint8 value to uint16 + * D18, D19: Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7 + * D20, D21: V0 V1 V2 V3 V4 V5 V6 V7 + * D22, D23: U0 U1 U2 U3 U4 U5 U6 U7 + * ------------------------------------------------------------------------ */ + VMOVL.U8 Q9, D12 + VMOVL.U8 Q10, D14 + VMOVL.U8 Q11, D15 + + /*------------------------------------------------------------------------- + * Multiply contribution from chrominance, results are in 32-bit + * ------------------------------------------------------------------------ */ + VMULL.S16 Q12, D20, D6[0] @ Q12: 359*(V0,V1,V2,V3) Red + VMULL.S16 Q13, D22, D6[1] @ Q13: -88*(U0,U1,U2,U3) Green + VMLAL.S16 Q13, D20, D6[2] @ Q13: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3) + VMULL.S16 Q14, D22, D6[3] @ Q14: 454*(U0,U1,U2,U3) Blue + + /*------------------------------------------------------------------------- + * Add bias + * ------------------------------------------------------------------------ */ + VADD.S32 Q12, Q0 @ Q12 add Red bias -45824 + VADD.S32 Q13, Q1 @ Q13 add Green bias 34816 + VADD.S32 Q14, Q2 @ Q14 add Blue bias -57984 + + /*------------------------------------------------------------------------- + * Calculate Red, Green, Blue + * ------------------------------------------------------------------------ */ + VMLAL.S16 Q12, D18, D7[0] @ Q12: R0, R1, R2, R3 in 32-bit Q8 format + VMLAL.S16 Q13, D18, D7[0] @ Q13: G0, G1, G2, G3 in 32-bit Q8 format + VMLAL.S16 Q14, D18, D7[0] @ Q14: B0, B1, B2, B3 in 32-bit Q8 format + + /*------------------------------------------------------------------------- + * Right shift eight bits with rounding + * ------------------------------------------------------------------------ */ + VSHRN.S32 D18 , Q12, #8 @ D18: R0, R1, R2, R3 in 16-bit Q0 format + VSHRN.S32 D20 , Q13, #8 @ D20: G0, G1, G2, G3 in 16-bit Q0 format + VSHRN.S32 D22, Q14, #8 @ D22: B0, B1, B2, B3 in 16-bit Q0 format + + /*------------------------------------------------------------------------- + * Done with the first 4 elements, continue on the next 4 elements + * 
------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16 Q12, D21, D6[0]  @ Q12: 359*(V4,V5,V6,V7) Red
+    VMULL.S16 Q13, D23, D6[1]  @ Q13: -88*(U4,U5,U6,U7) Green
+    VMLAL.S16 Q13, D21, D6[2]  @ Q13: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16 Q14, D23, D6[3]  @ Q14: 454*(U4,U5,U6,U7) Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0          @ Q12 add Red bias -45824
+    VADD.S32  Q13, Q1          @ Q13 add Green bias 34816
+    VADD.S32  Q14, Q2          @ Q14 add Blue bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16 Q12, D19, D7[0]  @ Q12: R4, R5, R6, R7 in 32-bit Q8 format
+    VMLAL.S16 Q13, D19, D7[0]  @ Q13: G4, G5, G6, G7 in 32-bit Q8 format
+    VMLAL.S16 Q14, D19, D7[0]  @ Q14: B4, B5, B6, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32 D19, Q12, #8     @ D19: R4, R5, R6, R7 in 16-bit Q0 format
+    VSHRN.S32 D21, Q13, #8     @ D21: G4, G5, G6, G7 in 16-bit Q0 format
+    VSHRN.S32 D23, Q14, #8     @ D23: B4, B5, B6, B7 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     * Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q11, Q11, Q4     @ if Q11 < 0, Q11 = 0
+    VMIN.S16  Q11, Q11, Q5     @ if Q11 > 255, Q11 = 255
+    VQMOVUN.S16 D28, Q11       @ store Blue to D28, narrow the value from int16 to int8
+
+    VMAX.S16  Q10, Q10, Q4     @ if Q10 < 0, Q10 = 0
+    VMIN.S16  Q10, Q10, Q5     @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16 D27, Q10       @ store Green to D27, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4       @ if Q9 < 0, Q9 = 0
+    VMIN.S16  Q9, Q9, Q5       @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16 D26, Q9        @ store Red to D26, narrow the value from int16 to int8.
+
+    SUBS length, length, #8    @ check if the length is less than 8
+
+    BMI trailing_yvup2bgr888   @ jump to trailing processing if remaining length is less than 8
+
+    VST3.U8  {D26,D27,D28}, [p_bgr]! @ vector store Red, Green, Blue to destination
+                                     @ Red at LSB
+
+    BHI loop_yvup2bgr888       @ loop if more than 8 pixels left
+
+    BEQ end_yvup2bgr888        @ done if exactly 8 pixel processed in the loop
+
+
+trailing_yvup2bgr888:
+    /*-------------------------------------------------------------------------
+     * There are from 1 ~ 7 pixels left in the trailing part.
+     * First adding 7 to the length so the length would be from 0 ~ 6.
+     * eg: 1 pixel left in the trailing part, so 1-8+7 = 0.
+     * Then save 1 pixel unconditionally since at least 1 pixel is left in the
+     * trailing part.
+     * ------------------------------------------------------------------------ */
+    ADDS length, length, #7    @ there are 7 or less in the trailing part
+
+    VST3.U8  {D26[0], D27[0], D28[0]}, [p_bgr]! @ at least 1 pixel left in the trailing part
+    BEQ end_yvup2bgr888        @ done if 0 pixel left
+
+    SUBS length, length, #1    @ update length counter
+    VST3.U8  {D26[1], D27[1], D28[1]}, [p_bgr]!
@ store one more pixel + BEQ end_yvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D26[2], D27[2], D28[2]}, [p_bgr]! @ store one more pixel + BEQ end_yvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D26[3], D27[3], D28[3]}, [p_bgr]! @ store one more pixel + BEQ end_yvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D26[4], D27[4], D28[4]}, [p_bgr]! @ store one more pixel + BEQ end_yvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D26[5], D27[5], D28[5]}, [p_bgr]! @ store one more pixel + BEQ end_yvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D26[6], D27[6], D28[6]}, [p_bgr]! @ store one more pixel + +end_yvup2bgr888: + LDMFD SP!, {PC} + + @ end of yvup2bgr888 + + +/*------------------------------------------------------------------------- +* FUNCTION : yyvup2bgr888_venum +*-------------------------------------------------------------------------- +* DESCRIPTION : Perform YYVU planar to BGR888 conversion. +*-------------------------------------------------------------------------- +* C PROTOTYPE : void yyvup2bgr888_venum(uint8_t *p_y, +* uint8_t *p_cr, +* uint8_t *p_cb, +* uint8_t *p_bgr888, +* uint32_t length) +*-------------------------------------------------------------------------- +* REG INPUT : R0: uint8_t *p_y +* pointer to the input Y Line +* R1: uint8_t *p_cr +* pointer to the input Cr Line +* R2: uint8_t *p_cb +* pointer to the input Cb Line +* R3: uint8_t *p_bgr888 +* pointer to the output BGR Line +* R12: uint32_t length +* width of Line +*-------------------------------------------------------------------------- +* STACK ARG : None +*-------------------------------------------------------------------------- +* REG OUTPUT : None +*-------------------------------------------------------------------------- +* MEM INPUT : p_y - a line of Y pixels +* p_cr - a line of Cr pixels +* p_cb - a line of Cb pixels +* length - the width of the input line +*-------------------------------------------------------------------------- +* MEM OUTPUT : p_bgr888 - the converted bgr pixels +*-------------------------------------------------------------------------- +* REG AFFECTED : ARM: R0-R4, R12 +* NEON: Q0-Q15 +*-------------------------------------------------------------------------- +* STACK USAGE : none +*-------------------------------------------------------------------------- +* CYCLES : none +* +*-------------------------------------------------------------------------- +* NOTES : +*-------------------------------------------------------------------------- +*/ +.type yyvup2bgr888_venum, %function +yyvup2bgr888_venum: + /*------------------------------------------------------------------------- + * Store stack registers + * ------------------------------------------------------------------------ */ + STMFD SP!, {LR} + + PLD [R0, R3] @ preload luma line + + ADR R12, constants + + VLD1.S16 {D6, D7}, [R12]! 
@ D6, D7: 359 | -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]  @ Q15 : -45824 | 34816 | -57984 | X
+
+    /*-------------------------------------------------------------------------
+     * Load the 5th parameter via stack
+     * R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     * parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #4]            @ LR is the only one that has been pushed
+                                 @ into stack, increment SP by 4 to
+                                 @ get the parameter.
+                                 @ LDMIB SP, {R12} is an equivalent
+                                 @ instruction in this case, where only
+                                 @ one register was pushed into stack.
+
+    /*-------------------------------------------------------------------------
+     * Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4, D7[1]     @ Q4: 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
+    VDUP.S16  Q5, D7[2]     @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     * Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0, D30[0]    @ Q0: -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1, D30[1]    @ Q1: 34816 | 34816 | 34816 | 34816
+    VDUP.S32  Q2, D31[0]    @ Q2: -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     * The main loop
+     * ------------------------------------------------------------------------ */
+loop_yyvup2bgr888:
+
+    /*-------------------------------------------------------------------------
+     * Load input from Y, V and U
+     * D12, D13: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14, Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     * D14 : V0 V1 V2 V3 V4 V5 V6 V7
+     * D15 : U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VLD2.U8  {D12,D13}, [p_y]!  @ Load 16 Luma elements (uint8) to D12, D13
+    VLD1.U8  {D14}, [p_cr]!     @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15}, [p_cb]!
@ Load 8 Cb elements (uint8) to D15 + + /*------------------------------------------------------------------------- + * Expand uint8 value to uint16 + * D24, D25: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14 + * D26, D27: Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15 + * D28, D29: V0 V1 V2 V3 V4 V5 V6 V7 + * D30, D31: U0 U1 U2 U3 U4 U5 U6 U7 + * ------------------------------------------------------------------------ */ + VMOVL.U8 Q12, D12 + VMOVL.U8 Q13, D13 + VMOVL.U8 Q14, D14 + VMOVL.U8 Q15, D15 + + /*------------------------------------------------------------------------- + * Multiply contribution from chrominance, results are in 32-bit + * ------------------------------------------------------------------------ */ + VMULL.S16 Q6, D28, D6[0] @ Q6: 359*(V0,V1,V2,V3) Red + VMULL.S16 Q7, D30, D6[1] @ Q7: -88*(U0,U1,U2,U3) Green + VMLAL.S16 Q7, D28, D6[2] @ q7: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3) + VMULL.S16 Q8, D30, D6[3] @ q8: 454*(U0,U1,U2,U3) Blue + + /*------------------------------------------------------------------------- + * Add bias + * ------------------------------------------------------------------------ */ + VADD.S32 Q6, Q0 @ Q6 add Red bias -45824 + VADD.S32 Q7, Q1 @ Q7 add Green bias 34816 + VADD.S32 Q8, Q2 @ Q8 add Blue bias -57984 + + /*------------------------------------------------------------------------- + * Calculate Red, Green, Blue + * ------------------------------------------------------------------------ */ + VMOV.S32 Q9, Q6 + VMLAL.S16 Q6, D24, D7[0] @ Q6: R0, R2, R4, R6 in 32-bit Q8 format + VMLAL.S16 Q9, D26, D7[0] @ Q9: R1, R3, R5, R7 in 32-bit Q8 format + + VMOV.S32 Q10, Q7 + VMLAL.S16 Q7, D24, D7[0] @ Q7: G0, G2, G4, G6 in 32-bit Q8 format + VMLAL.S16 Q10, D26, D7[0] @ Q10: G1, G3, G5, G7 in 32-bit Q8 format + + VMOV.S32 Q11, Q8 + VMLAL.S16 Q8, D24, D7[0] @ Q8: B0, B2, B4, B6 in 32-bit Q8 format + VMLAL.S16 Q11, D26, D7[0] @ Q11: B1, B3, B5, B7 in 32-bit Q8 format + + /*------------------------------------------------------------------------- + * Right shift eight bits with rounding + * ------------------------------------------------------------------------ */ + VSHRN.S32 D12, Q6, #8 @ D12: R0 R2 R4 R6 in 16-bit Q0 format + VSHRN.S32 D13, Q9, #8 @ D13: R1 R3 R5 R7 in 16-bit Q0 format + VZIP.16 D12, D13 @ Q6 : R0 R1 R2 R3 R4 R5 R6 R7 + + VSHRN.S32 D18, Q7, #8 @ D18: G0 G2 G4 G6 in 16-bit Q0 format + VSHRN.S32 D19, Q10, #8 @ D19: G1 G3 G5 G7 in 16-bit Q0 format + VZIP.16 D18, D19 @ Q9 : G0 G1 G2 G3 G4 G5 G6 G7 + + VSHRN.S32 D20, Q8, #8 @ D20: B0 B2 B4 B6 in 16-bit Q0 format + VSHRN.S32 D21, Q11, #8 @ D21: B1 B3 B5 B7 in 16-bit Q0 format + VZIP.16 D20, D21 @ Q10: B0 B1 B2 B3 B4 B5 B6 B7 + + /*------------------------------------------------------------------------- + * Clamp the value to be within [0~255] + * ------------------------------------------------------------------------ */ + VMAX.S16 Q10, Q10, Q4 @ if Q10 < 0, Q10 = 0 + VMIN.S16 Q10, Q10, Q5 @ if Q10 > 255, Q10 = 255 + VQMOVUN.S16 D23, Q10 @ store Blue to D23, narrow the value from int16 to int8 + + VMAX.S16 Q9, Q9, Q4 @ if Q9 < 0, Q9 = 0 + VMIN.S16 Q9, Q9, Q5 @ if Q9 > 255, Q9 = 255 + VQMOVUN.S16 D22, Q9 @ store Green to D22, narrow the value from int16 to int8 + + VMAX.S16 Q6, Q6, Q4 @ if Q6 < 0, Q6 = 0 + VMIN.S16 Q6, Q6, Q5 @ if Q6 > 255, Q6 = 255 + VQMOVUN.S16 D21, Q6 @ store Red to D21, narrow the value from int16 to int8 + + SUBS length, length, #8 @ check if the length is less than 8 + + BMI trailing_yyvup2bgr888 @ jump to trailing processing if remaining length is less than 8 + + VST3.U8 {D21,D22,D23}, [p_bgr]! 
@ vector store Blue, Green, Red to destination
+                                     @ Red at LSB
+
+    BEQ end_yyvup2bgr888       @ done if exactly 8 pixel processed in the loop
+
+    /*-------------------------------------------------------------------------
+     * Done with the first 8 elements, continue on the next 8 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16 Q6, D29, D6[0]   @ Q6: 359*(V4,V5,V6,V7) Red
+    VMULL.S16 Q7, D31, D6[1]   @ Q7: -88*(U4,U5,U6,U7) Green
+    VMLAL.S16 Q7, D29, D6[2]   @ Q7: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16 Q8, D31, D6[3]   @ Q8: 454*(U4,U5,U6,U7) Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0           @ Q6 add Red bias -45824
+    VADD.S32  Q7, Q1           @ Q7 add Green bias 34816
+    VADD.S32  Q8, Q2           @ Q8 add Blue bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32  Q9, Q6
+    VMLAL.S16 Q6, D25, D7[0]   @ Q6: R8 R10 R12 R14 in 32-bit Q8 format
+    VMLAL.S16 Q9, D27, D7[0]   @ Q9: R9 R11 R13 R15 in 32-bit Q8 format
+
+    VMOV.S32  Q10, Q7
+    VMLAL.S16 Q7, D25, D7[0]   @ Q7: G8, G10, G12, G14 in 32-bit Q8 format
+    VMLAL.S16 Q10, D27, D7[0]  @ Q10: G9, G11, G13, G15 in 32-bit Q8 format
+
+    VMOV.S32  Q11, Q8
+    VMLAL.S16 Q8, D25, D7[0]   @ Q8: B8, B10, B12, B14 in 32-bit Q8 format
+    VMLAL.S16 Q11, D27, D7[0]  @ Q11: B9, B11, B13, B15 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32 D12, Q6, #8      @ D12: R8 R10 R12 R14 in 16-bit Q0 format
+    VSHRN.S32 D13, Q9, #8      @ D13: R9 R11 R13 R15 in 16-bit Q0 format
+    VZIP.16   D12, D13         @ Q6: R8 R9 R10 R11 R12 R13 R14 R15
+
+    VSHRN.S32 D18, Q7, #8      @ D18: G8 G10 G12 G14 in 16-bit Q0 format
+    VSHRN.S32 D19, Q10, #8     @ D19: G9 G11 G13 G15 in 16-bit Q0 format
+    VZIP.16   D18, D19         @ Q9: G8 G9 G10 G11 G12 G13 G14 G15
+
+    VSHRN.S32 D20, Q8, #8      @ D20: B8 B10 B12 B14 in 16-bit Q0 format
+    VSHRN.S32 D21, Q11, #8     @ D21: B9 B11 B13 B15 in 16-bit Q0 format
+    VZIP.16   D20, D21         @ Q10: B8 B9 B10 B11 B12 B13 B14 B15
+
+    /*-------------------------------------------------------------------------
+     * Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16  Q10, Q10, Q4     @ if Q10 < 0, Q10 = 0
+    VMIN.S16  Q10, Q10, Q5     @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16 D23, Q10       @ store Blue to D23, narrow the value from int16 to int8
+
+    VMAX.S16  Q9, Q9, Q4       @ if Q9 < 0, Q9 = 0
+    VMIN.S16  Q9, Q9, Q5       @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16 D22, Q9        @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16  Q6, Q6, Q4       @ if Q6 < 0, Q6 = 0
+    VMIN.S16  Q6, Q6, Q5       @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16 D21, Q6        @ store Red to D21, narrow the value from int16 to int8
+
+
+    SUBS length, length, #8    @ check if the length is less than 8
+
+    BMI trailing_yyvup2bgr888  @ jump to trailing processing if remaining length is less than 8
+
+    VST3.U8  {D21,D22,D23}, [p_bgr]!
@ vector store Blue, Green, Red to destination + @ Red at LSB + + BHI loop_yyvup2bgr888 @ loop if more than 8 pixels left + + BEQ end_yyvup2bgr888 @ done if exactly 8 pixel processed in the loop + + +trailing_yyvup2bgr888: + /*------------------------------------------------------------------------- + * There are from 1 ~ 7 pixels left in the trailing part. + * First adding 7 to the length so the length would be from 0 ~ 6. + * eg: 1 pixel left in the trailing part, so 1-8+7 = 0. + * Then save 1 pixel unconditionally since at least 1 pixels left in the + * trailing part. + * ------------------------------------------------------------------------ */ + ADDS length, length, #7 @ there are 7 or less in the trailing part + + VST3.U8 {D21[0],D22[0],D23[0]}, [p_bgr]! @ at least 1 pixel left in the trailing part + BEQ end_yyvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D21[1],D22[1],D23[1]}, [p_bgr]! @ store one more pixel + BEQ end_yyvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D21[2],D22[2],D23[2]}, [p_bgr]! @ store one more pixel + BEQ end_yyvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D21[3],D22[3],D23[3]}, [p_bgr]! @ store one more pixel + BEQ end_yyvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D21[4],D22[4],D23[4]}, [p_bgr]! @ store one more pixel + BEQ end_yyvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D21[5],D22[5],D23[5]}, [p_bgr]! @ store one more pixel + BEQ end_yyvup2bgr888 @ done if 0 pixel left + + SUBS length, length, #1 @ update length counter + VST3.U8 {D21[6],D22[6],D23[6]}, [p_bgr]! @ store one more pixel + +end_yyvup2bgr888: + LDMFD SP!, {PC} + + @ end of yyvup2bgr888 + +/*-------------------------------------------------------------------------- +* FUNCTION : yvup2abgr8888_venum +*-------------------------------------------------------------------------- +* DESCRIPTION : Perform YVU planar to ABGR8888 conversion. 
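+*               Each output pixel occupies 4 bytes (alpha plus the three
+*               color components); the color math is the same Q8
+*               fixed-point conversion used by the routines above.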
+/*--------------------------------------------------------------------------
+* FUNCTION     : yvup2abgr8888_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION  : Perform YVU planar to ABGR8888 conversion.
+*--------------------------------------------------------------------------
+* C PROTOTYPE  : void yvup2abgr8888_venum(uint8_t *p_y,
+*                                         uint8_t *p_cr,
+*                                         uint8_t *p_cb,
+*                                         uint8_t *p_abgr8888,
+*                                         uint32_t length)
+*--------------------------------------------------------------------------
+* REG INPUT    : R0: uint8_t *p_y
+*                      pointer to the input Y Line
+*                R1: uint8_t *p_cr
+*                      pointer to the input Cr Line
+*                R2: uint8_t *p_cb
+*                      pointer to the input Cb Line
+*                R3: uint8_t *p_abgr8888
+*                      pointer to the output ABGR Line
+*                R12: uint32_t length
+*                      width of Line (loaded from the stack, see below)
+*--------------------------------------------------------------------------
+* STACK ARG    : uint32_t length -- the 5th argument is passed on the
+*                stack and loaded into R12
+*--------------------------------------------------------------------------
+* REG OUTPUT   : None
+*--------------------------------------------------------------------------
+* MEM INPUT    : p_y    - a line of Y pixels
+*                p_cr   - a line of Cr pixels
+*                p_cb   - a line of Cb pixels
+*                length - the width of the input line
+*--------------------------------------------------------------------------
+* MEM OUTPUT   : p_abgr8888 - the converted ABGR pixels
+*--------------------------------------------------------------------------
+* REG AFFECTED : ARM:  R0-R4, R12
+*                NEON: Q0-Q15
+*--------------------------------------------------------------------------
+* STACK USAGE  : none
+*--------------------------------------------------------------------------
+* CYCLES       : none
+*
+*--------------------------------------------------------------------------
+* NOTES        :
+*--------------------------------------------------------------------------
+*/
+.type yvup2abgr8888_venum, %function
+yvup2abgr8888_venum:
+    /*-------------------------------------------------------------------------
+     * Store stack registers
+     * ------------------------------------------------------------------------ */
+    STMFD SP!, {LR}
+
+    PLD [R0, R3]                     @ preload luma line
+
+    ADR   R12, constants
+
+    VLD1.S16  {D6, D7}, [R12]!       @ D6, D7: 359 | -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]      @ Q15   : -45824 | 34816 | -57984 | X
+
+    /*-------------------------------------------------------------------------
+     * Load the 5th parameter via stack
+     * R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     * parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #4]                @ LR is the only register that was pushed
+                                     @ onto the stack, so the argument sits at
+                                     @ SP plus 4.
+                                     @ LDMIB SP, {R12} is an equivalent
+                                     @ instruction in this case, where only
+                                     @ one register was pushed onto the stack.
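+
+    /*-------------------------------------------------------------------------
+     * Reference model of the per-pixel arithmetic used below, as scalar C
+     * (illustrative only, not part of the build):
+     *
+     *     int r = (256*y + 359*(v - 128) + 128) >> 8;
+     *     int g = (256*y -  88*(u - 128) - 183*(v - 128) + 128) >> 8;
+     *     int b = (256*y + 454*(u - 128) + 128) >> 8;
+     *     r = r < 0 ? 0 : (r > 255 ? 255 : r);   // likewise for g and b
+     *
+     * The -128 chroma centering and the +128 rounding term are folded into
+     * the biases read below: -359*128 + 128 = -45824,
+     * (88 + 183)*128 + 128 = 34816, and -454*128 + 128 = -57984.
+     * ------------------------------------------------------------------------ */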
+
+    /*-------------------------------------------------------------------------
+     * Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4, D7[1]              @ Q4:   0 |   0 |   0 |   0 |   0 |   0 |   0 |   0
+    VDUP.S16  Q5, D7[2]              @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     * Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0, D30[0]             @ Q0: -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1, D30[1]             @ Q1:  34816 |  34816 |  34816 |  34816
+    VDUP.S32  Q2, D31[0]             @ Q2: -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     * The main loop
+     * ------------------------------------------------------------------------ */
+loop_yvup2abgr:
+
+    /*-------------------------------------------------------------------------
+     * Load input from Y, V and U
+     * D12 : Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7
+     * D14 : V0 V1 V2 V3 V4 V5 V6 V7
+     * D15 : U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VLD1.U8  {D12}, [p_y]!           @ Load 8 Luma elements (uint8) to D12
+    VLD1.U8  {D14}, [p_cr]!          @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15}, [p_cb]!          @ Load 8 Cb elements (uint8) to D15
+
+    /*-------------------------------------------------------------------------
+     * Expand uint8 value to uint16
+     * D18, D19: Y0 Y1 Y2 Y3 Y4 Y5 Y6 Y7
+     * D20, D21: V0 V1 V2 V3 V4 V5 V6 V7
+     * D22, D23: U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8  Q9,  D12
+    VMOVL.U8  Q10, D14
+    VMOVL.U8  Q11, D15
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q12, D20, D6[0]       @ Q12: 359*(V0,V1,V2,V3)    Red
+    VMULL.S16  Q13, D22, D6[1]       @ Q13: -88*(U0,U1,U2,U3)    Green
+    VMLAL.S16  Q13, D20, D6[2]       @ Q13: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q14, D22, D6[3]       @ Q14: 454*(U0,U1,U2,U3)    Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0                @ Q12 add Red   bias -45824
+    VADD.S32  Q13, Q1                @ Q13 add Green bias  34816
+    VADD.S32  Q14, Q2                @ Q14 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16  Q12, D18, D7[0]       @ Q12: R0, R1, R2, R3 in 32-bit Q8 format
+    VMLAL.S16  Q13, D18, D7[0]       @ Q13: G0, G1, G2, G3 in 32-bit Q8 format
+    VMLAL.S16  Q14, D18, D7[0]       @ Q14: B0, B1, B2, B3 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32  D18, Q12, #8          @ D18: R0, R1, R2, R3 in 16-bit Q0 format
+    VSHRN.S32  D20, Q13, #8          @ D20: G0, G1, G2, G3 in 16-bit Q0 format
+    VSHRN.S32  D22, Q14, #8          @ D22: B0, B1, B2, B3 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     * Done with the first 4 elements, continue on the next 4 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q12, D21, D6[0]       @ Q12: 359*(V4,V5,V6,V7)    Red
+    VMULL.S16  Q13, D23, D6[1]       @ Q13: -88*(U4,U5,U6,U7)    Green
+    VMLAL.S16  Q13, D21, D6[2]       @ Q13: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16  Q14, D23, D6[3]       @ Q14: 454*(U4,U5,U6,U7)    Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q12, Q0                @ Q12 add Red   bias -45824
+    VADD.S32  Q13, Q1                @ Q13 add Green bias  34816
+    VADD.S32  Q14, Q2                @ Q14 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMLAL.S16  Q12, D19, D7[0]       @ Q12: R4, R5, R6, R7 in 32-bit Q8 format
+    VMLAL.S16  Q13, D19, D7[0]       @ Q13: G4, G5, G6, G7 in 32-bit Q8 format
+    VMLAL.S16  Q14, D19, D7[0]       @ Q14: B4, B5, B6, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32  D19, Q12, #8          @ D19: R4, R5, R6, R7 in 16-bit Q0 format
+    VSHRN.S32  D21, Q13, #8          @ D21: G4, G5, G6, G7 in 16-bit Q0 format
+    VSHRN.S32  D23, Q14, #8          @ D23: B4, B5, B6, B7 in 16-bit Q0 format
+
+    /*-------------------------------------------------------------------------
+     * Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16     Q11, Q11, Q4        @ if Q11 <   0, Q11 =   0
+    VMIN.S16     Q11, Q11, Q5        @ if Q11 > 255, Q11 = 255
+    VQMOVUN.S16  D28, Q11            @ store Blue to D28, narrow the value from int16 to int8
+
+    VMAX.S16     Q10, Q10, Q4        @ if Q10 <   0, Q10 =   0
+    VMIN.S16     Q10, Q10, Q5        @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16  D27, Q10            @ store Green to D27, narrow the value from int16 to int8
+
+    VMAX.S16     Q9, Q9, Q4          @ if Q9 <   0, Q9 =   0
+    VMIN.S16     Q9, Q9, Q5          @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D26, Q9             @ store Red to D26, narrow the value from int16 to int8
+
+    /*-------------------------------------------------------------------------
+     * abgr format with leading 0xFF byte
+     * ------------------------------------------------------------------------ */
+    VMOVN.I16  D29, Q5               @ D29: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    SUBS  length, length, #8         @ check if the length is less than 8
+
+    BMI   trailing_yvup2abgr         @ jump to trailing processing if fewer than 8 pixels remain
+
+    VST4.U8  {D26,D27,D28,D29}, [p_bgr]!  @ vector store Alpha, Blue, Green, Red to destination
+                                          @ Red at LSB
+
+    BHI   loop_yvup2abgr             @ loop if more than 8 pixels left
+
+    BEQ   end_yvup2abgr              @ done if exactly 8 pixels were processed in the loop
+
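+    /*-------------------------------------------------------------------------
+     * Output layout (assuming a little-endian target): VST4 interleaves
+     * D26..D29 as R,G,B,A bytes per pixel, so each pixel reads back as the
+     * 32-bit word 0xAABBGGRR -- ABGR8888 with Red in the least significant
+     * byte. Illustrative C:
+     *
+     *     uint8_t *p = p_abgr8888;
+     *     p[0] = r; p[1] = g; p[2] = b; p[3] = 0xFF;  // word = 0xFFBBGGRR
+     * ------------------------------------------------------------------------ */
+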
+
+trailing_yvup2abgr:
+    /*-------------------------------------------------------------------------
+     * There are 1 ~ 7 pixels left in the trailing part.
+     * First add 7 to the length so the length is in 0 ~ 6.
+     * e.g.: 1 pixel left in the trailing part, so 1-8+7 = 0.
+     * Then save 1 pixel unconditionally, since at least 1 pixel is left in
+     * the trailing part.
+     * ------------------------------------------------------------------------ */
+    ADDS  length, length, #7         @ there are 7 or fewer pixels in the trailing part
+
+    VST4.U8  {D26[0], D27[0], D28[0], D29[0]}, [p_bgr]!   @ at least 1 pixel left in the trailing part
+    BEQ   end_yvup2abgr              @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D26[1], D27[1], D28[1], D29[1]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yvup2abgr              @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D26[2], D27[2], D28[2], D29[2]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yvup2abgr              @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D26[3], D27[3], D28[3], D29[3]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yvup2abgr              @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D26[4], D27[4], D28[4], D29[4]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yvup2abgr              @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D26[5], D27[5], D28[5], D29[5]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yvup2abgr              @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D26[6], D27[6], D28[6], D29[6]}, [p_bgr]!   @ store one more pixel
+
+end_yvup2abgr:
+    LDMFD SP!, {PC}
+    @ end of yvup2abgr
+
+/*--------------------------------------------------------------------------
+* FUNCTION     : yyvup2abgr8888_venum
+*--------------------------------------------------------------------------
+* DESCRIPTION  : Perform YYVU planar to ABGR8888 conversion.
+*--------------------------------------------------------------------------
+* C PROTOTYPE  : void yyvup2abgr8888_venum(uint8_t *p_y,
+*                                          uint8_t *p_cr,
+*                                          uint8_t *p_cb,
+*                                          uint8_t *p_abgr8888,
+*                                          uint32_t length)
+*--------------------------------------------------------------------------
+* REG INPUT    : R0: uint8_t *p_y
+*                      pointer to the input Y Line
+*                R1: uint8_t *p_cr
+*                      pointer to the input Cr Line
+*                R2: uint8_t *p_cb
+*                      pointer to the input Cb Line
+*                R3: uint8_t *p_abgr8888
+*                      pointer to the output ABGR Line
+*                R12: uint32_t length
+*                      width of Line (loaded from the stack, see below)
+*--------------------------------------------------------------------------
+* STACK ARG    : uint32_t length -- the 5th argument is passed on the
+*                stack and loaded into R12
+*--------------------------------------------------------------------------
+* REG OUTPUT   : None
+*--------------------------------------------------------------------------
+* MEM INPUT    : p_y    - a line of Y pixels
+*                p_cr   - a line of Cr pixels
+*                p_cb   - a line of Cb pixels
+*                length - the width of the input line
+*--------------------------------------------------------------------------
+* MEM OUTPUT   : p_abgr8888 - the converted ABGR pixels
+*--------------------------------------------------------------------------
+* REG AFFECTED : ARM:  R0-R4, R12
+*                NEON: Q0-Q15
+*--------------------------------------------------------------------------
+* STACK USAGE  : none
+*--------------------------------------------------------------------------
+* CYCLES       : none
+*
+*--------------------------------------------------------------------------
+* NOTES        :
+*--------------------------------------------------------------------------
+*/
+.type yyvup2abgr8888_venum, %function
+yyvup2abgr8888_venum:
+    /*-------------------------------------------------------------------------
+     * Store stack registers
+     * ------------------------------------------------------------------------ */
+    STMFD SP!, {LR}
+
+    PLD [R0, R3]                     @ preload luma line
+
+    ADR   R12, constants
+
+    VLD1.S16  {D6, D7}, [R12]!       @ D6, D7: 359 | -88 | -183 | 454 | 256 | 0 | 255 | 0
+    VLD1.S32  {D30, D31}, [R12]      @ Q15   : -45824 | 34816 | -57984 | X
+
+    /*-------------------------------------------------------------------------
+     * Load the 5th parameter via stack
+     * R0 ~ R3 are used to pass the first 4 parameters, the 5th and above
+     * parameters are passed via stack
+     * ------------------------------------------------------------------------ */
+    LDR R12, [SP, #4]                @ LR is the only register that was pushed
+                                     @ onto the stack, so the argument sits at
+                                     @ SP plus 4.
+                                     @ LDMIB SP, {R12} is an equivalent
+                                     @ instruction in this case, where only
+                                     @ one register was pushed onto the stack.
+
+    /*-------------------------------------------------------------------------
+     * Load clamping parameters to duplicate vector elements
+     * ------------------------------------------------------------------------ */
+    VDUP.S16  Q4, D7[1]              @ Q4:   0 |   0 |   0 |   0 |   0 |   0 |   0 |   0
+    VDUP.S16  Q5, D7[2]              @ Q5: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    /*-------------------------------------------------------------------------
+     * Read bias
+     * ------------------------------------------------------------------------ */
+    VDUP.S32  Q0, D30[0]             @ Q0: -45824 | -45824 | -45824 | -45824
+    VDUP.S32  Q1, D30[1]             @ Q1:  34816 |  34816 |  34816 |  34816
+    VDUP.S32  Q2, D31[0]             @ Q2: -57984 | -57984 | -57984 | -57984
+
+
+    /*-------------------------------------------------------------------------
+     * The main loop
+     * ------------------------------------------------------------------------ */
+loop_yyvup2abgr:
+
+    /*-------------------------------------------------------------------------
+     * Load input from Y, V and U
+     * D12, D13: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14, Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     * D14     : V0 V1 V2 V3 V4 V5 V6 V7
+     * D15     : U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VLD2.U8  {D12,D13}, [p_y]!       @ Load 16 Luma elements (uint8) to D12, D13
+    VLD1.U8  {D14}, [p_cr]!          @ Load 8 Cr elements (uint8) to D14
+    VLD1.U8  {D15}, [p_cb]!          @ Load 8 Cb elements (uint8) to D15
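+
+    /*-------------------------------------------------------------------------
+     * VLD2 de-interleaves the packed luma stream, so one Cr/Cb pair is
+     * shared by two horizontally adjacent luma samples (H2V1 subsampling).
+     * Illustrative C for the load above:
+     *
+     *     for (int i = 0; i < 8; i++) {
+     *         d12[i] = p_y[2*i];        // even-indexed Y
+     *         d13[i] = p_y[2*i + 1];    // odd-indexed  Y
+     *     }
+     * ------------------------------------------------------------------------ */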
+
+    /*-------------------------------------------------------------------------
+     * Expand uint8 value to uint16
+     * D24, D25: Y0 Y2 Y4 Y6 Y8 Y10 Y12 Y14
+     * D26, D27: Y1 Y3 Y5 Y7 Y9 Y11 Y13 Y15
+     * D28, D29: V0 V1 V2 V3 V4 V5 V6 V7
+     * D30, D31: U0 U1 U2 U3 U4 U5 U6 U7
+     * ------------------------------------------------------------------------ */
+    VMOVL.U8  Q12, D12
+    VMOVL.U8  Q13, D13
+    VMOVL.U8  Q14, D14
+    VMOVL.U8  Q15, D15
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q6, D28, D6[0]        @ Q6: 359*(V0,V1,V2,V3)    Red
+    VMULL.S16  Q7, D30, D6[1]        @ Q7: -88*(U0,U1,U2,U3)    Green
+    VMLAL.S16  Q7, D28, D6[2]        @ Q7: -88*(U0,U1,U2,U3) - 183*(V0,V1,V2,V3)
+    VMULL.S16  Q8, D30, D6[3]        @ Q8: 454*(U0,U1,U2,U3)    Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0                 @ Q6 add Red   bias -45824
+    VADD.S32  Q7, Q1                 @ Q7 add Green bias  34816
+    VADD.S32  Q8, Q2                 @ Q8 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32   Q9, Q6
+    VMLAL.S16  Q6, D24, D7[0]        @ Q6:  R0, R2, R4, R6 in 32-bit Q8 format
+    VMLAL.S16  Q9, D26, D7[0]        @ Q9:  R1, R3, R5, R7 in 32-bit Q8 format
+
+    VMOV.S32   Q10, Q7
+    VMLAL.S16  Q7,  D24, D7[0]       @ Q7:  G0, G2, G4, G6 in 32-bit Q8 format
+    VMLAL.S16  Q10, D26, D7[0]       @ Q10: G1, G3, G5, G7 in 32-bit Q8 format
+
+    VMOV.S32   Q11, Q8
+    VMLAL.S16  Q8,  D24, D7[0]       @ Q8:  B0, B2, B4, B6 in 32-bit Q8 format
+    VMLAL.S16  Q11, D26, D7[0]       @ Q11: B1, B3, B5, B7 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32  D12, Q6,  #8          @ D12: R0 R2 R4 R6 in 16-bit Q0 format
+    VSHRN.S32  D13, Q9,  #8          @ D13: R1 R3 R5 R7 in 16-bit Q0 format
+    VZIP.16    D12, D13              @ Q6:  R0 R1 R2 R3 R4 R5 R6 R7
+
+    VSHRN.S32  D18, Q7,  #8          @ D18: G0 G2 G4 G6 in 16-bit Q0 format
+    VSHRN.S32  D19, Q10, #8          @ D19: G1 G3 G5 G7 in 16-bit Q0 format
+    VZIP.16    D18, D19              @ Q9:  G0 G1 G2 G3 G4 G5 G6 G7
+
+    VSHRN.S32  D20, Q8,  #8          @ D20: B0 B2 B4 B6 in 16-bit Q0 format
+    VSHRN.S32  D21, Q11, #8          @ D21: B1 B3 B5 B7 in 16-bit Q0 format
+    VZIP.16    D20, D21              @ Q10: B0 B1 B2 B3 B4 B5 B6 B7
+
+    /*-------------------------------------------------------------------------
+     * Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16     Q10, Q10, Q4        @ if Q10 <   0, Q10 =   0
+    VMIN.S16     Q10, Q10, Q5        @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16  D23, Q10            @ store Blue to D23, narrow the value from int16 to int8
+
+    VMAX.S16     Q9, Q9, Q4          @ if Q9 <   0, Q9 =   0
+    VMIN.S16     Q9, Q9, Q5          @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D22, Q9             @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16     Q6, Q6, Q4          @ if Q6 <   0, Q6 =   0
+    VMIN.S16     Q6, Q6, Q5          @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16  D21, Q6             @ store Red to D21, narrow the value from int16 to int8
+
+    /*-------------------------------------------------------------------------
+     * abgr format with leading 0xFF byte
+     * ------------------------------------------------------------------------ */
+    VMOVN.I16  D24, Q5               @ D24: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    SUBS  length, length, #8         @ check if the length is less than 8
+
+    BMI   trailing_yyvup2abgr        @ jump to trailing processing if fewer than 8 pixels remain
+
+    VST4.U8  {D21,D22,D23,D24}, [p_bgr]!  @ vector store Alpha, Blue, Green, Red to destination
+                                          @ Red at LSB
+
+    BEQ   end_yyvup2abgr             @ done if exactly 8 pixels were processed in the loop
+
+
+    /*-------------------------------------------------------------------------
+     * Done with the first 8 elements, continue on the next 8 elements
+     * ------------------------------------------------------------------------ */
+
+    /*-------------------------------------------------------------------------
+     * Multiply contribution from chrominance, results are in 32-bit
+     * ------------------------------------------------------------------------ */
+    VMULL.S16  Q6, D29, D6[0]        @ Q6: 359*(V4,V5,V6,V7)    Red
+    VMULL.S16  Q7, D31, D6[1]        @ Q7: -88*(U4,U5,U6,U7)    Green
+    VMLAL.S16  Q7, D29, D6[2]        @ Q7: -88*(U4,U5,U6,U7) - 183*(V4,V5,V6,V7)
+    VMULL.S16  Q8, D31, D6[3]        @ Q8: 454*(U4,U5,U6,U7)    Blue
+
+    /*-------------------------------------------------------------------------
+     * Add bias
+     * ------------------------------------------------------------------------ */
+    VADD.S32  Q6, Q0                 @ Q6 add Red   bias -45824
+    VADD.S32  Q7, Q1                 @ Q7 add Green bias  34816
+    VADD.S32  Q8, Q2                 @ Q8 add Blue  bias -57984
+
+    /*-------------------------------------------------------------------------
+     * Calculate Red, Green, Blue
+     * ------------------------------------------------------------------------ */
+    VMOV.S32   Q9, Q6
+    VMLAL.S16  Q6, D25, D7[0]        @ Q6:  R8 R10 R12 R14 in 32-bit Q8 format
+    VMLAL.S16  Q9, D27, D7[0]        @ Q9:  R9 R11 R13 R15 in 32-bit Q8 format
+
+    VMOV.S32   Q10, Q7
+    VMLAL.S16  Q7,  D25, D7[0]       @ Q7:  G8 G10 G12 G14 in 32-bit Q8 format
+    VMLAL.S16  Q10, D27, D7[0]       @ Q10: G9 G11 G13 G15 in 32-bit Q8 format
+
+    VMOV.S32   Q11, Q8
+    VMLAL.S16  Q8,  D25, D7[0]       @ Q8:  B8 B10 B12 B14 in 32-bit Q8 format
+    VMLAL.S16  Q11, D27, D7[0]       @ Q11: B9 B11 B13 B15 in 32-bit Q8 format
+
+    /*-------------------------------------------------------------------------
+     * Right shift eight bits with rounding
+     * ------------------------------------------------------------------------ */
+    VSHRN.S32  D12, Q6,  #8          @ D12: R8 R10 R12 R14 in 16-bit Q0 format
+    VSHRN.S32  D13, Q9,  #8          @ D13: R9 R11 R13 R15 in 16-bit Q0 format
+    VZIP.16    D12, D13              @ Q6:  R8 R9 R10 R11 R12 R13 R14 R15
+
+    VSHRN.S32  D18, Q7,  #8          @ D18: G8 G10 G12 G14 in 16-bit Q0 format
+    VSHRN.S32  D19, Q10, #8          @ D19: G9 G11 G13 G15 in 16-bit Q0 format
+    VZIP.16    D18, D19              @ Q9:  G8 G9 G10 G11 G12 G13 G14 G15
+
+    VSHRN.S32  D20, Q8,  #8          @ D20: B8 B10 B12 B14 in 16-bit Q0 format
+    VSHRN.S32  D21, Q11, #8          @ D21: B9 B11 B13 B15 in 16-bit Q0 format
+    VZIP.16    D20, D21              @ Q10: B8 B9 B10 B11 B12 B13 B14 B15
+
+    /*-------------------------------------------------------------------------
+     * Clamp the value to be within [0~255]
+     * ------------------------------------------------------------------------ */
+    VMAX.S16     Q10, Q10, Q4        @ if Q10 <   0, Q10 =   0
+    VMIN.S16     Q10, Q10, Q5        @ if Q10 > 255, Q10 = 255
+    VQMOVUN.S16  D23, Q10            @ store Blue to D23, narrow the value from int16 to int8
+
+    VMAX.S16     Q9, Q9, Q4          @ if Q9 <   0, Q9 =   0
+    VMIN.S16     Q9, Q9, Q5          @ if Q9 > 255, Q9 = 255
+    VQMOVUN.S16  D22, Q9             @ store Green to D22, narrow the value from int16 to int8
+
+    VMAX.S16     Q6, Q6, Q4          @ if Q6 <   0, Q6 =   0
+    VMIN.S16     Q6, Q6, Q5          @ if Q6 > 255, Q6 = 255
+    VQMOVUN.S16  D21, Q6             @ store Red to D21, narrow the value from int16 to int8
+
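+    /*-------------------------------------------------------------------------
+     * The VMAX/VMIN pairs above bound each 16-bit result to [0, 255] before
+     * VQMOVUN narrows it to u8 (whose unsigned saturation alone would also
+     * clamp). Scalar equivalent of one lane, for reference:
+     *
+     *     int16_t v = ...;
+     *     uint8_t out = v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v);
+     * ------------------------------------------------------------------------ */
+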
+    /*-------------------------------------------------------------------------
+     * abgr format with leading 0xFF byte
+     * ------------------------------------------------------------------------ */
+    VMOVN.I16  D24, Q5               @ D24: 255 | 255 | 255 | 255 | 255 | 255 | 255 | 255
+
+    SUBS  length, length, #8         @ check if the length is less than 8
+
+    BMI   trailing_yyvup2abgr        @ jump to trailing processing if fewer than 8 pixels remain
+
+    VST4.U8  {D21,D22,D23,D24}, [p_bgr]!  @ vector store Alpha, Blue, Green, Red to destination
+                                          @ Red at LSB
+
+    BHI   loop_yyvup2abgr            @ loop if more than 8 pixels left
+
+    BEQ   end_yyvup2abgr             @ done if exactly 8 pixels were processed in the loop
+
+
+trailing_yyvup2abgr:
+    /*-------------------------------------------------------------------------
+     * There are 1 ~ 7 pixels left in the trailing part.
+     * First add 7 to the length so the length is in 0 ~ 6.
+     * e.g.: 1 pixel left in the trailing part, so 1-8+7 = 0.
+     * Then save 1 pixel unconditionally, since at least 1 pixel is left in
+     * the trailing part.
+     * ------------------------------------------------------------------------ */
+    ADDS  length, length, #7         @ there are 7 or fewer pixels in the trailing part
+
+    VST4.U8  {D21[0],D22[0],D23[0],D24[0]}, [p_bgr]!   @ at least 1 pixel left in the trailing part
+    BEQ   end_yyvup2abgr             @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D21[1],D22[1],D23[1],D24[1]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yyvup2abgr             @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D21[2],D22[2],D23[2],D24[2]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yyvup2abgr             @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D21[3],D22[3],D23[3],D24[3]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yyvup2abgr             @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D21[4],D22[4],D23[4],D24[4]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yyvup2abgr             @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D21[5],D22[5],D23[5],D24[5]}, [p_bgr]!   @ store one more pixel
+    BEQ   end_yyvup2abgr             @ done if 0 pixels left
+
+    SUBS  length, length, #1         @ update length counter
+    VST4.U8  {D21[6],D22[6],D23[6],D24[6]}, [p_bgr]!   @ store one more pixel
+
+end_yyvup2abgr:
+    LDMFD SP!, {PC}
+    @ end of yyvup2abgr
+
+.end
diff --git a/asm/armv7/jdidct-armv7.S b/asm/armv7/jdidct-armv7.S
new file mode 100644
index 0000000..d61e219
--- /dev/null
+++ b/asm/armv7/jdidct-armv7.S
@@ -0,0 +1,762 @@
+/*=========================================================================
+* jdidct-armv7.s
+*
+* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of Code Aurora Forum, Inc. nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*==========================================================================
+
+*==========================================================================
+* FUNCTION LIST
+*--------------------------------------------------------------------------
+* - idct_1x1_venum
+* - idct_2x2_venum
+* - idct_4x4_venum
+* - idct_8x8_venum
+*
+*==========================================================================
+*/
+
+@==========================================================================
+@ MACRO DEFINITION
+@==========================================================================
+    .macro Transpose8x8
+    @==================================================================
+    @ Transpose an 8 x 8 x 16 bit matrix in place
+    @ Input:  q8 to q15
+    @ Output: q8 to q15
+    @ Registers used: q8 to q15
+    @ Assumptions: 8 x 8 x 16 bit data
+    @==================================================================
+
+    vswp d17, d24                    @q8, q12
+    vswp d23, d30                    @q11, q15
+    vswp d21, d28                    @q10, q14
+    vswp d19, d26                    @q9, q13
+
+    vtrn.32 q8,  q10
+    vtrn.32 q9,  q11
+    vtrn.32 q12, q14
+    vtrn.32 q13, q15
+
+    vtrn.16 q8,  q9
+    vtrn.16 q10, q11
+    vtrn.16 q12, q13
+    vtrn.16 q14, q15
+    .endm
+
+    .macro IDCT1D
+    @==================================================================
+    @ One dimensional 64 element inverse DCT
+    @ Input:  q8 to q15 loaded with data
+    @         q0 loaded with constants
+    @ Output: q8 to q15
+    @ Registers used: q0, q2 to q15
+    @ Assumptions: 16 bit data, first elements in least significant
+    @              halfwords
+    @==================================================================
+
+    @1st stage
+    vqrdmulh.s16 q4, q15, d0[2]      @q4 = a1*vx7
+    vqrdmulh.s16 q5, q9,  d0[2]      @q5 = a1*vx1
+    vqrdmulh.s16 q6, q13, d0[3]      @q6 = a2*vx5
+    vqrdmulh.s16 q7, q11, d1[1]      @q7 = ma2*vx3
+    vqrdmulh.s16 q2, q14, d0[1]      @q2 = a0*vx6
+    vqrdmulh.s16 q3, q10, d0[1]      @q3 = a0*vx2
+    vqadd.s16    q9, q4, q9          @q9 = t1 = a1*vx7 + vx1
+    vqsub.s16    q5, q5, q15         @q5 = t8 = a1*vx1 - vx7
+    vqadd.s16    q15, q6, q11        @q15 = t7 = a2*vx5 + vx3
+    vqadd.s16    q11, q7, q13        @q11 = t3 = ma2*vx3 + vx5
+
+    @2nd stage
+    vqadd.s16 q13, q8, q12           @q13 = t5 = vx0 + vx4
+    vqsub.s16 q8,  q8, q12           @q8  = t0 = vx0 - vx4
+    vqadd.s16 q10, q2, q10           @q10 = t2 = a0*vx6 + vx2
+    vqsub.s16 q12, q3, q14           @q12 = t4 = a0*vx2 - vx6
+    vqadd.s16 q14, q5, q11           @q14 = t6 = t8 + t3
+    vqsub.s16 q11, q5, q11           @q11 = t3 = t8 - t3
+    vqsub.s16 q5,  q9, q15           @q5  = t8 = t1 - t7
+    vqadd.s16 q9,  q9, q15           @q9  = t1 = t1 + t7
+
+    @3rd stage
+    vqadd.s16 q15, q13, q10          @q15 = t7 = t5 + t2
+    vqsub.s16 q10, q13, q10          @q10 = t2 = t5 - t2
+    vqadd.s16 q13, q8,  q12          @q13 = t5 = t0 + t4
+    vqsub.s16 q7,  q8,  q12          @q7  = t0 = t0 - t4
+    vqsub.s16 q12, q5,  q11          @q12 = t4 = t8 - t3
+    vqadd.s16 q11, q5,  q11          @q11 = t3 = t8 + t3
+
+    @4th stage
+    vqadd.s16 q8,  q15, q9           @q8  = vy0 = t7 + t1
+    vqsub.s16 q15, q15, q9           @q15 = vy7 = t7 - t1
+    vqrdmulh.s16 q6, q12, d0[0]      @q6 = c4*t4
+    vqrdmulh.s16 q4, q11, d0[0]      @q4 = c4*t3
+    vqsub.s16 q12, q10, q14          @q12 = vy4 = t2 - t6
+    vqadd.s16 q11, q10, q14          @q11 = vy3 = t2 + t6
+    vqadd.s16 q10, q7,  q6           @q10 = vy2 = t0 + c4*t4
+    vqsub.s16 q14, q13, q4           @q14 = vy6 = t5 - c4*t3
+    vqadd.s16 q9,  q13, q4           @q9  = vy1 = t5 + c4*t3
+    vqsub.s16 q13, q7,  q6           @q13 = vy5 = t0 - c4*t4
+    .endm
+
+    .macro PART1
+    @==================================================================
+    @ Load input data from memory and shift
+    @==================================================================
+    vld1.16 {d16, d17},[r0]!         @q8 =row0
+    vqshl.s16 q8, q8, #4             @Input data too big?!!
+                                     @Maximum MPEG input is 2047/-2048.
+    vld1.16 {d18, d19},[r0]!         @q9 =row1
+    vqshl.s16 q9, q9, #4             @Shift 1 instead of 4
+
+    vld1.16 {d20, d21},[r0]!         @q10=row2
+    vqshl.s16 q10, q10, #4
+
+    vld1.16 {d22, d23},[r0]!         @q11=row3
+    vqshl.s16 q11, q11, #4
+
+    vld1.16 {d24, d25},[r0]!         @q12=row4
+    vqshl.s16 q12, q12, #4
+
+    vld1.16 {d26, d27},[r0]!         @q13=row5
+    vqshl.s16 q13, q13, #4
+    vld1.16 {d28, d29},[r0]!         @q14=row6
+    vqshl.s16 q14, q14, #4
+    vld1.16 {d30, d31},[r0]!         @q15=row7
+    vqshl.s16 q15, q15, #4
+
+    @==================================================================
+    @ refresh the constants that were clobbered last time through IDCT1D
+    @==================================================================
+    vld1.16 {d4, d5},[r7]            @q2 =constants[2]
+    vld1.16 {d6, d7},[r8]            @q3 =constants[3]
+    vld1.16 {d8, d9},[r9]            @q4 =constants[4]
+    .endm
+
+    .macro PART2
+    @==================================================================
+    @ Prescale the input
+    @==================================================================
+    vqrdmulh.s16 q12, q12, q1        @q12=row4 * constants[1] = vx4
+    vqrdmulh.s16 q15, q15, q2        @q15=row7 * constants[2] = vx7
+    vqrdmulh.s16 q9,  q9,  q2        @q9 =row1 * constants[2] = vx1
+    vqrdmulh.s16 q13, q13, q4        @q13=row5 * constants[4] = vx5
+    vqrdmulh.s16 q11, q11, q4        @q11=row3 * constants[4] = vx3
+    vqrdmulh.s16 q14, q14, q3        @q14=row6 * constants[3] = vx6
+    vqrdmulh.s16 q10, q10, q3        @q10=row2 * constants[3] = vx2
+    vqrdmulh.s16 q8,  q8,  q1        @q8 =row0 * constants[1] = vx0
+
+    @==================================================================
+    @ At this point, the input 8x8 x 16 bit coefficients are
+    @ transposed, prescaled, and loaded in q8 to q15
+    @ q0 loaded with scalar constants
+    @ Perform 1D IDCT
+    @==================================================================
+    IDCT1D                           @perform 1d idct
+
+    @==================================================================
+    @ Transpose the intermediate results to get ready for the vertical
+    @ transformation
+    @==================================================================
+    vswp d17, d24                    @q8, q12
+    vswp d23, d30                    @q11, q15
+    vswp d21, d28                    @q10, q14
+    vswp d19, d26                    @q9, q13
+
+    @==================================================================
+    @ Load the bias
+    @==================================================================
+    vdup.32 q4, d1[1]                @a cycle is saved by loading
+                                     @the bias at this point
+
+    @==================================================================
+    @ Finish the transposition
+    @==================================================================
+    vtrn.32 q8,  q10
+    vtrn.32 q9,  q11
+    vtrn.32 q12, q14
+    vtrn.32 q13, q15
+    vtrn.16 q8,  q9
+    vtrn.16 q10, q11
+    vtrn.16 q12, q13
+    vtrn.16 q14, q15
+
+    @==================================================================
+    @ Add bias
+    @==================================================================
+    vqadd.s16 q8, q8, q4
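+
+    @==================================================================
+    @ The bias folds the +128 level shift and the rounding constant for
+    @ the final >>6 into a single add: the packed halfwords duplicated
+    @ from constants[0] are 8223/8224, and 8224 = (128 << 6) + 32.
+    @ Scalar model (illustrative): out = clamp((v + 128*64 + 32) >> 6).
+    @ The 8223/8224 pair presumably alternates the rounding term between
+    @ adjacent columns to avoid a systematic half-LSB skew.
+    @==================================================================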
+
+    @==================================================================
+    @ IDCT 2nd half
+    @==================================================================
+    IDCT1D                           @perform 1d idct
+
+    @==================================================================
+    @ Scale and clamp the output to the correct range and save to memory
+    @ 1. scale to 8 bits by right shift 6
+    @ 2. clamp output to [0, 255] by min/max
+    @ 3. use multiple store. Each store will save one row of output.
+    @    The st queue size is 4, so do no more than 4 str in sequence.
+    @==================================================================
+    ldr r5, =constants+5*16          @constants[5]
+    vld1.16 d10, [r5]                @load clamping parameters
+    vdup.s16 q6, d10[0]              @q6=[0000000000000000]
+    vdup.s16 q7, d10[1]              @q7=[FFFFFFFFFFFFFFFF]
+
+    @Save the results
+    vshr.s16 q8, q8, #6              @q8 = vy0
+    vmax.s16 q8, q8, q6              @clamp >0
+    vmin.s16 q8, q8, q7              @clamp <255
+
+    vshr.s16 q9, q9, #6              @q9 = vy1
+    vmax.s16 q9, q9, q6              @clamp >0
+    vmin.s16 q9, q9, q7              @clamp <255
+
+    vshr.s16 q10, q10, #6            @q10 = vy2
+    vmax.s16 q10, q10, q6            @clamp >0
+    vmin.s16 q10, q10, q7            @clamp <255
+
+    vshr.s16 q11, q11, #6            @q11 = vy3
+    vmax.s16 q11, q11, q6            @clamp >0
+    vmin.s16 q11, q11, q7            @clamp <255
+
+    vst1.16 {d16, d17},[r1],r2       @q8 =row0
+    vst1.16 {d18, d19},[r1],r2       @q9 =row1
+    vst1.16 {d20, d21},[r1],r2       @q10=row2
+    vst1.16 {d22, d23},[r1],r2       @q11=row3
+
+    vshr.s16 q12, q12, #6            @q12 = vy4
+    vmax.s16 q12, q12, q6            @clamp >0
+    vmin.s16 q12, q12, q7            @clamp <255
+
+    vshr.s16 q13, q13, #6            @q13 = vy5
+    vmax.s16 q13, q13, q6            @clamp >0
+    vmin.s16 q13, q13, q7            @clamp <255
+
+    vshr.s16 q14, q14, #6            @q14 = vy6
+    vmax.s16 q14, q14, q6            @clamp >0
+    vmin.s16 q14, q14, q7            @clamp <255
+
+    vshr.s16 q15, q15, #6            @q15 = vy7
+    vmax.s16 q15, q15, q6            @clamp >0
+    vmin.s16 q15, q15, q7            @clamp <255
+
+    vst1.16 {d24, d25},[r1],r2       @q12=row4
+    vst1.16 {d26, d27},[r1],r2       @q13=row5
+    vst1.16 {d28, d29},[r1],r2       @q14=row6
+    vst1.16 {d30, d31},[r1]          @q15=row7
+    .endm
+
+    .macro BIG_BODY_TRANSPOSE_INPUT
+    @==================================================================
+    @ Main body of idct
+    @==================================================================
+    PART1
+    Transpose8x8
+    PART2
+    .endm
+
+    .macro IDCT_ENTRY
+    @==================================================================
+    @ Load the locations of the constants
+    @==================================================================
+    ldr r5, =constants+0*16          @constants[0]
+    ldr r6, =constants+1*16          @constants[1]
+    ldr r7, =constants+2*16          @constants[2]
+    ldr r8, =constants+3*16          @constants[3]
+    ldr r9, =constants+4*16          @constants[4]
+
+    @==================================================================
+    @ Load the constants
+    @ only some of the constants are loaded here, due to register
+    @ constraints
+    @==================================================================
+    vld1.16 {d0, d1},[r5]            @q0 =constants[0] (scalars)
+    vld1.16 {d2, d3},[r6]            @q1 =constants[1]
+    .endm
+@==========================================================================
+@ END of MACRO DEFINITION
+@==========================================================================
+
+
+    .section idct_func, "x"          @ AREA
+    .text                            @ idct_func, CODE, READONLY
+    .align 2
+    .code 32                         @ CODE32
+
+@==========================================================================
+@ Main Routine
+@==========================================================================
+
+    .global idct_1x1_venum
+    .global idct_2x2_venum
+    .global idct_4x4_venum
+    .global idct_8x8_venum
+
+@==========================================================================
+@ FUNCTION     : idct_1x1_venum
+@--------------------------------------------------------------------------
+@ DESCRIPTION  : ARM optimization of one 1x1 block iDCT
+@--------------------------------------------------------------------------
+@ C PROTOTYPE  : void idct_1x1_venum(int16 * input,
+@                                    int16 * output,
+@                                    int32 stride)
+@--------------------------------------------------------------------------
+@ REG INPUT    : R0 pointer to input (int16)
+@                R1 pointer to output (int16)
+@                R2 block stride
+@--------------------------------------------------------------------------
+@ STACK ARG    : None
+@--------------------------------------------------------------------------
+@ MEM INPUT    : None
+@--------------------------------------------------------------------------
+@ REG OUTPUT   : None
+@--------------------------------------------------------------------------
+@ MEM OUTPUT   : None
+@--------------------------------------------------------------------------
+@ REG AFFECTED : R0 - R3
+@--------------------------------------------------------------------------
+@ STACK USAGE  : none
+@--------------------------------------------------------------------------
+@ CYCLES       : 17 cycles
+@--------------------------------------------------------------------------
+@ NOTES        :
+@   This idct_1x1_venum code uses only ARM (non-NEON) instructions.
+@
+@ ARM REGISTER ALLOCATION
+@ =========================================================================
+@ r0 : pointer to input data
+@ r1 : pointer to output area
+@ r2 : stride in the output buffer
+@==========================================================================
+.type idct_1x1_venum, %function
+idct_1x1_venum:
+
+    ldrsh r3, [r0]                   @ Load signed half word (int16)
+    ldr   r2, =1028                  @ 1028 = 4 + (128 << 3);
+                                     @ 4 for rounding, 128 for offset
+    add   r2, r3, r2
+    asrs  r2, r2, #3                 @ Divide by 8, and set status bits
+    movmi r2, #0                     @ Clamp to be greater than 0
+    cmp   r2, #255
+    movgt r2, #255                   @ Clamp to be less than 255
+    str   r2, [r1]                   @ Save output
+    bx    lr                         @ Return to caller
+
+    @ end of idct_1x1_venum
+
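+@==========================================================================
+@ Scalar C model of idct_1x1_venum above (illustrative, not part of the
+@ build). A DC-only block reduces to one rounded scale plus the +128
+@ level shift:
+@
+@   int v = (input[0] + 4 + (128 << 3)) >> 3;  /* == ((dc + 4) >> 3) + 128 */
+@   output[0] = v < 0 ? 0 : (v > 255 ? 255 : v);
+@==========================================================================
+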
+@==========================================================================
+@ FUNCTION     : idct_2x2_venum
+@--------------------------------------------------------------------------
+@ DESCRIPTION  : VeNum optimization of one 2x2 block iDCT
+@--------------------------------------------------------------------------
+@ C PROTOTYPE  : void idct_2x2_venum(int16 * input,
+@                                    int16 * output,
+@                                    int32 stride)
+@--------------------------------------------------------------------------
+@ REG INPUT    : R0 pointer to input (int16)
+@                R1 pointer to output (int16)
+@                R2 block stride
+@--------------------------------------------------------------------------
+@ STACK ARG    : None
+@--------------------------------------------------------------------------
+@ MEM INPUT    : None
+@--------------------------------------------------------------------------
+@ REG OUTPUT   : None
+@--------------------------------------------------------------------------
+@ MEM OUTPUT   : None
+@--------------------------------------------------------------------------
+@ REG AFFECTED : R0 - R2
+@--------------------------------------------------------------------------
+@ STACK USAGE  : none
+@--------------------------------------------------------------------------
+@ CYCLES       : 27 cycles
+@--------------------------------------------------------------------------
+@ NOTES        : Output buffer must be an 8x8 16-bit buffer
+@
+@ ARM REGISTER ALLOCATION
+@ ==========================================
+@ r0 : pointer to input data
+@ r1 : pointer to output area
+@ r2 : stride in the output buffer
+@ -------------------------------------------
+@
+@ VENUM REGISTER ALLOCATION
+@ =================================================
+@ q0  : output x0 - x3
+@ q1  : not used
+@ q2  : not used
+@ q3  : not used
+@ q4  : not used
+@ q5  : not used
+@ q6  : not used
+@ q7  : not used
+@ q8  : input y0 - y4
+@ q9  : intermediate value
+@ q10 : intermediate value
+@ q11 : offset value
+@ q12 : clamp value
+@ q13 : not used
+@ q14 : not used
+@ q15 : not used
+@==========================================================================
+.type idct_2x2_venum, %function
+idct_2x2_venum:
+
+    vld4.32 {d16, d17, d18, d19}, [r0]
+                                     @ d16: y0 | y1 | y2 | y3 (LSB | MSB)
+
+    vtrn.32 d16, d17                 @ d16: y0 | y1 | X | X
+                                     @ d17: y2 | y3 | X | X
+
+    vqadd.s16 d18, d16, d17          @ d18: y0+y2 | y1+y3 | X | X  q: saturated
+    vqsub.s16 d19, d16, d17          @ d19: y0-y2 | y1-y3 | X | X  q: saturated
+
+    vtrn.16 d18, d19                 @ d18: y0+y2 | y0-y2 | X | X
+                                     @ d19: y1+y3 | y1-y3 | X | X
+
+    vqadd.s16 d20, d18, d19          @ d20: (y0+y2)+(y1+y3) | (y0-y2)+(y1-y3)
+                                     @        x0 | x2 | X | X
+    vqsub.s16 d21, d18, d19          @ d21: (y0+y2)-(y1+y3) | (y0-y2)-(y1-y3)
+                                     @        x1 | x3 | X | X
+
+    vtrn.16 d20, d21                 @ d20: x0 | x1 | X | X
+                                     @ d21: x2 | x3 | X | X
+
+    vrshr.s16 q10, q10, #3           @ Divide by 8
+
+    vmov.i16 q11, #128               @ q11 = 128|128|128|128|128|128|128|128
+    vqadd.s16 q0, q10, q11           @ Add offset to make output in [0,255]
+
+    vmov.i16 q12, #0                 @ q12 = [0000000000000000]
+    vmov.i16 q13, #255               @ q13 = [FFFFFFFFFFFFFFFF] (hex)
+
+    vmax.s16 q0, q0, q12             @ Clamp > 0
+    vmin.s16 q0, q0, q13             @ Clamp < 255
+
+    vstr d0, [r1]                    @ Store x0 | x1 | X | X
+                                     @ Potential out of boundary issue
+    add  r1, r1, r2                  @ Add the offset to the output pointer
+    vstr d1, [r1]                    @ Store x2 | x3 | X | X
+                                     @ Potential out of boundary issue
+    bx lr                            @ Return to caller
+
+    @ end of idct_2x2_venum
+
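+@==========================================================================
+@ Scalar C model of idct_2x2_venum above (illustrative). With y0,y1 the
+@ first row and y2,y3 the second row of the 2x2 block:
+@
+@   int s = y0 + y2, d = y0 - y2, t = y1 + y3, u = y1 - y3;
+@   int x0 = s + t, x1 = s - t, x2 = d + u, x3 = d - u;
+@   /* per element: out = clamp(((x + 4) >> 3) + 128);
+@      VRSHR supplies the +4 rounding */
+@==========================================================================
+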
+@==========================================================================
+@ FUNCTION     : idct_4x4_venum
+@--------------------------------------------------------------------------
+@ DESCRIPTION  : VeNum optimization of one 4x4 block iDCT
+@--------------------------------------------------------------------------
+@ C PROTOTYPE  : void idct_4x4_venum(int16 * input,
+@                                    int16 * output,
+@                                    int32 stride)
+@--------------------------------------------------------------------------
+@ REG INPUT    : R0 pointer to input (int16)
+@                R1 pointer to output (int16)
+@                R2 block stride
+@--------------------------------------------------------------------------
+@ STACK ARG    : None
+@--------------------------------------------------------------------------
+@ MEM INPUT    : None
+@--------------------------------------------------------------------------
+@ REG OUTPUT   : None
+@--------------------------------------------------------------------------
+@ MEM OUTPUT   : None
+@--------------------------------------------------------------------------
+@ REG AFFECTED : R0 - R3, R12
+@--------------------------------------------------------------------------
+@ STACK USAGE  : none
+@--------------------------------------------------------------------------
+@ CYCLES       : 56 cycles
+@--------------------------------------------------------------------------
+@ NOTES        :
+@
+@ ARM REGISTER ALLOCATION
+@ ==========================================
+@ r0  : pointer to input data
+@ r1  : pointer to output area
+@ r2  : stride in the output buffer
+@ r3  : pointer to the coefficient set
+@ r12 : pointer to the coefficient set
+@ -------------------------------------------
+@
+@ VENUM REGISTER ALLOCATION
+@ =================================================
+@ q0  : coefficients[0]
+@ q1  : coefficients[1]
+@ q2  : coefficients[2]
+@ q3  : coefficients[3]
+@ q4  : not used
+@ q5  : not used
+@ q6  : not used
+@ q7  : not used
+@ q8  : input y0 - y7
+@ q9  : input y8 - y15
+@ q10 : intermediate value
+@ q11 : intermediate value
+@ q12 : intermediate value
+@ q13 : intermediate value
+@ q14 : intermediate value
+@ q15 : intermediate value
+@==========================================================================
+.type idct_4x4_venum, %function
+idct_4x4_venum:
+
+    @ Load the locations of the first 2 sets of coefficients
+    ldr r3,  =coefficient+0*16       @ coefficient[0]
+    ldr r12, =coefficient+1*16       @ coefficient[1]
+
+    @ Load the first 2 sets of coefficients
+    vld1.16 {d0, d1},[r3]            @ q0 = C4 | C2 | C4 | C6 | C4 | C2 | C4 | C6
+    vld1.16 {d2, d3},[r12]           @ q1 = C4 | C6 | C4 | C2 | C4 | C6 | C4 | C2
+
+    @ Load the locations of the second 2 sets of coefficients
+    ldr r3,  =coefficient+2*16       @ coefficient[2]
+    ldr r12, =coefficient+3*16       @ coefficient[3]
+
+    @ Load the second 2 sets of coefficients
+    vld1.16 {d4, d5},[r3]            @ q2 = C4 | C4 | C4 | C4 | C2 | C2 | C2 | C2
+    vld1.16 {d6, d7},[r12]           @ q3 = C4 | C4 | C4 | C4 | C6 | C6 | C6 | C6
+
+    @ Load the input values
+    vld1.16 {d16}, [r0], r2          @ d16: y0  | y1  | y2  | y3  (LSB | MSB)
+    vld1.16 {d17}, [r0], r2          @ d17: y4  | y5  | y6  | y7  (LSB | MSB)
+    vld1.16 {d18}, [r0], r2          @ d18: y8  | y9  | y10 | y11 (LSB | MSB)
+    vld1.16 {d19}, [r0], r2          @ d19: y12 | y13 | y14 | y15 (LSB | MSB)
+
+    @ Apply iDCT Horizontally
+
+    @ q8: y0 |y1 |y2 |y3 |y4 |y5 |y6 |y7
+    @ q9: y8 |y9 |y10|y11|y12|y13|y14|y15
+
+    @======================================================================
+    @ vqrdmulh doubles the result and saves the high 16 bits of it, which
+    @ is equivalent to a right shift by 15 bits.
+    @ Since the coefficients are in Q15 format, this cancels their Q15
+    @ scaling, so the final result is in Q0 format.
+    @
+    @ vqrdmulh will also round the result.
+    @======================================================================
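+
+    @======================================================================
+    @ For reference, the assumed scalar semantics of VQRDMULH.S16
+    @ (saturating rounding doubling multiply, high half), in C:
+    @
+    @   int16_t vqrdmulh16(int16_t a, int16_t b) {
+    @       int64_t p = 2 * (int64_t)a * b + (1 << 15);
+    @       p >>= 16;
+    @       return p > 32767 ? 32767 : (int16_t)p;  /* saturates only for
+    @                                                  a == b == -32768 */
+    @   }
+    @
+    @ With b = round(c * 32768) in Q15, this yields round(a * c).
+    @======================================================================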
+
+    vqrdmulh.s16 q10, q8, q0         @ q10: C4*y0 | C2*y1 | C4*y2 | C6*y3 | C4*y4 | C2*y5 | C4*y6 | C6*y7
+    vqrdmulh.s16 q11, q8, q1         @ q11: C4*y0 | C6*y1 | C4*y2 | C2*y3 | C4*y4 | C6*y5 | C4*y6 | C2*y7
+
+    vqrdmulh.s16 q12, q9, q0         @ q12: C4*y8 | C2*y9 | C4*y10 | C6*y11 | C4*y12 | C2*y13 | C4*y14 | C6*y15
+    vqrdmulh.s16 q13, q9, q1         @ q13: C4*y8 | C6*y9 | C4*y10 | C2*y11 | C4*y12 | C6*y13 | C4*y14 | C2*y15
+
+    vtrn.32 q10, q12                 @ q10: C4*y0 | C2*y1 | C4*y8 | C2*y9 | C4*y4 | C2*y5 | C4*y12 | C2*y13
+                                     @ q12: C4*y2 | C6*y3 | C4*y10 | C6*y11 | C4*y6 | C6*y7 | C4*y14 | C6*y15
+
+    vtrn.32 q11, q13                 @ q11: C4*y0 | C6*y1 | C4*y8 | C6*y9 | C4*y4 | C6*y5 | C4*y12 | C6*y13
+                                     @ q13: C4*y2 | C2*y3 | C4*y10 | C2*y11 | C4*y6 | C2*y7 | C4*y14 | C2*y15
+
+    vqadd.s16 q14, q10, q12          @ q14: C4*y0 + C4*y2 | C2*y1 + C6*y3 | C4*y8 + C4*y10 | C2*y9 + C6*y11 | C4*y4 + C4*y6 | C2*y5 + C6*y7 | C4*y12 + C4*y14 | C2*y13 + C6*y15
+                                     @      S0 | S2 | S8 | S10 | S4 | S6 | S12 | S14
+
+    vqsub.s16 q15, q11, q13          @ q15: C4*y0 - C4*y2 | C6*y1 - C2*y3 | C4*y8 - C4*y10 | C6*y9 - C2*y11 | C4*y4 - C4*y6 | C6*y5 - C2*y7 | C4*y12 - C4*y14 | C6*y13 - C2*y15
+                                     @      S1 | S3 | S9 | S11 | S5 | S7 | S13 | S15
+
+    vtrn.16 q14, q15                 @ q14: S0 | S1 | S8  | S9  | S4 | S5 | S12 | S13
+                                     @ q15: S2 | S3 | S10 | S11 | S6 | S7 | S14 | S15
+
+    vqadd.s16 q8, q14, q15           @ q8: Z0 | Z1 | Z8  | Z9  | Z4 | Z5 | Z12 | Z13
+    vqsub.s16 q9, q14, q15           @ q9: Z3 | Z2 | Z11 | Z10 | Z7 | Z6 | Z15 | Z14
+    vrev32.16 q9, q9                 @ q9: Z2 | Z3 | Z10 | Z11 | Z6 | Z7 | Z14 | Z15
+
+
+    @ Apply iDCT Vertically
+
+    vtrn.32 q8, q9                   @ q8: Z0 | Z1 | Z2  | Z3  | Z4  | Z5  | Z6  | Z7
+                                     @ q9: Z8 | Z9 | Z10 | Z11 | Z12 | Z13 | Z14 | Z15
+
+
+    vqrdmulh.s16 q10, q8, q2         @ q10: C4*Z0 | C4*Z1 | C4*Z2 | C4*Z3 | C2*Z4 | C2*Z5 | C2*Z6 | C2*Z7
+    vqrdmulh.s16 q11, q8, q3         @ q11: C4*Z0 | C4*Z1 | C4*Z2 | C4*Z3 | C6*Z4 | C6*Z5 | C6*Z6 | C6*Z7
+
+    vqrdmulh.s16 q12, q9, q2         @ q12: C4*Z8 | C4*Z9 | C4*Z10 | C4*Z11 | C2*Z12 | C2*Z13 | C2*Z14 | C2*Z15
+    vqrdmulh.s16 q13, q9, q3         @ q13: C4*Z8 | C4*Z9 | C4*Z10 | C4*Z11 | C6*Z12 | C6*Z13 | C6*Z14 | C6*Z15
+
+    vqadd.s16 q14, q10, q13          @ q14: C4*Z0+C4*Z8 | C4*Z1+C4*Z9 | C4*Z2+C4*Z10 | C4*Z3+C4*Z11 | C2*Z4+C6*Z12 | C2*Z5+C6*Z13 | C2*Z6+C6*Z14 | C2*Z7+C6*Z15
+                                     @      s0 | s4 | s8 | s12 | s2 | s6 | s10 | s14
+
+    vqsub.s16 q15, q11, q12          @ q15: C4*Z0-C4*Z8 | C4*Z1-C4*Z9 | C4*Z2-C4*Z10 | C4*Z3-C4*Z11 | C6*Z4-C2*Z12 | C6*Z5-C2*Z13 | C6*Z6-C2*Z14 | C6*Z7-C2*Z15
+                                     @      s1 | s5 | s9 | s13 | s3 | s7 | s11 | s15
+
+    vswp d29, d30                    @ q14: s0 | s4 | s8  | s12 | s1 | s5 | s9  | s13
+                                     @ q15: s2 | s6 | s10 | s14 | s3 | s7 | s11 | s15
+
+    vqadd.s16 q8, q14, q15           @ q8: x0 | x4 | x8  | x12 | x1 | x5 | x9  | x13
+    vqsub.s16 q9, q14, q15           @ q9: x3 | x7 | x11 | x15 | x2 | x6 | x10 | x14
+
+    vmov.i16 q10, #0                 @ q10=[0000000000000000]
+    vmov.i16 q11, #255               @ q11=[FFFFFFFFFFFFFFFF] (hex)
+
+    vmov.i16 q0, #128                @ q0 = 128|128|128|128|128|128|128|128
+
+    vqadd.s16 q8, q8, q0             @ Add the offset
+    vqadd.s16 q9, q9, q0             @ Add the offset
+
+    vmax.s16 q8, q8, q10             @ clamp > 0
+    vmin.s16 q8, q8, q11             @ clamp < 255
+
+    vmax.s16 q9, q9, q10             @ clamp > 0
+    vmin.s16 q9, q9, q11             @ clamp < 255
+
+    vst1.16 {d16}, [r1], r2          @ d16: x0  | x1  | x2  | x3  (LSB | MSB)
+    vst1.16 {d17}, [r1], r2          @ d17: x4  | x5  | x6  | x7  (LSB | MSB)
+    vst1.16 {d19}, [r1], r2          @ d19: x8  | x9  | x10 | x11 (LSB | MSB)
+    vst1.16 {d18}, [r1], r2          @ d18: x12 | x13 | x14 | x15 (LSB | MSB)
+
+    bx lr                            @ Return to caller
+
+    @ end of idct_4x4_venum
+
+@==========================================================================
+@ FUNCTION     : idct_8x8_venum
+@--------------------------------------------------------------------------
+@ DESCRIPTION  : VeNum optimization of one 8x8 block iDCT
+@--------------------------------------------------------------------------
+@ C PROTOTYPE  : void idct_8x8_venum(int16 * input,
+@                                    int16 * output,
+@                                    int32 stride)
+@--------------------------------------------------------------------------
+@ REG INPUT    : R0 pointer to input (int16)
+@                R1 pointer to output (int16)
+@                R2 block stride
+@--------------------------------------------------------------------------
+@ STACK ARG    : None
+@--------------------------------------------------------------------------
+@ MEM INPUT    : None
+@--------------------------------------------------------------------------
+@ REG OUTPUT   : None
+@--------------------------------------------------------------------------
+@ MEM OUTPUT   : None
+@--------------------------------------------------------------------------
+@ REG AFFECTED : R0 - R9
+@--------------------------------------------------------------------------
+@ STACK USAGE  : none
+@--------------------------------------------------------------------------
+@ CYCLES       : 177 cycles
+@--------------------------------------------------------------------------
+@ NOTES        :
+@
+@ It was tested to be IEEE 1180 compliant. Since IEEE 1180 compliance is
+@ more stringent than MPEG-4 compliance, this version is also MPEG-4
+@ compliant.
+@
+@ CODE STRUCTURE:
+@ (i)   Macros for transposing an 8x8 matrix and for configuring the VFP
+@       unit are defined.
+@ (ii)  Macro for IDCT in one dimension is defined as four stages
+@ (iii) The two dimensional code begins
+@ (iv)  constants are defined in the area DataArea
+@
+@ PROGRAM FLOW:
+@
+@ The VFP is configured
+@ The parameters to IDCT are loaded
+@ the coefficients are loaded
+@ loop:
+@   decrement loop counter
+@   The first input Matrix is loaded and pre-scaled
+@   The input is prescaled using the constants
+@   IDCT is performed in one dimension on the 8 columns
+@   The matrix is transposed
+@   A bias is loaded and added to the matrix
+@   IDCT is performed in one dimension on the 8 rows
+@   The matrix is post-scaled
+@   The matrix is saved
+@   test loop counter and loop if greater than zero
+@ stop
+@
+@
+@ ARM REGISTER ALLOCATION
+@ ==========================================
+@ r0 : pointer to input data
+@ r1 : pointer to output area
+@ r2 : stride in the output buffer
+@ r3 :
+@ r4 :
+@ r5 : pointer to constants[0] [5]
+@ r6 : pointer to constants[1]
+@ r7 : pointer to constants[2]
+@ r8 : pointer to constants[3]
+@ r9 : pointer to constants[4]
+@ -------------------------------------------
+@
+@ VENUM REGISTER ALLOCATION
+@ =================================================
+@ q0  : constants[0]
+@ q1  : constants[1]
+@ q2  : constants[2], IDCT1D in-place scratch
+@ q3  : constants[3], IDCT1D in-place scratch
+@ q4  : constants[4], IDCT1D in-place scratch, and bias compensation
+@ q5  : IDCT1D in-place scratch
+@ q6  : IDCT1D in-place scratch
+@ q7  : IDCT1D in-place scratch
+@ q8  : Matrix[0] IDCT1D in-place scratch
+@ q9  : Matrix[1] IDCT1D in-place scratch
+@ q10 : Matrix[2] IDCT1D in-place scratch
+@ q11 : Matrix[3] IDCT1D in-place scratch
+@ q12 : Matrix[4] IDCT1D in-place scratch
+@ q13 : Matrix[5] IDCT1D in-place scratch
+@ q14 : Matrix[6] IDCT1D in-place scratch
+@ q15 : Matrix[7] IDCT1D in-place scratch
+@==========================================================================
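+@==========================================================================
+@ Hypothetical call from C, matching the prototypes this patch adds to
+@ jpegint.h (buffer names illustrative):
+@
+@   INT16 coef[64];                  /* one dequantized 8x8 block   */
+@   INT16 out[64];                   /* 8x8 clamped samples, 16-bit */
+@   idct_8x8_venum(coef, out, 8 * sizeof(INT16));
+@
+@ `stride` is the byte distance between successive output rows.
+@==========================================================================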
+.type idct_8x8_venum, %function
+idct_8x8_venum:
+
+    push {r5-r9}
+    vpush {d8-d15}
+    IDCT_ENTRY
+    BIG_BODY_TRANSPOSE_INPUT
+    vpop {d8-d15}
+    pop {r5-r9}
+    bx lr
+    @ end of idct_8x8_venum
+
+@==========================================================================
+@ Constants Definition AREA: define idct kernel, bias
+@==========================================================================
+    .section ro_data_area            @ AREA RODataArea
+    .data                            @ DATA, READONLY
+    .align 5                         @ ALIGN=5
+
+constants:
+    .hword 23170, 13573,  6518, 21895, -23170, -21895,  8223,  8224
+    .hword 16384, 22725, 21407, 19266,  16384,  19266, 21407, 22725
+    .hword 22725, 31521, 29692, 26722,  22725,  26722, 29692, 31521
+    .hword 21407, 29692, 27969, 25172,  21407,  25172, 27969, 29692
+    .hword 19266, 26722, 25172, 22654,  19266,  22654, 25172, 26722
+    .hword     0,   255,     0,     0
+
+coefficient:  @ These are the coefficients used by the 4x4 iDCT, in Q15 format
+    .hword 11585, 15137, 11585,  6270, 11585, 15137, 11585,  6270  @ C4, C2, C4, C6, C4, C2, C4, C6 /2
+    .hword 11585,  6270, 11585, 15137, 11585,  6270, 11585, 15137  @ C4, C6, C4, C2, C4, C6, C4, C2 /2
+    .hword 11585, 11585, 11585, 11585, 15137, 15137, 15137, 15137  @ C4, C4, C4, C4, C2, C2, C2, C2 /2
+    .hword 11585, 11585, 11585, 11585,  6270,  6270,  6270,  6270  @ C4, C4, C4, C4, C6, C6, C6, C6 /2
+
+.end
diff --git a/config.h b/config.h
deleted file mode 100644
index 6e38c88..0000000
--- a/config.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* config.h.  Generated from config.h.in by configure.  */
-/* config.h.in.  Generated from configure.ac by autoheader.  */
-
-/* Build number */
-#define BUILD "20110829"
-
-/* Support arithmetic encoding */
-#define C_ARITH_CODING_SUPPORTED 1
-
-/* Support arithmetic decoding */
-#define D_ARITH_CODING_SUPPORTED 1
-
-/* Define to 1 if you have the <dlfcn.h> header file. */
-#define HAVE_DLFCN_H 1
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#define HAVE_INTTYPES_H 1
-
-/* Define to 1 if you have the <jni.h> header file. */
-/* #undef HAVE_JNI_H */
-
-/* Define to 1 if you have the `memcpy' function. */
-#define HAVE_MEMCPY 1
-
-/* Define to 1 if you have the <memory.h> header file. */
-#define HAVE_MEMORY_H 1
-
-/* Define to 1 if you have the `memset' function. */
-#define HAVE_MEMSET 1
-
-/* Define if your compiler supports prototypes */
-#define HAVE_PROTOTYPES 1
-
-/* Define to 1 if you have the <stddef.h> header file. */
-#define HAVE_STDDEF_H 1
-
-/* Define to 1 if you have the <stdint.h> header file. */
-#define HAVE_STDINT_H 1
-
-/* Define to 1 if you have the <stdlib.h> header file. */
-#define HAVE_STDLIB_H 1
-
-/* Define to 1 if you have the <strings.h> header file. */
-#define HAVE_STRINGS_H 1
-
-/* Define to 1 if you have the <string.h> header file. */
-#define HAVE_STRING_H 1
-
-/* Define to 1 if you have the <sys/stat.h> header file. */
-#define HAVE_SYS_STAT_H 1
-
-/* Define to 1 if you have the <sys/types.h> header file. */
-#define HAVE_SYS_TYPES_H 1
-
-/* Define to 1 if you have the <unistd.h> header file. */
-#define HAVE_UNISTD_H 1
-
-/* Define to 1 if the system has the type `unsigned char'. */
-#define HAVE_UNSIGNED_CHAR 1
-
-/* Define to 1 if the system has the type `unsigned short'. */
-#define HAVE_UNSIGNED_SHORT 1
-
-/* Compiler does not support pointers to undefined structures. */
-/* #undef INCOMPLETE_TYPES_BROKEN */
-
-/* libjpeg API version */
-#define JPEG_LIB_VERSION 62
-
-/* Define to the sub-directory in which libtool stores uninstalled libraries.
-   */
-#define LT_OBJDIR ".libs/"
-
-/* Define if you have BSD-like bzero and bcopy */
-/* #undef NEED_BSD_STRINGS */
-
-/* Define if you need short function names */
-/* #undef NEED_SHORT_EXTERNAL_NAMES */
-
-/* Define if you have sys/types.h */
-#define NEED_SYS_TYPES_H 1
-
-/* Name of package */
-#define PACKAGE "libjpeg-turbo"
-
-/* Define to the address where bug reports for this package should be sent. */
-#define PACKAGE_BUGREPORT ""
-
-/* Define to the full name of this package. */
-#define PACKAGE_NAME "libjpeg-turbo"
-
-/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "libjpeg-turbo 1.1.90"
-
-/* Define to the one symbol short name of this package. */
-#define PACKAGE_TARNAME "libjpeg-turbo"
-
-/* Define to the home page for this package. */
-#define PACKAGE_URL ""
-
-/* Define to the version of this package. */
-#define PACKAGE_VERSION "1.1.90"
-
-/* Define if shift is unsigned */
-/* #undef RIGHT_SHIFT_IS_UNSIGNED */
-
-/* Define to 1 if you have the ANSI C header files. */
-#define STDC_HEADERS 1
-
-/* Version number of package */
-#define VERSION "1.1.90"
-
-/* Use accelerated SIMD routines. */
-#define WITH_SIMD 1
-
-/* Define to 1 if type `char' is unsigned and you are not using gcc. */
-#ifndef __CHAR_UNSIGNED__
-/* # undef __CHAR_UNSIGNED__ */
-#endif
-
-/* Define to empty if `const' does not conform to ANSI C. */
-/* #undef const */
-
-/* Define to `__inline__' or `__inline' if that's what the C compiler
-   calls it, or to nothing if 'inline' is not supported under any name. */
-#ifndef __cplusplus
-/* #undef inline */
-#endif
-
-/* Define to `unsigned int' if <stddef.h> does not define. */
-/* #undef size_t */
diff --git a/jconfig.h b/jconfig.h
index 3f12221..d4084ec 100644
--- a/jconfig.h
+++ b/jconfig.h
@@ -59,4 +59,3 @@
 /* Define to `unsigned int' if <stddef.h> does not define. */
 /* #undef size_t */
-
diff --git a/jdcolor.c b/jdcolor.c
index b3d7e06..d5d4936 100644
--- a/jdcolor.c
+++ b/jdcolor.c
@@ -586,7 +586,9 @@ jinit_color_deconverter (j_decompress_ptr cinfo)
     if (cinfo->dither_mode == JDITHER_NONE) {
       if (cinfo->jpeg_color_space == JCS_YCbCr) {
         cconvert->pub.color_convert = ycc_rgb_565_convert;
+#ifndef ANDROID_JPEG_USE_VENUM
         build_ycc_rgb_table(cinfo);
+#endif
       } else if (cinfo->jpeg_color_space == JCS_GRAYSCALE) {
         cconvert->pub.color_convert = gray_rgb_565_convert;
       } else if (cinfo->jpeg_color_space == JCS_RGB) {
@@ -596,8 +598,13 @@ jinit_color_deconverter (j_decompress_ptr cinfo)
     } else {
       /* only ordered dither is supported */
       if (cinfo->jpeg_color_space == JCS_YCbCr) {
+#ifdef ANDROID_JPEG_USE_VENUM
+        /* Use the VeNum routine even if a dithering option is selected. */
+        cconvert->pub.color_convert = ycc_rgb_565_convert;
+#else
        cconvert->pub.color_convert = ycc_rgb_565D_convert;
        build_ycc_rgb_table(cinfo);
+#endif
       } else if (cinfo->jpeg_color_space == JCS_GRAYSCALE) {
         cconvert->pub.color_convert = gray_rgb_565D_convert;
       } else if (cinfo->jpeg_color_space == JCS_RGB) {
@@ -632,6 +639,32 @@ METHODDEF(void)
 ycc_rgba_8888_convert (j_decompress_ptr cinfo,
                        JSAMPIMAGE input_buf, JDIMENSION input_row,
                        JSAMPARRAY output_buf, int num_rows)
+#ifdef ANDROID_JPEG_USE_VENUM
+/*
+ * Converts YCC->RGBA8888 using VeNum instructions.
+ */ +{ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + JSAMPROW inptr0, inptr1, inptr2; + JSAMPROW outptr; + JDIMENSION row; + + for (row = 0; row < (JDIMENSION)num_rows; row++) + { + inptr0 = input_buf[0][input_row]; + inptr1 = input_buf[1][input_row]; + inptr2 = input_buf[2][input_row]; + input_row++; + outptr = *output_buf++; + + yvup2abgr8888_venum((UINT8*) inptr0, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr, + cinfo->output_width); + } +} +#else { my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; register int y, cb, cr; @@ -668,13 +701,38 @@ ycc_rgba_8888_convert (j_decompress_ptr cinfo, } } } - +#endif METHODDEF(void) ycc_rgb_565_convert (j_decompress_ptr cinfo, JSAMPIMAGE input_buf, JDIMENSION input_row, JSAMPARRAY output_buf, int num_rows) { +#ifdef ANDROID_JPEG_USE_VENUM +/* + * Converts YCC->RGB565 using VeNum instructions. + */ + my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; + JSAMPROW inptr0, inptr1, inptr2; + JSAMPROW outptr; + JDIMENSION row; + + for (row = 0; row < (JDIMENSION)num_rows; row++) + { + inptr0 = input_buf[0][input_row]; + inptr1 = input_buf[1][input_row]; + inptr2 = input_buf[2][input_row]; + input_row++; + outptr = *output_buf++; + + yvup2rgb565_venum((UINT8*) inptr0, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr, + cinfo->output_width); + } +} +#else my_cconvert_ptr cconvert = (my_cconvert_ptr) cinfo->cconvert; register int y, cb, cr; register JSAMPROW outptr; @@ -741,6 +799,7 @@ ycc_rgb_565_convert (j_decompress_ptr cinfo, } } } +#endif // venum METHODDEF(void) ycc_rgb_565D_convert (j_decompress_ptr cinfo, diff --git a/jdmerge.c b/jdmerge.c index dec2e29..bde1b11 100644 --- a/jdmerge.c +++ b/jdmerge.c @@ -391,6 +391,24 @@ METHODDEF(void) h2v1_merged_upsample_565 (j_decompress_ptr cinfo, JSAMPIMAGE input_buf, JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf) +#ifdef ANDROID_JPEG_USE_VENUM +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + JSAMPROW inptr0, inptr1, inptr2; + JSAMPROW outptr; + + inptr0 = input_buf[0][in_row_group_ctr]; + inptr1 = input_buf[1][in_row_group_ctr]; + inptr2 = input_buf[2][in_row_group_ctr]; + outptr = output_buf[0]; + + yyvup2rgb565_venum((UINT8*) inptr0, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr, + cinfo->output_width); +} +#else { my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; register int y, cred, cgreen, cblue; @@ -449,6 +467,7 @@ h2v1_merged_upsample_565 (j_decompress_ptr cinfo, *(INT16*)outptr = rgb; } } +#endif METHODDEF(void) h2v1_merged_upsample_565D (j_decompress_ptr cinfo, @@ -521,6 +540,31 @@ METHODDEF(void) h2v2_merged_upsample_565 (j_decompress_ptr cinfo, JSAMPIMAGE input_buf, JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf) +#ifdef ANDROID_JPEG_USE_VENUM +{ + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + JSAMPROW outptr0, outptr1; + JSAMPROW inptr00, inptr01, inptr1, inptr2; + inptr00 = input_buf[0][in_row_group_ctr*2]; + inptr01 = input_buf[0][in_row_group_ctr*2 + 1]; + inptr1 = input_buf[1][in_row_group_ctr]; + inptr2 = input_buf[2][in_row_group_ctr]; + outptr0 = output_buf[0]; + outptr1 = output_buf[1]; + + yyvup2rgb565_venum((UINT8*) inptr00, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr0, + cinfo->output_width); + + yyvup2rgb565_venum((UINT8*) inptr01, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr1, + cinfo->output_width); +} +#else { my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; register int y, cred, cgreen, cblue; @@ -599,6 +643,7 @@ 
h2v2_merged_upsample_565 (j_decompress_ptr cinfo, *(INT16*)outptr1 = rgb; } } +#endif METHODDEF(void) h2v2_merged_upsample_565D (j_decompress_ptr cinfo, @@ -727,11 +772,16 @@ jinit_merged_upsampler (j_decompress_ptr cinfo) #ifdef ANDROID_RGB if (cinfo->out_color_space == JCS_RGB_565) { - if (cinfo->dither_mode == JDITHER_NONE) { - upsample->upmethod = h2v2_merged_upsample_565; - } else { - upsample->upmethod = h2v2_merged_upsample_565D; - } +#ifndef ANDROID_JPEG_USE_VENUM + if (cinfo->dither_mode != JDITHER_NONE) { + upsample->upmethod = h2v2_merged_upsample_565D; + } else +#endif + { + /* If VeNum routines are enabled, use h2v2_merged_upsample_565 + * function regardless of dither mode. */ + upsample->upmethod = h2v2_merged_upsample_565; + } } #endif /* ANDROID_RGB */ @@ -747,18 +797,25 @@ jinit_merged_upsampler (j_decompress_ptr cinfo) upsample->upmethod = h2v1_merged_upsample; #ifdef ANDROID_RGB if (cinfo->out_color_space == JCS_RGB_565) { - if (cinfo->dither_mode == JDITHER_NONE) { - upsample->upmethod = h2v1_merged_upsample_565; - } else { - upsample->upmethod = h2v1_merged_upsample_565D; - } +#ifndef ANDROID_JPEG_USE_VENUM + if (cinfo->dither_mode != JDITHER_NONE) { + upsample->upmethod = h2v1_merged_upsample_565D; + } else +#endif + { + /* If VeNum routines are enabled, use h2v1_merged_upsample_565 + * function regardless of dither mode. */ + upsample->upmethod = h2v1_merged_upsample_565; + } } #endif /* ANDROID_RGB */ /* No spare row needed */ upsample->spare_row = NULL; } +#ifndef ANDROID_JPEG_USE_VENUM build_ycc_rgb_table(cinfo); +#endif } #endif /* UPSAMPLE_MERGING_SUPPORTED */ diff --git a/jpegint.h b/jpegint.h index a6fa9aa..937566a 100644 --- a/jpegint.h +++ b/jpegint.h @@ -307,6 +307,46 @@ struct jpeg_color_quantizer { }; +#ifdef ANDROID_JPEG_USE_VENUM +/* IDCT routines */ +EXTERN (void) idct_1x1_venum (INT16 * coeffPtr, INT16 * samplePtr, INT32 stride); +EXTERN (void) idct_2x2_venum (INT16 * coeffPtr, INT16 * samplePtr, INT32 stride); +EXTERN (void) idct_4x4_venum (INT16 * coeffPtr, INT16 * samplePtr, INT32 stride); +EXTERN (void) idct_8x8_venum (INT16 * coeffPtr, INT16 * samplePtr, INT32 stride); + +/* Color conversion routines */ +EXTERN (void) yvup2rgb565_venum (UINT8 *pLumaLine, + UINT8 *pCrLine, + UINT8 *pCbLine, + UINT8 *pRGB565Line, + JDIMENSION nLineWidth); +EXTERN (void) yyvup2rgb565_venum (UINT8 * pLumaLine, + UINT8 *pCrLine, + UINT8 *pCbLine, + UINT8 * pRGB565Line, + JDIMENSION nLineWidth); +EXTERN (void) yvup2bgr888_venum (UINT8 * pLumaLine, + UINT8 *pCrLine, + UINT8 *pCbLine, + UINT8 * pBGR888Line, + JDIMENSION nLineWidth); +EXTERN (void) yyvup2bgr888_venum (UINT8 * pLumaLine, + UINT8 *pCrLine, + UINT8 *pCbLine, + UINT8 * pBGR888Line, + JDIMENSION nLineWidth); +EXTERN (void) yvup2abgr8888_venum (UINT8 * pLumaLine, + UINT8 *pCrLine, + UINT8 *pCbLine, + UINT8 * pABGR888Line, + JDIMENSION nLineWidth); +EXTERN (void) yyvup2abgr8888_venum (UINT8 * pLumaLine, + UINT8 *pCrLine, + UINT8 *pCbLine, + UINT8 * pABGR888Line, + JDIMENSION nLineWidth); +#endif + /* Miscellaneous useful macros */ #undef MAX diff --git a/libjpeg.map b/libjpeg.map new file mode 100644 index 0000000..0b2adfe --- /dev/null +++ b/libjpeg.map @@ -0,0 +1,10 @@ +LIBJPEGTURBO_6.2 { + local: + jsimd_*; + jconst_*; +}; + +LIBJPEG_6.2 { + global: + *; +}; diff --git a/simd/Makefile.am b/simd/Makefile.am index 54a337c..4da4358 100644 --- a/simd/Makefile.am +++ b/simd/Makefile.am @@ -55,8 +55,11 @@ endif if SIMD_ARM libsimd_la_CFLAGS = -DAVOID_TABLES -O3 -fstrict-aliasing -fprefetch-loop-arrays -DANDROID \ 
- -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT -libsimd_la_SOURCES = jsimd_arm.c jsimd_arm_neon.S + -DANDROID_TILE_BASED_DECODE -DENABLE_ANDROID_NULL_CONVERT -DANDROID_JPEG_USE_VENUM \ + -march=armv7-a -mfpu=neon +libsimd_la_CCASFLAGS = -O3 -march=armv7-a -mfpu=neon +libsimd_la_SOURCES = jsimd_arm.c jsimd_arm_neon.S \ + ../asm/armv7/jdcolor-armv7.S ../asm/armv7/jdidct-armv7.S endif diff --git a/simd/jsimd_arm.c b/simd/jsimd_arm.c index a9d920c..dbd02e1 100644 --- a/simd/jsimd_arm.c +++ b/simd/jsimd_arm.c @@ -27,6 +27,40 @@ #include #include +typedef JMETHOD(void, upsample1_ptr, + (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY * output_data_ptr)); + +typedef struct { + struct jpeg_upsampler pub; /* public fields */ + + /* Color conversion buffer. When using separate upsampling and color + * conversion steps, this buffer holds one upsampled row group until it + * has been color converted and output. + * Note: we do not allocate any storage for component(s) which are full-size, + * ie do not need rescaling. The corresponding entry of color_buf[] is + * simply set to point to the input data array, thereby avoiding copying. + */ + JSAMPARRAY color_buf[MAX_COMPONENTS]; + + /* Per-component upsampling method pointers */ + upsample1_ptr methods[MAX_COMPONENTS]; + + int next_row_out; /* counts rows emitted from color_buf */ + JDIMENSION rows_to_go; /* counts rows remaining in image */ + + /* Height of an input row group for each component. */ + int rowgroup_height[MAX_COMPONENTS]; + + /* These arrays save pixel expansion factors so that int_expand need not + * recompute them each time. They are unused for other upsampling methods. + */ + UINT8 h_expand[MAX_COMPONENTS]; + UINT8 v_expand[MAX_COMPONENTS]; +} my_upsampler; + +typedef my_upsampler * my_upsample_ptr; + static unsigned int simd_support = ~0; #if defined(__linux__) || defined(ANDROID) || defined(__ANDROID__) @@ -354,7 +388,11 @@ jsimd_can_h2v2_merged_upsample (void) { init_simd(); +#ifdef ANDROID_JPEG_USE_VENUM + return 1; +#else return 0; +#endif } GLOBAL(int) @@ -362,7 +400,12 @@ jsimd_can_h2v1_merged_upsample (void) { init_simd(); + +#ifdef ANDROID_JPEG_USE_VENUM + return 1; +#else return 0; +#endif } GLOBAL(void) @@ -371,6 +414,46 @@ jsimd_h2v2_merged_upsample (j_decompress_ptr cinfo, JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf) { +#ifdef ANDROID_JPEG_USE_VENUM + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + JSAMPROW outptr0, outptr1; + JSAMPROW inptr00, inptr01, inptr1, inptr2; + inptr00 = input_buf[0][in_row_group_ctr*2]; + inptr01 = input_buf[0][in_row_group_ctr*2 + 1]; + inptr1 = input_buf[1][in_row_group_ctr]; + inptr2 = input_buf[2][in_row_group_ctr]; + outptr0 = output_buf[0]; + outptr1 = output_buf[1]; + +#ifdef ANDROID_RGB + if (cinfo->out_color_space == JCS_RGBA_8888) { + yyvup2abgr8888_venum((UINT8*) inptr00, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr0, + cinfo->output_width); + yyvup2abgr8888_venum((UINT8*) inptr01, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr1, + cinfo->output_width); + } else +#endif + { + yyvup2bgr888_venum((UINT8*) inptr00, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr0, + cinfo->output_width); + + yyvup2bgr888_venum((UINT8*) inptr01, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr1, + cinfo->output_width); + } + +#endif } GLOBAL(void) @@ -379,6 +462,34 @@ jsimd_h2v1_merged_upsample (j_decompress_ptr cinfo, JDIMENSION in_row_group_ctr, JSAMPARRAY output_buf) { +#ifdef 
ANDROID_JPEG_USE_VENUM + + my_upsample_ptr upsample = (my_upsample_ptr) cinfo->upsample; + JSAMPROW inptr0, inptr1, inptr2; + JSAMPROW outptr; + + inptr0 = input_buf[0][in_row_group_ctr]; + inptr1 = input_buf[1][in_row_group_ctr]; + inptr2 = input_buf[2][in_row_group_ctr]; + outptr = output_buf[0]; + +#ifdef ANDROID_RGB + if (cinfo->out_color_space == JCS_RGBA_8888) { + yyvup2abgr8888_venum((UINT8*) inptr0, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr, + cinfo->output_width); + } else +#endif + { + yyvup2bgr888_venum((UINT8*) inptr0, + (UINT8*) inptr2, + (UINT8*) inptr1, + (UINT8*) outptr, + cinfo->output_width); + } +#endif } GLOBAL(int)
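
Note on the coefficient table in jdidct-armv7.S above: the Q15 values are the halved iDCT cosines that the "/2" annotations describe, i.e. Ck = cos(k*pi/16) in Q15, stored as Ck/2. A standalone C check (illustration only, not patch code; it assumes nothing beyond the table's own annotations) reproduces all three distinct values:

    /* Regenerate the annotated 4x4 iDCT coefficients: Ck/2 in Q15
     * equals round(cos(k*pi/16) * 2^14).  Build with: cc q15.c -lm */
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        const double PI = 3.14159265358979323846;
        const int k[] = { 4, 2, 6 };
        for (int i = 0; i < 3; i++) {
            int q15_half = (int) floor(cos(k[i] * PI / 16.0) * 16384.0 + 0.5);
            printf("C%d/2 = %d\n", k[i], q15_half);
        }
        return 0;   /* prints 11585, 15137, and 6270 */
    }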
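The yvup2*_venum/yyvup2*_venum routines consume planar rows in Y, Cr ("v"), Cb ("u") order — hence the swapped chroma arguments at every call site above — and emit one packed output row of nLineWidth pixels. As a rough model of what the RGB565 variants compute per pixel, here is a scalar sketch using the standard JFIF YCbCr->RGB equations in 16.16 fixed point; the constants (91881 ~ 1.402, 22554 ~ 0.344136, 46802 ~ 0.714136, 116132 ~ 1.772) are my own rounding, and the NEON code's exact arithmetic and rounding may differ:

    #include <stdint.h>

    static uint8_t clamp8(int v)
    {
        return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));
    }

    /* Scalar model of one RGB565 output pixel: JFIF YCbCr -> RGB, then
     * 5:6:5 packing.  Assumes arithmetic >> on negative ints (true on
     * ARM/gcc).  Illustration only, not the VeNum code. */
    static uint16_t yvup2rgb565_pixel(uint8_t y, uint8_t v, uint8_t u)
    {
        int cr = v - 128, cb = u - 128;
        int r = y + ((91881 * cr) >> 16);
        int g = y - ((22554 * cb + 46802 * cr) >> 16);
        int b = y + ((116132 * cb) >> 16);
        return (uint16_t)(((clamp8(r) >> 3) << 11) |
                          ((clamp8(g) >> 2) << 5)  |
                           (clamp8(b) >> 3));
    }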
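With ANDROID_JPEG_USE_VENUM defined, jsimd_can_h2v1_merged_upsample() and jsimd_can_h2v2_merged_upsample() report 1, so decoding a typical 4:2:0 or 4:2:2 YCbCr JPEG takes the VeNum merged-upsample paths above, and (per the jdcolor.c/jdmerge.c hunks) dithering requests are ignored for RGB565 output. A minimal caller that exercises the 565 path — standard libjpeg API plus the Android JCS_RGB_565 extension already used in this patch; error handling omitted for brevity:

    #include <stdio.h>
    #include "jpeglib.h"

    void decode_rgb565(FILE *infile)
    {
        struct jpeg_decompress_struct cinfo;
        struct jpeg_error_mgr jerr;

        cinfo.err = jpeg_std_error(&jerr);
        jpeg_create_decompress(&cinfo);
        jpeg_stdio_src(&cinfo, infile);
        jpeg_read_header(&cinfo, TRUE);

        cinfo.out_color_space = JCS_RGB_565;   /* Android extension */
        jpeg_start_decompress(&cinfo);

        /* 2 bytes per RGB565 pixel */
        JSAMPARRAY row = (*cinfo.mem->alloc_sarray)
            ((j_common_ptr) &cinfo, JPOOL_IMAGE, cinfo.output_width * 2, 1);

        while (cinfo.output_scanline < cinfo.output_height)
            jpeg_read_scanlines(&cinfo, row, 1);

        jpeg_finish_decompress(&cinfo);
        jpeg_destroy_decompress(&cinfo);
    }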