[FFmpeg-devel] [PATCH] huffyuvencdsp: Add ff_diff_bytes_{sse2, avx2}
James Almer
jamrial at gmail.com
Wed Oct 21 04:34:04 CEST 2015
On 10/20/2015 10:32 PM, Timothy Gu wrote:
> SSE2 version is 4%-35% faster than MMX depending on the width.
> AVX2 version is 1%-13% faster than SSE2 depending on the width.
> ---
>
> Addressed James's and Henrik's advice. Removed heuristics based on width.
> Made both aligned and unaligned versions available. The AVX2 version
> gracefully falls back to SSE2.
>
> ---
> libavcodec/huffyuvenc.c | 4 +-
> libavcodec/x86/huffyuvencdsp.asm | 110 +++++++++++++++++++++++++++++++------
> libavcodec/x86/huffyuvencdsp_mmx.c | 14 ++++-
> 3 files changed, 107 insertions(+), 21 deletions(-)
>
> diff --git a/libavcodec/huffyuvenc.c b/libavcodec/huffyuvenc.c
> index 49d711a..e080cd9 100644
> --- a/libavcodec/huffyuvenc.c
> +++ b/libavcodec/huffyuvenc.c
> @@ -60,12 +60,12 @@ static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
> }
> return left;
> } else {
> - for (i = 0; i < 16; i++) {
> + for (i = 0; i < 32; i++) {
> const int temp = src[i];
> dst[i] = temp - left;
> left = temp;
> }
> - s->hencdsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
> + s->hencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
> return src[w-1];
> }
> } else {
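(Context for readers, not part of the patch: diff_bytes() is just a byte-wise
difference, dst[i] = src1[i] - src2[i], and sub_left_prediction() calls it with
src2 overlapping src by one byte so each output byte becomes the difference to
its left neighbour. A minimal C sketch follows; diff_bytes_ref() is a made-up
name, and the alignment remark is only my reading of why the scalar prologue
grows from 16 to 32 bytes.)

    #include <stdint.h>

    /* Reference behaviour of diff_bytes(): byte-wise difference with
     * uint8_t wrap-around; the MMX/SSE2/AVX2 versions compute the same. */
    static void diff_bytes_ref(uint8_t *dst, const uint8_t *src1,
                               const uint8_t *src2, intptr_t w)
    {
        for (intptr_t i = 0; i < w; i++)
            dst[i] = src1[i] - src2[i];
    }

    /* In sub_left_prediction() above, the first 32 bytes are differenced by
     * scalar code and the rest via diff_bytes(dst + 32, src + 32, src + 31,
     * w - 32), i.e. each byte minus its left neighbour. Widening the scalar
     * prologue from 16 to 32 presumably keeps dst + 32 and src + 32 aligned
     * for the 32-byte ymm registers when the buffers themselves are aligned. */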
> diff --git a/libavcodec/x86/huffyuvencdsp.asm b/libavcodec/x86/huffyuvencdsp.asm
> index e001906..699fd38 100644
> --- a/libavcodec/x86/huffyuvencdsp.asm
> +++ b/libavcodec/x86/huffyuvencdsp.asm
> @@ -27,9 +27,9 @@
>
> section .text
>
> -INIT_MMX mmx
> ; void ff_diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
> ; intptr_t w);
> +%macro DIFF_BYTES_PROLOGUE 0
> %if ARCH_X86_32
> cglobal diff_bytes, 3,5,2, dst, src1, src2
> %define wq r4q
> @@ -40,34 +40,108 @@ cglobal diff_bytes, 4,5,2, dst, src1, src2, w
> DECLARE_REG_TMP 4
> %endif ; ARCH_X86_32
> %define i t0q
> +%endmacro
> +
> +; label to jump to if w < regsize
> +%macro DIFF_BYTES_LOOP_PREP 1
> mov i, wq
> - and i, -2 * mmsize
> - jz .setup_loop2
> + and i, -2 * regsize
> + jz %1
> add dstq, i
> add src1q, i
> add src2q, i
> neg i
> -.loop:
> - mova m0, [src1q + i]
> - mova m1, [src1q + i + mmsize]
> - psubb m0, [src2q + i]
> - psubb m1, [src2q + i + mmsize]
> - mova [dstq + i], m0
> - mova [mmsize + dstq + i], m1
> - add i, 2 * mmsize
> - jl .loop
> -.setup_loop2:
> - and wq, 2 * mmsize - 1
> - jz .end
> +%endmacro
> +
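(Side note on the indexing idiom, illustration only — diff_bytes_sketch() and
the regsize parameter below are invented for the example and are not in the
patch: the prep masks w down to a multiple of 2 * regsize, advances all three
pointers past that region, and negates the index so a single add/jl pair per
iteration both advances and tests the loop. Roughly, in C:)

    #include <stdint.h>

    /* Rough C equivalent of DIFF_BYTES_LOOP_PREP plus the main/tail split.
     * regsize stands for the SIMD register width (a power of two). */
    static void diff_bytes_sketch(uint8_t *dst, const uint8_t *src1,
                                  const uint8_t *src2, intptr_t w,
                                  intptr_t regsize)
    {
        intptr_t i = w & -(2 * regsize);   /* cf. "and i, -2 * regsize" */
        if (i) {
            dst  += i;                     /* point past the unrolled region */
            src1 += i;
            src2 += i;
            for (i = -i; i < 0; i += 2 * regsize)      /* negative index -> 0 */
                for (intptr_t j = 0; j < 2 * regsize; j++)
                    dst[i + j] = src1[i + j] - src2[i + j];
        }
        for (i = 0; i < (w & (2 * regsize - 1)); i++)  /* leftover tail bytes */
            dst[i] = src1[i] - src2[i];
    }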
> +; mov type used for src1q, dstq, first reg, second reg
> +%macro DIFF_BYTES_LOOP_CORE 4
> +%if regsize != 16
%if mmsize != 16
By checking regsize you're using the SSE2 version in the AVX2 xmm loop. Check
for mmsize instead, which is always 32 since you used INIT_YMM.
Should be good otherwise, but wait for Hendrik in case he wants to comment.