[FFmpeg-devel] [PATCH] Moves yuv2yuvX_sse3 to yasm, unrolls the main loop and adds other small optimizations for a ~20% speedup.

Alan Kelly alankelly at google.com
Thu Dec 10 17:46:26 EET 2020


---
 Replaces ff_sws_init_swscale_x86 with ff_getSwsFunc in the checkasm test
 The coefficient load offset is 8, not gprsize, on both 32- and 64-bit
 Removes the sfence, as non-temporal stores are no longer used
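
 For reference, a minimal sketch of the 16-byte vertical filter entry layout
 the new asm walks (it mirrors the VFilterData union in the checkasm test; the
 type name below is illustrative only, not part of the patch):

     union VFilterEntry {
         const int16_t *src; /* pointer to a source line                    */
         int16_t coeff[8];   /* coeff[4..7], i.e. bytes 8..15, hold the
                                filter coefficient replicated as words      */
     };                      /* 16-byte stride, terminated by src == NULL   */
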
 libswscale/x86/Makefile     |   1 +
 libswscale/x86/swscale.c    | 106 +++++++++-----------------------
 libswscale/x86/yuv2yuvX.asm | 117 ++++++++++++++++++++++++++++++++++++
 tests/checkasm/sw_scale.c   | 101 ++++++++++++++++++++++++++++++-
 4 files changed, 248 insertions(+), 77 deletions(-)
 create mode 100644 libswscale/x86/yuv2yuvX.asm

diff --git a/libswscale/x86/Makefile b/libswscale/x86/Makefile
index 831d5359aa..bfe383364e 100644
--- a/libswscale/x86/Makefile
+++ b/libswscale/x86/Makefile
@@ -13,3 +13,4 @@ X86ASM-OBJS                     += x86/input.o                          \
                                    x86/scale.o                          \
                                    x86/rgb_2_rgb.o                      \
                                    x86/yuv_2_rgb.o                      \
+                                   x86/yuv2yuvX.o                       \
diff --git a/libswscale/x86/swscale.c b/libswscale/x86/swscale.c
index 3160fedf04..8cd8713705 100644
--- a/libswscale/x86/swscale.c
+++ b/libswscale/x86/swscale.c
@@ -197,81 +197,30 @@ void ff_updateMMXDitherTables(SwsContext *c, int dstY)
 }
 
 #if HAVE_MMXEXT
-static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
-                           const int16_t **src, uint8_t *dest, int dstW,
-                           const uint8_t *dither, int offset)
-{
-    if(((uintptr_t)dest) & 15){
-        yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
-        return;
-    }
-    filterSize--;
-#define MAIN_FUNCTION \
-        "pxor       %%xmm0, %%xmm0 \n\t" \
-        "punpcklbw  %%xmm0, %%xmm3 \n\t" \
-        "movd           %4, %%xmm1 \n\t" \
-        "punpcklwd  %%xmm1, %%xmm1 \n\t" \
-        "punpckldq  %%xmm1, %%xmm1 \n\t" \
-        "punpcklqdq %%xmm1, %%xmm1 \n\t" \
-        "psllw          $3, %%xmm1 \n\t" \
-        "paddw      %%xmm1, %%xmm3 \n\t" \
-        "psraw          $4, %%xmm3 \n\t" \
-        "movdqa     %%xmm3, %%xmm4 \n\t" \
-        "movdqa     %%xmm3, %%xmm7 \n\t" \
-        "movl           %3, %%ecx  \n\t" \
-        "mov                                 %0, %%"FF_REG_d"        \n\t"\
-        "mov                        (%%"FF_REG_d"), %%"FF_REG_S"     \n\t"\
-        ".p2align                             4             \n\t" /* FIXME Unroll? */\
-        "1:                                                 \n\t"\
-        "movddup                  8(%%"FF_REG_d"), %%xmm0   \n\t" /* filterCoeff */\
-        "movdqa              (%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm2 \n\t" /* srcData */\
-        "movdqa            16(%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm5 \n\t" /* srcData */\
-        "add                                $16, %%"FF_REG_d"        \n\t"\
-        "mov                        (%%"FF_REG_d"), %%"FF_REG_S"     \n\t"\
-        "test                         %%"FF_REG_S", %%"FF_REG_S"     \n\t"\
-        "pmulhw                           %%xmm0, %%xmm2      \n\t"\
-        "pmulhw                           %%xmm0, %%xmm5      \n\t"\
-        "paddw                            %%xmm2, %%xmm3      \n\t"\
-        "paddw                            %%xmm5, %%xmm4      \n\t"\
-        " jnz                                1b             \n\t"\
-        "psraw                               $3, %%xmm3      \n\t"\
-        "psraw                               $3, %%xmm4      \n\t"\
-        "packuswb                         %%xmm4, %%xmm3      \n\t"\
-        "movntdq                          %%xmm3, (%1, %%"FF_REG_c") \n\t"\
-        "add                         $16, %%"FF_REG_c"        \n\t"\
-        "cmp                          %2, %%"FF_REG_c"        \n\t"\
-        "movdqa                   %%xmm7, %%xmm3            \n\t" \
-        "movdqa                   %%xmm7, %%xmm4            \n\t" \
-        "mov                                 %0, %%"FF_REG_d"        \n\t"\
-        "mov                        (%%"FF_REG_d"), %%"FF_REG_S"     \n\t"\
-        "jb                                  1b             \n\t"
-
-    if (offset) {
-        __asm__ volatile(
-            "movq          %5, %%xmm3  \n\t"
-            "movdqa    %%xmm3, %%xmm4  \n\t"
-            "psrlq        $24, %%xmm3  \n\t"
-            "psllq        $40, %%xmm4  \n\t"
-            "por       %%xmm4, %%xmm3  \n\t"
-            MAIN_FUNCTION
-              :: "g" (filter),
-              "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
-              "m"(filterSize), "m"(((uint64_t *) dither)[0])
-              : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
-                "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
-              );
-    } else {
-        __asm__ volatile(
-            "movq          %5, %%xmm3   \n\t"
-            MAIN_FUNCTION
-              :: "g" (filter),
-              "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
-              "m"(filterSize), "m"(((uint64_t *) dither)[0])
-              : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
-                "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
-              );
-    }
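+/*
+ * Dispatch wrappers around the yasm implementations: destinations that are
+ * not 16-byte aligned fall back entirely to yuv2yuvX_mmx, the asm handles
+ * the largest multiple of 'step' pixels, and any remaining tail pixels are
+ * again handed to yuv2yuvX_mmx.
+ */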
+#define YUV2YUVX_FUNC(opt, step)  \
+void ff_yuv2yuvX_ ##opt(const int16_t *filter, long filterSize, const int16_t **src, \
+                           uint8_t *dest, int dstW, \
+                           const uint8_t *dither, int offset); \
+static void yuv2yuvX_ ##opt(const int16_t *filter, int filterSize, \
+                           const int16_t **src, uint8_t *dest, int dstW, \
+                           const uint8_t *dither, int offset) \
+{ \
+    int remainder = (dstW % step); \
+    int pixelsProcessed = dstW - remainder; \
+    if(((uintptr_t)dest) & 15){ \
+        yuv2yuvX_mmx(filter, filterSize, src, dest, dstW, dither, offset); \
+        return; \
+    } \
+    if (pixelsProcessed > 0) \
+        ff_yuv2yuvX_ ##opt(filter, filterSize - 1, src, dest - offset, pixelsProcessed + offset, dither, offset); \
+    if(remainder > 0){ \
+      yuv2yuvX_mmx(filter, filterSize, src, dest + pixelsProcessed, remainder, dither, offset + pixelsProcessed); \
+    } \
+    return; \
 }
+
+YUV2YUVX_FUNC(sse3, 32)
+#if HAVE_AVX2_EXTERNAL
+YUV2YUVX_FUNC(avx2, 64)
+#endif
+
 #endif
 
 #endif /* HAVE_INLINE_ASM */
@@ -402,9 +351,14 @@ av_cold void ff_sws_init_swscale_x86(SwsContext *c)
 #if HAVE_MMXEXT_INLINE
     if (INLINE_MMXEXT(cpu_flags))
         sws_init_swscale_mmxext(c);
-    if (cpu_flags & AV_CPU_FLAG_SSE3){
-        if(c->use_mmx_vfilter && !(c->flags & SWS_ACCURATE_RND))
+#if HAVE_AVX2_EXTERNAL
+    if (cpu_flags & AV_CPU_FLAG_AVX2){
+        if(c->use_mmx_vfilter && !(c->flags & SWS_ACCURATE_RND)){
+            c->yuv2planeX = yuv2yuvX_avx2;
+        }
+    } else
+#endif
+    if (cpu_flags & AV_CPU_FLAG_SSE3){
+        if(c->use_mmx_vfilter && !(c->flags & SWS_ACCURATE_RND)){
             c->yuv2planeX = yuv2yuvX_sse3;
+        }
     }
 #endif
 
diff --git a/libswscale/x86/yuv2yuvX.asm b/libswscale/x86/yuv2yuvX.asm
new file mode 100644
index 0000000000..899b84c50b
--- /dev/null
+++ b/libswscale/x86/yuv2yuvX.asm
@@ -0,0 +1,117 @@
+;******************************************************************************
+;* x86-optimized yuv2yuvX
+;* Copyright 2020 Google LLC
+;* Copyright (C) 2001-2011 Michael Niedermayer <michaelni at gmx.at>
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; yuv2yuvX
+;
+; void ff_yuv2yuvX_<opt>(const int16_t *filter, long filterSize,
+;                        const int16_t **src, uint8_t *dest, int dstW,
+;                        const uint8_t *dither, int offset);
+;
+;-----------------------------------------------------------------------------
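+; filterq points at an array of 16-byte entries, each holding a pointer to a
+; source line in its first eight bytes and the filter coefficient replicated
+; as 16-bit words in bytes 8..15.  The inner loop walks this array until it
+; loads a NULL source pointer; the outer loop writes 2 * mmsize destination
+; bytes per iteration.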
+
+%macro YUV2YUVX_FUNC 0
+cglobal yuv2yuvX, 7, 7, 8, filter, filterSize, src, dest, dstW, dither, offset
+%if ARCH_X86_64
+    movsxd               dstWq, dstWd
+    movsxd               offsetq, offsetd
+%endif ; x86-64
+    movddup              m0, [filterq + 8]
+%if cpuflag(avx2)
+    vpbroadcastq         m3, [ditherq]
+%else
+    movq                 xmm3, [ditherq]
+%endif ; avx2
+    cmp                  offsetd, 0
+    jz                   .offset
+
+    ; offset != 0 path.
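+    ; Rotate the eight dither bytes right by three positions, matching the
+    ; behaviour of the inline-asm version being replaced when offset != 0.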
+    psrlq                m5, m3, 24
+    psllq                m3, m3, 40
+    por                  m3, m3, m5
+
+.offset:
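+    ; Build the per-pixel bias (filterSize * 8 + dither) >> 4 in m7; filterSize
+    ; was already decremented by one in the C wrapper.  m7 reseeds the
+    ; accumulators at the top of every outer-loop iteration.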
+    movd                 xmm1, filterSized
+%if cpuflag(avx2)
+    vpbroadcastw         m1, xmm1
+%else
+    pshuflw              m1, m1, q0000
+    punpcklqdq           m1, m1
+%endif ; avx2
+    pxor                 m0, m0, m0
+    mov                  filterSizeq, filterq
+    mov                  srcq, [filterSizeq]
+    punpcklbw            m3, m0
+    psllw                m1, m1, 3
+    paddw                m3, m3, m1
+    psraw                m7, m3, 4
+.outerloop:
+    mova                 m4, m7
+    mova                 m3, m7
+    mova                 m6, m7
+    mova                 m1, m7
+.loop:
+%if cpuflag(avx2)
+    vpbroadcastq         m0, [filterSizeq + 8]
+%else
+    movddup              m0, [filterSizeq + 8]
+%endif
+    pmulhw               m2, m0, [srcq + offsetq * 2]
+    pmulhw               m5, m0, [srcq + offsetq * 2 + mmsize]
+    paddw                m3, m3, m2
+    paddw                m4, m4, m5
+    pmulhw               m2, m0, [srcq + offsetq * 2 + 2 * mmsize]
+    pmulhw               m5, m0, [srcq + offsetq * 2 + 3 * mmsize]
+    paddw                m6, m6, m2
+    paddw                m1, m1, m5
+    add                  filterSizeq, 16
+    mov                  srcq, [filterSizeq]
+    test                 srcq, srcq
+    jnz                  .loop
+    psraw                m3, m3, 3
+    psraw                m4, m4, 3
+    psraw                m6, m6, 3
+    psraw                m1, m1, 3
+    packuswb             m3, m3, m4
+    packuswb             m6, m6, m1
+    mov                  srcq, [filterq]
+%if cpuflag(avx2)
+    vpermq               m3, m3, 216
+    vpermq               m6, m6, 216
+%endif
+    movu                 [destq + offsetq], m3
+    movu                 [destq + offsetq + mmsize], m6
+    add                  offsetq, mmsize * 2
+    mov                  filterSizeq, filterq
+    cmp                  offsetq, dstWq
+    jb                  .outerloop
+    REP_RET
+%endmacro
+
+INIT_XMM sse3
+YUV2YUVX_FUNC
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+YUV2YUVX_FUNC
+%endif
diff --git a/tests/checkasm/sw_scale.c b/tests/checkasm/sw_scale.c
index 9efa2b4def..b599ba6d67 100644
--- a/tests/checkasm/sw_scale.c
+++ b/tests/checkasm/sw_scale.c
@@ -35,7 +35,104 @@
             AV_WN32(buf + j, rnd());      \
     } while (0)
 
-#define SRC_PIXELS 128
+#define SRC_PIXELS 1024
+
+
+// This reference function implements the same approximate algorithm as the
+// SIMD functions.
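+// Per output pixel:
+//   dest[i] = av_clip_uint8(((((filterSize - 1) * 8 + dither[0]) >> 4)
+//              + sum_j((src[j][i + offset] * filter[j]) >> 16)) >> 3)
+// The union below extracts the high 16 bits of each 32-bit product, which
+// assumes a little-endian host.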
+static void ref_function(const int16_t *filter, int filterSize,
+                                                 const int16_t **src, uint8_t *dest, int dstW,
+                                                 const uint8_t *dither, int offset)
+{
+    int i, d;
+    d = ((filterSize - 1) * 8 + dither[0]) >> 4;
+    for (i=0; i<dstW; i++) {
+        int16_t val = d;
+        int j;
+        union {
+            int val;
+            int16_t v[2];
+        } t;
+        for (j=0; j<filterSize; j++){
+            t.val = (int)src[j][i + offset] * (int)filter[j];
+            val += t.v[1];
+        }
+        dest[i]= av_clip_uint8(val>>3);
+    }
+}
+
+static void check_yuv2yuvX(void)
+{
+    struct SwsContext *ctx;
+    int fsi, osi;
+#define LARGEST_FILTER 16
+#define FILTER_SIZES 4
+    static const int filter_sizes[FILTER_SIZES] = {1, 4, 8, 16};
+
+    declare_func_emms(AV_CPU_FLAG_MMX, void, const int16_t *filter,
+                      int filterSize, const int16_t **src, uint8_t *dest,
+                      int dstW, const uint8_t *dither, int offset);
+
+    int dstW = SRC_PIXELS;
+    const int16_t **src;
+    LOCAL_ALIGNED_32(int16_t, filter_coeff, [LARGEST_FILTER]);
+    LOCAL_ALIGNED_32(uint8_t, dst0, [SRC_PIXELS]);
+    LOCAL_ALIGNED_32(uint8_t, dst1, [SRC_PIXELS]);
+    LOCAL_ALIGNED_32(uint8_t, dither, [SRC_PIXELS]);
+    union VFilterData{
+        const int16_t *src;
+        uint16_t coeff[8];
+    } *vFilterData;
+    uint8_t d_val = rnd();
+    randomize_buffers(filter_coeff, LARGEST_FILTER);
+    ctx = sws_alloc_context();
+    if (sws_init_context(ctx, NULL, NULL) < 0)
+        fail();
+
+    ff_getSwsFunc(ctx);
+    for(int i = 0; i < SRC_PIXELS; ++i){
+        dither[i] = d_val;
+    }
+    for(osi = 0; osi < 64; osi += 16){
+        for(fsi = 0; fsi < FILTER_SIZES; ++fsi){
+            src = malloc(sizeof(int16_t*) * filter_sizes[fsi]);
+            vFilterData = malloc((filter_sizes[fsi] + 2) * sizeof(union VFilterData));
+            memset(vFilterData, 0, (filter_sizes[fsi] + 2) * sizeof(union VFilterData));
+            for(int i = 0; i < filter_sizes[fsi]; ++i){
+                src[i] = malloc(sizeof(int16_t) * SRC_PIXELS);
+                randomize_buffers(src[i], SRC_PIXELS);
+                vFilterData[i].src = src[i];
+            }
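+            // Replicate each coefficient into bytes 8..15 of its 16-byte
+            // entry (coeff[4..7]); this is the offset the asm reads the
+            // coefficient from, independent of the pointer size.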
+            for(int i = 0; i < filter_sizes[fsi]; ++i){
+                for(int j = 0; j < 4; ++j){
+                    vFilterData[i].coeff[j + 4] = filter_coeff[i];
+                }
+            }
+            if (check_func(ctx->yuv2planeX, "yuv2yuvX_%d_%d", filter_sizes[fsi], osi)){
+                memset(dst0, 0, SRC_PIXELS * sizeof(dst0[0]));
+                memset(dst1, 0, SRC_PIXELS * sizeof(dst1[0]));
+
+                // The reference is not the scalar C function that would be
+                // selected with MMX disabled, because the SIMD functions do
+                // not give bit-identical results to the scalar ones due to
+                // rounding. Note that this approximate SIMD path is only
+                // selected when SWS_ACCURATE_RND is *not* set.
+                ref_function(&filter_coeff[0], filter_sizes[fsi], src, dst0, dstW - osi, dither, osi);
+                // Only call and benchmark the new function when the SIMD
+                // vertical filter is actually in use; otherwise yuv2planeX
+                // is not the function under test.
+                if(ctx->use_mmx_vfilter){
+                    call_new((const int16_t*)vFilterData, filter_sizes[fsi], src, dst1, dstW - osi, dither, osi);
+                    if (memcmp(dst0, dst1, SRC_PIXELS * sizeof(dst0[0]))){
+                        fail();
+                    }
+                    bench_new((const int16_t*)vFilterData, filter_sizes[fsi], src, dst1, dstW - osi, dither, osi);
+                }
+            }
+            for(int i = 0; i < filter_sizes[fsi]; ++i){
+                free((void *)src[i]);
+            }
+            free(src);
+            free(vFilterData);
+        }
+    }
+    sws_freeContext(ctx);
+#undef FILTER_SIZES
+}
 
 static void check_hscale(void)
 {
@@ -131,4 +228,6 @@ void checkasm_check_sw_scale(void)
 {
     check_hscale();
     report("hscale");
+    check_yuv2yuvX();
+    report("yuv2yuvX");
 }
-- 
2.29.2.576.ga3fc446d84-goog


