[FFmpeg-devel] [PATCH v2] sws/aarch64: add {nv12, nv21, yuv420p, yuv422p}_to_{argb, rgba, abgr, bgra}_neon

Clément Bœsch u at pkh.me
Mon Feb 29 10:55:49 CET 2016


From: Clément Bœsch <clement at stupeflix.com>

---
Changes since latest version:
- remove unused 32-bit path
- make 16-bit path more accurate by mirroring the MMX code (still not bitexact)
- the code was originally trying to process 2 lines at a time to save chroma pre
  mult computations and avoid re-reading the whole line; for some reason, this
  actually made the code around twice slower, for twice the complexity.
  Dropping that complexity was a win-win.
---
 libswscale/aarch64/Makefile           |   3 +
 libswscale/aarch64/swscale_unscaled.c | 132 ++++++++++++++++++++++
 libswscale/aarch64/yuv2rgb_neon.S     | 207 ++++++++++++++++++++++++++++++++++
 libswscale/swscale_internal.h         |   1 +
 libswscale/swscale_unscaled.c         |   2 +
 5 files changed, 345 insertions(+)
 create mode 100644 libswscale/aarch64/Makefile
 create mode 100644 libswscale/aarch64/swscale_unscaled.c
 create mode 100644 libswscale/aarch64/yuv2rgb_neon.S

diff --git a/libswscale/aarch64/Makefile b/libswscale/aarch64/Makefile
new file mode 100644
index 0000000..823806e
--- /dev/null
+++ b/libswscale/aarch64/Makefile
@@ -0,0 +1,3 @@
+OBJS        += aarch64/swscale_unscaled.o
+
+NEON-OBJS   += aarch64/yuv2rgb_neon.o
diff --git a/libswscale/aarch64/swscale_unscaled.c b/libswscale/aarch64/swscale_unscaled.c
new file mode 100644
index 0000000..551daad
--- /dev/null
+++ b/libswscale/aarch64/swscale_unscaled.c
@@ -0,0 +1,132 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+#include "libavutil/aarch64/cpu.h"
+
+#define YUV_TO_RGB_TABLE                                                                    \
+        c->yuv2rgb_v2r_coeff,                                                               \
+        c->yuv2rgb_u2g_coeff,                                                               \
+        c->yuv2rgb_v2g_coeff,                                                               \
+        c->yuv2rgb_u2b_coeff,                                                               \
+
+#define DECLARE_FF_YUVX_TO_RGBX_FUNCS(ifmt, ofmt)                                           \
+int ff_##ifmt##_to_##ofmt##_neon(int w, int h,                                              \
+                                 uint8_t *dst, int linesize,                                \
+                                 const uint8_t *srcY, int linesizeY,                        \
+                                 const uint8_t *srcU, int linesizeU,                        \
+                                 const uint8_t *srcV, int linesizeV,                        \
+                                 const int16_t *table,                                      \
+                                 int y_offset,                                              \
+                                 int y_coeff);                                              \
+                                                                                            \
+static int ifmt##_to_##ofmt##_neon_wrapper(SwsContext *c, const uint8_t *src[],             \
+                                           int srcStride[], int srcSliceY, int srcSliceH,   \
+                                           uint8_t *dst[], int dstStride[]) {               \
+    const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE };                                   \
+                                                                                            \
+    ff_##ifmt##_to_##ofmt##_neon(c->srcW, srcSliceH,                                        \
+                                 dst[0] + srcSliceY * dstStride[0], dstStride[0],           \
+                                 src[0], srcStride[0],                                      \
+                                 src[1], srcStride[1],                                      \
+                                 src[2], srcStride[2],                                      \
+                                 yuv2rgb_table,                                             \
+                                 c->yuv2rgb_y_offset >> 6,                                  \
+                                 c->yuv2rgb_y_coeff);                                       \
+    return 0;                                                                               \
+}                                                                                           \
+
+#define DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuvx)                                             \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, argb)                                                   \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, rgba)                                                   \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, abgr)                                                   \
+DECLARE_FF_YUVX_TO_RGBX_FUNCS(yuvx, bgra)                                                   \
+
+DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuv420p)
+DECLARE_FF_YUVX_TO_ALL_RGBX_FUNCS(yuv422p)
+
+#define DECLARE_FF_NVX_TO_RGBX_FUNCS(ifmt, ofmt)                                            \
+int ff_##ifmt##_to_##ofmt##_neon(int w, int h,                                              \
+                                 uint8_t *dst, int linesize,                                \
+                                 const uint8_t *srcY, int linesizeY,                        \
+                                 const uint8_t *srcC, int linesizeC,                        \
+                                 const int16_t *table,                                      \
+                                 int y_offset,                                              \
+                                 int y_coeff);                                              \
+                                                                                            \
+static int ifmt##_to_##ofmt##_neon_wrapper(SwsContext *c, const uint8_t *src[],             \
+                                           int srcStride[], int srcSliceY, int srcSliceH,   \
+                                           uint8_t *dst[], int dstStride[]) {               \
+    const int16_t yuv2rgb_table[] = { YUV_TO_RGB_TABLE };                                   \
+                                                                                            \
+    ff_##ifmt##_to_##ofmt##_neon(c->srcW, srcSliceH,                                        \
+                                 dst[0] + srcSliceY * dstStride[0], dstStride[0],           \
+                                 src[0], srcStride[0], src[1], srcStride[1],                \
+                                 yuv2rgb_table,                                             \
+                                 c->yuv2rgb_y_offset >> 6,                                  \
+                                 c->yuv2rgb_y_coeff);                                       \
+                                                                                            \
+    return 0;                                                                               \
+}                                                                                           \
+
+#define DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nvx)                                               \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, argb)                                                     \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, rgba)                                                     \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, abgr)                                                     \
+DECLARE_FF_NVX_TO_RGBX_FUNCS(nvx, bgra)                                                     \
+
+DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nv12)
+DECLARE_FF_NVX_TO_ALL_RGBX_FUNCS(nv21)
+
+/* We need a 16 pixel width alignment. This constraint can easily be removed
+ * for input reading but for the output which is 4-bytes per pixel (RGBA) the
+ * assembly might be writing as much as 4*15=60 extra bytes at the end of the
+ * line, which won't fit the 32-byte buffer alignment. */
+#define SET_FF_NVX_TO_RGBX_FUNC(ifmt, IFMT, ofmt, OFMT, accurate_rnd) do {                  \
+    if (c->srcFormat == AV_PIX_FMT_##IFMT                                                   \
+        && c->dstFormat == AV_PIX_FMT_##OFMT                                                \
+        && !(c->srcH & 1)                                                                   \
+        && !(c->srcW & 15)                                                                  \
+        && !accurate_rnd)                                                                   \
+        c->swscale = ifmt##_to_##ofmt##_neon_wrapper;                                       \
+} while (0)
+
+#define SET_FF_NVX_TO_ALL_RGBX_FUNC(nvx, NVX, accurate_rnd) do {                            \
+    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, argb, ARGB, accurate_rnd);                            \
+    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, rgba, RGBA, accurate_rnd);                            \
+    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, abgr, ABGR, accurate_rnd);                            \
+    SET_FF_NVX_TO_RGBX_FUNC(nvx, NVX, bgra, BGRA, accurate_rnd);                            \
+} while (0)
+
+static void get_unscaled_swscale_neon(SwsContext *c) {
+    int accurate_rnd = c->flags & SWS_ACCURATE_RND;
+
+    SET_FF_NVX_TO_ALL_RGBX_FUNC(nv12, NV12, accurate_rnd);
+    SET_FF_NVX_TO_ALL_RGBX_FUNC(nv21, NV21, accurate_rnd);
+    SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv420p, YUV420P, accurate_rnd);
+    SET_FF_NVX_TO_ALL_RGBX_FUNC(yuv422p, YUV422P, accurate_rnd);
+}
+
+void ff_get_unscaled_swscale_aarch64(SwsContext *c)
+{
+    int cpu_flags = av_get_cpu_flags();
+    if (have_neon(cpu_flags))
+        get_unscaled_swscale_neon(c);
+}
diff --git a/libswscale/aarch64/yuv2rgb_neon.S b/libswscale/aarch64/yuv2rgb_neon.S
new file mode 100644
index 0000000..31cf651
--- /dev/null
+++ b/libswscale/aarch64/yuv2rgb_neon.S
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2015 Matthieu Bouron <matthieu.bouron stupeflix.com>
+ * Copyright (c) 2015 Clément Bœsch <clement stupeflix.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+.macro load_args_nv12
+    ldr                 x8,  [sp]                                       // table
+    ldr                 w9,  [sp, #8]                                   // y_offset
+    ldr                 w10, [sp, #16]                                  // y_coeff
+    ld1                 {v1.1D}, [x8]
+    dup                 v0.8H, w10
+    dup                 v3.8H, w9
+    sub                 w3, w3, w0, lsl #2                              // w3 = linesize  - width * 4 (padding)
+    sub                 w5, w5, w0                                      // w5 = linesizeY - width     (paddingY)
+    sub                 w7, w7, w0                                      // w7 = linesizeC - width     (paddingC)
+    neg                 w11, w0
+.endm
+
+.macro load_args_nv21
+    load_args_nv12
+.endm
+
+.macro load_args_yuv420p
+    ldr                 x13, [sp]                                       // srcV
+    ldr                 w14, [sp, #8]                                   // linesizeV
+    ldr                 x8,  [sp, #16]                                  // table
+    ldr                 w9,  [sp, #24]                                  // y_offset
+    ldr                 w10, [sp, #32]                                  // y_coeff
+    ld1                 {v1.1D}, [x8]
+    dup                 v0.8H, w10
+    dup                 v3.8H, w9
+    sub                 w3, w3, w0, lsl #2                              // w3 = linesize  - width * 4 (padding)
+    sub                 w5, w5, w0                                      // w5 = linesizeY - width     (paddingY)
+    sub                 w7,  w7,  w0, lsr #1                            // w7  = linesizeU - width / 2 (paddingU)
+    sub                 w14, w14, w0, lsr #1                            // w14 = linesizeV - width / 2 (paddingV)
+    lsr                 w11, w0, #1
+    neg                 w11, w11
+.endm
+
+.macro load_args_yuv422p
+    ldr                 x13, [sp]                                       // srcV
+    ldr                 w14, [sp, #8]                                   // linesizeV
+    ldr                 x8,  [sp, #16]                                  // table
+    ldr                 w9,  [sp, #24]                                  // y_offset
+    ldr                 w10, [sp, #32]                                  // y_coeff
+    ld1                 {v1.1D}, [x8]
+    dup                 v0.8H, w10
+    dup                 v3.8H, w9
+    sub                 w3, w3, w0, lsl #2                              // w3 = linesize  - width * 4 (padding)
+    sub                 w5, w5, w0                                      // w5 = linesizeY - width     (paddingY)
+    sub                 w7,  w7,  w0, lsr #1                            // w7  = linesizeU - width / 2 (paddingU)
+    sub                 w14, w14, w0, lsr #1                            // w14 = linesizeV - width / 2 (paddingV)
+.endm
+
+.macro load_chroma_nv12
+    ld2                 {v16.8B, v17.8B}, [x6], #16
+    ushll               v18.8H, v16.8B, #3
+    ushll               v19.8H, v17.8B, #3
+.endm
+
+.macro load_chroma_nv21
+    ld2                 {v16.8B, v17.8B}, [x6], #16
+    ushll               v19.8H, v16.8B, #3
+    ushll               v18.8H, v17.8B, #3
+.endm
+
+.macro load_chroma_yuv420p
+    ld1                 {v16.8B}, [ x6], #8
+    ld1                 {v17.8B}, [x13], #8
+    ushll               v18.8H, v16.8B, #3
+    ushll               v19.8H, v17.8B, #3
+.endm
+
+.macro load_chroma_yuv422p
+    load_chroma_yuv420p
+.endm
+
+.macro increment_nv12
+    ands                w15, w1, #1
+    csel                w16, w7, w11, ne                                // incC = (h & 1) ? paddingC : -width
+    add                 x6,  x6, w16, SXTW                              // srcC += incC
+.endm
+
+.macro increment_nv21
+    increment_nv12
+.endm
+
+.macro increment_yuv420p
+    ands                w15, w1, #1
+    csel                w16,  w7, w11, ne                               // incU = (h & 1) ? paddingU : -width/2
+    csel                w17, w14, w11, ne                               // incV = (h & 1) ? paddingV : -width/2
+    add                 x6,  x6,  w16, SXTW                             // srcU += incU
+    add                 x13, x13, w17, SXTW                             // srcV += incV
+.endm
+
+.macro increment_yuv422p
+    add                 x6,  x6,  w7, UXTW                              // srcU += incU
+    add                 x13, x13, w14, UXTW                             // srcV += incV
+.endm
+
+.macro compute_rgba r1 g1 b1 a1 r2 g2 b2 a2
+    add                 v20.8H, v26.8H, v20.8H                          // Y1 + R1
+    add                 v21.8H, v27.8H, v21.8H                          // Y2 + R2
+    add                 v22.8H, v26.8H, v22.8H                          // Y1 + G1
+    add                 v23.8H, v27.8H, v23.8H                          // Y2 + G2
+    add                 v24.8H, v26.8H, v24.8H                          // Y1 + B1
+    add                 v25.8H, v27.8H, v25.8H                          // Y2 + B2
+    sqrshrun            \r1, v20.8H, #1                                 // clip_u8((Y1 + R1) >> 1)
+    sqrshrun            \r2, v21.8H, #1                                 // clip_u8((Y2 + R2) >> 1)
+    sqrshrun            \g1, v22.8H, #1                                 // clip_u8((Y1 + G1) >> 1)
+    sqrshrun            \g2, v23.8H, #1                                 // clip_u8((Y2 + G2) >> 1)
+    sqrshrun            \b1, v24.8H, #1                                 // clip_u8((Y1 + B1) >> 1)
+    sqrshrun            \b2, v25.8H, #1                                 // clip_u8((Y2 + B2) >> 1)
+    movi                \a1, #255
+    movi                \a2, #255
+.endm
+
+.macro declare_func ifmt ofmt
+function ff_\ifmt\()_to_\ofmt\()_neon, export=1
+    load_args_\ifmt
+1:
+    mov                 w8, w0                                          // w8 = width
+2:
+    movi                v5.8H, #4, lsl #8                               // 128 * (1<<3)
+    load_chroma_\ifmt
+    sub                 v18.8H, v18.8H, v5.8H                           // U*(1<<3) - 128*(1<<3)
+    sub                 v19.8H, v19.8H, v5.8H                           // V*(1<<3) - 128*(1<<3)
+    zip1                v6.8H, v19.8H, v19.8H                           // V1
+    zip2                v7.8H, v19.8H, v19.8H                           // V2
+    zip1                v4.8H, v18.8H, v18.8H                           // U1
+    zip2                v5.8H, v18.8H, v18.8H                           // U2
+    sqdmulh             v20.8H, v6.8H, v1.H[0]                          // V1 * v2r             (R1)
+    sqdmulh             v21.8H, v7.8H, v1.H[0]                          // V2 * v2r             (R2)
+    sqdmulh             v22.8H, v4.8H, v1.H[1]                          // U1 * u2g
+    sqdmulh             v23.8H, v5.8H, v1.H[1]                          // U2 * u2g
+    sqdmulh             v6.8H,  v6.8H, v1.H[2]                          //            V1 * v2g
+    sqdmulh             v7.8H,  v7.8H, v1.H[2]                          //            V2 * v2g
+    add                 v22.8H, v22.8H, v6.8H                           // U1 * u2g + V1 * v2g  (G1)
+    add                 v23.8H, v23.8H, v7.8H                           // U2 * u2g + V2 * v2g  (G2)
+    sqdmulh             v24.8H, v4.8H, v1.H[3]                          // U1 * u2b             (B1)
+    sqdmulh             v25.8H, v5.8H, v1.H[3]                          // U2 * u2b             (B2)
+    ld1                 {v2.16B}, [x4], #16                             // load luma
+    ushll               v26.8H, v2.8B,  #3                              // Y1*(1<<3)
+    ushll2              v27.8H, v2.16B, #3                              // Y2*(1<<3)
+    sub                 v26.8H, v26.8H, v3.8H                           // Y1*(1<<3) - y_offset
+    sub                 v27.8H, v27.8H, v3.8H                           // Y2*(1<<3) - y_offset
+    sqdmulh             v26.8H, v26.8H, v0.8H                           // ((Y1*(1<<3) - y_offset) * y_coeff) >> 15
+    sqdmulh             v27.8H, v27.8H, v0.8H                           // ((Y2*(1<<3) - y_offset) * y_coeff) >> 15
+
+.ifc \ofmt,argb // 1 2 3 0
+    compute_rgba        v5.8B,v6.8B,v7.8B,v4.8B, v17.8B,v18.8B,v19.8B,v16.8B
+.endif
+
+.ifc \ofmt,rgba // 0 1 2 3
+    compute_rgba        v4.8B,v5.8B,v6.8B,v7.8B, v16.8B,v17.8B,v18.8B,v19.8B
+.endif
+
+.ifc \ofmt,abgr // 3 2 1 0
+    compute_rgba        v7.8B,v6.8B,v5.8B,v4.8B, v19.8B,v18.8B,v17.8B,v16.8B
+.endif
+
+.ifc \ofmt,bgra // 2 1 0 3
+    compute_rgba        v6.8B,v5.8B,v4.8B,v7.8B, v18.8B,v17.8B,v16.8B,v19.8B
+.endif
+
+    st4                 { v4.8B, v5.8B, v6.8B, v7.8B}, [x2], #32
+    st4                 {v16.8B,v17.8B,v18.8B,v19.8B}, [x2], #32
+    subs                w8, w8, #16                                     // width -= 16
+    b.gt                2b
+    add                 x2, x2, w3, UXTW                                // dst  += padding
+    add                 x4, x4, w5, UXTW                                // srcY += paddingY
+    increment_\ifmt
+    subs                w1, w1, #1                                      // height -= 1
+    b.gt                1b
+    ret
+endfunc
+.endm
+
+.macro declare_rgb_funcs ifmt
+    declare_func \ifmt, argb
+    declare_func \ifmt, rgba
+    declare_func \ifmt, abgr
+    declare_func \ifmt, bgra
+.endm
+
+declare_rgb_funcs nv12
+declare_rgb_funcs nv21
+declare_rgb_funcs yuv420p
+declare_rgb_funcs yuv422p
diff --git a/libswscale/swscale_internal.h b/libswscale/swscale_internal.h
index 1e29ec3..f0bab78 100644
--- a/libswscale/swscale_internal.h
+++ b/libswscale/swscale_internal.h
@@ -877,6 +877,7 @@ extern const AVClass ff_sws_context_class;
 void ff_get_unscaled_swscale(SwsContext *c);
 void ff_get_unscaled_swscale_ppc(SwsContext *c);
 void ff_get_unscaled_swscale_arm(SwsContext *c);
+void ff_get_unscaled_swscale_aarch64(SwsContext *c);
 
 /**
  * Return function pointer to fastest main scaler path function depending
diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c
index 6d1a384..4d6cfd1 100644
--- a/libswscale/swscale_unscaled.c
+++ b/libswscale/swscale_unscaled.c
@@ -1779,6 +1779,8 @@ void ff_get_unscaled_swscale(SwsContext *c)
         ff_get_unscaled_swscale_ppc(c);
      if (ARCH_ARM)
          ff_get_unscaled_swscale_arm(c);
+    if (ARCH_AARCH64)
+        ff_get_unscaled_swscale_aarch64(c);
 }
 
 /* Convert the palette to the same packed 32-bit format as the palette */
-- 
2.7.1



More information about the ffmpeg-devel mailing list