[FFmpeg-cvslog] vp9mc/x86: add AVX and AVX2 MC
James Almer
git at videolan.org
Thu Mar 16 21:27:40 EET 2017
ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Mon Sep 22 21:55:13 2014 -0300| [67922b4ee48b5a5850ebf2cb6fcddf5979a26f68] | committer: Anton Khirnov
vp9mc/x86: add AVX and AVX2 MC
Roughly 25% faster MC than ssse3 for block sizes 32 and 64.
Reviewed-by: Ronald S. Bultje <rsbultje at gmail.com>
Signed-off-by: James Almer <jamrial at gmail.com>
Signed-off-by: Anton Khirnov <anton at khirnov.net>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=67922b4ee48b5a5850ebf2cb6fcddf5979a26f68
---
libavcodec/x86/constants.c | 3 +-
libavcodec/x86/constants.h | 2 +-
libavcodec/x86/vp9dsp_init.c | 210 ++++++++++++++++++++++++++-----------------
libavcodec/x86/vp9mc.asm | 74 +++++++++------
4 files changed, 178 insertions(+), 111 deletions(-)
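
The patch follows FFmpeg's usual CPU-flag dispatch: init code probes the host once and overwrites function pointers with the widest implementation available. A minimal sketch of that pattern, with simplified stand-in types and flag constants rather than the real VP9DSPContext / AV_CPU_FLAG_* declarations:

    /*
     * Sketch of the runtime-dispatch pattern this patch extends.  The
     * types and the flag constant are stand-ins, not FFmpeg's real API.
     */
    #include <stddef.h>
    #include <stdint.h>

    typedef void (*vp9_fpel_fn)(uint8_t *dst, const uint8_t *src,
                                ptrdiff_t dst_stride, ptrdiff_t src_stride,
                                int h);

    struct dsp_ctx { vp9_fpel_fn put32; };

    #define CPU_FLAG_AVX 1        /* stand-in for AV_CPU_FLAG_AVX */

    static void put32_c(uint8_t *dst, const uint8_t *src,
                        ptrdiff_t ds, ptrdiff_t ss, int h) { /* C fallback */ }
    static void put32_avx(uint8_t *dst, const uint8_t *src,
                          ptrdiff_t ds, ptrdiff_t ss, int h) { /* SIMD path */ }

    static void dsp_init(struct dsp_ctx *dsp, int cpu_flags)
    {
        dsp->put32 = put32_c;            /* safe default first        */
        if (cpu_flags & CPU_FLAG_AVX)    /* widest supported ISA wins */
            dsp->put32 = put32_avx;
    }

ff_vp9dsp_init_x86 below does the same thing through the EXTERNAL_AVX/EXTERNAL_AVX2 checks and the init_fpel/init_subpel macro layers.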
diff --git a/libavcodec/x86/constants.c b/libavcodec/x86/constants.c
index 47f6ef5..6f7dd73 100644
--- a/libavcodec/x86/constants.c
+++ b/libavcodec/x86/constants.c
@@ -43,7 +43,8 @@ DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x004
DECLARE_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
-DECLARE_ALIGNED(16, const xmm_reg, ff_pw_256) = { 0x0100010001000100ULL, 0x0100010001000100ULL };
+DECLARE_ALIGNED(32, const ymm_reg, ff_pw_256) = { 0x0100010001000100ULL, 0x0100010001000100ULL,
+ 0x0100010001000100ULL, 0x0100010001000100ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_m1) = { 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL };
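
Widening ff_pw_256 from an xmm_reg to a 32-byte-aligned ymm_reg lets the AVX2 code mova the constant into a full 256-bit register (it is loaded into m6/m13 in the vp9mc.asm hunks below). The value 256 appears to be what makes the pmulhrsw-based rounding in the filter loops work: pmulhrsw computes (a * b + 0x4000) >> 15 per signed 16-bit lane, so b == 256 is exactly a rounded shift right by 7, undoing the 7-bit scale of the 8-tap coefficients. A scalar model of that identity (a sketch only; the asm does 8 or 16 lanes at once):

    #include <assert.h>
    #include <stdint.h>

    /* One lane of pmulhrsw: (a * b + 0x4000) >> 15, signed 16-bit. */
    static int16_t pmulhrsw_lane(int16_t a, int16_t b)
    {
        return (int16_t)(((int32_t)a * b + 0x4000) >> 15);
    }

    int main(void)
    {
        /* With b == 256 this is a rounded arithmetic shift right by 7. */
        for (int a = -16384; a <= 16383; a++)
            assert(pmulhrsw_lane((int16_t)a, 256) == ((a + 64) >> 7));
        return 0;
    }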
diff --git a/libavcodec/x86/constants.h b/libavcodec/x86/constants.h
index c3b8d50..59ff947 100644
--- a/libavcodec/x86/constants.h
+++ b/libavcodec/x86/constants.h
@@ -42,7 +42,7 @@ extern const xmm_reg ff_pw_64;
extern const uint64_t ff_pw_96;
extern const uint64_t ff_pw_128;
extern const uint64_t ff_pw_255;
-extern const xmm_reg ff_pw_256;
+extern const ymm_reg ff_pw_256;
extern const xmm_reg ff_pw_512;
extern const xmm_reg ff_pw_m1;
diff --git a/libavcodec/x86/vp9dsp_init.c b/libavcodec/x86/vp9dsp_init.c
index 1366296..8c4af83 100644
--- a/libavcodec/x86/vp9dsp_init.c
+++ b/libavcodec/x86/vp9dsp_init.c
@@ -45,6 +45,10 @@ fpel_func(avg, 8, mmxext);
fpel_func(avg, 16, sse2);
fpel_func(avg, 32, sse2);
fpel_func(avg, 64, sse2);
+fpel_func(put, 32, avx);
+fpel_func(put, 64, avx);
+fpel_func(avg, 32, avx2);
+fpel_func(avg, 64, avx2);
#undef fpel_func
#define mc_func(avg, sz, dir, opt) \
@@ -54,18 +58,19 @@ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst,
ptrdiff_t dst_stride, \
ptrdiff_t src_stride, \
int h, \
- const int8_t (*filter)[16])
+ const int8_t (*filter)[32])
-#define mc_funcs(sz) \
- mc_func(put, sz, h, ssse3); \
- mc_func(avg, sz, h, ssse3); \
- mc_func(put, sz, v, ssse3); \
- mc_func(avg, sz, v, ssse3)
+#define mc_funcs(sz, opt) \
+ mc_func(put, sz, h, opt); \
+ mc_func(avg, sz, h, opt); \
+ mc_func(put, sz, v, opt); \
+ mc_func(avg, sz, v, opt)
-mc_funcs(4);
-mc_funcs(8);
+mc_funcs(4, ssse3);
+mc_funcs(8, ssse3);
#if ARCH_X86_64
-mc_funcs(16);
+mc_funcs(16, ssse3);
+mc_funcs(32, avx2);
#endif
#undef mc_funcs
@@ -78,7 +83,7 @@ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
ptrdiff_t dst_stride, \
ptrdiff_t src_stride, \
int h, \
- const int8_t (*filter)[16]) \
+ const int8_t (*filter)[32]) \
{ \
ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## hsz ## _ ## opt(dst, src, \
dst_stride, \
@@ -92,94 +97,109 @@ ff_vp9_ ## avg ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(uint8_t *dst, \
h, filter); \
}
-#define mc_rep_funcs(sz, hsz) \
- mc_rep_func(put, sz, hsz, h, ssse3); \
- mc_rep_func(avg, sz, hsz, h, ssse3); \
- mc_rep_func(put, sz, hsz, v, ssse3); \
- mc_rep_func(avg, sz, hsz, v, ssse3)
+#define mc_rep_funcs(sz, hsz, opt) \
+ mc_rep_func(put, sz, hsz, h, opt); \
+ mc_rep_func(avg, sz, hsz, h, opt); \
+ mc_rep_func(put, sz, hsz, v, opt); \
+ mc_rep_func(avg, sz, hsz, v, opt)
#if ARCH_X86_32
-mc_rep_funcs(16, 8);
+mc_rep_funcs(16, 8, ssse3);
+#endif
+mc_rep_funcs(32, 16, ssse3);
+mc_rep_funcs(64, 32, ssse3);
+#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+mc_rep_funcs(64, 32, avx2);
#endif
-mc_rep_funcs(32, 16);
-mc_rep_funcs(64, 32);
#undef mc_rep_funcs
#undef mc_rep_func
-extern const int8_t ff_filters_ssse3[3][15][4][16];
-
-#define filter_8tap_2d_fn(op, sz, f, fname) \
-static void \
-op ## _8tap_ ## fname ## _ ## sz ## hv_ssse3(uint8_t *dst, \
- const uint8_t *src, \
- ptrdiff_t dst_stride, \
- ptrdiff_t src_stride, \
- int h, int mx, int my) \
-{ \
- LOCAL_ALIGNED_16(uint8_t, temp, [71 * 64]); \
- ff_vp9_put_8tap_1d_h_ ## sz ## _ssse3(temp, src - 3 * src_stride, \
- 64, src_stride, \
- h + 7, \
- ff_filters_ssse3[f][mx - 1]); \
- ff_vp9_ ## op ## _8tap_1d_v_ ## sz ## _ssse3(dst, temp + 3 * 64, \
- dst_stride, 64, \
- h, \
- ff_filters_ssse3[f][my - 1]); \
+extern const int8_t ff_filters_ssse3[3][15][4][32];
+
+#define filter_8tap_2d_fn(op, sz, f, fname, align, opt) \
+static void \
+op ## _8tap_ ## fname ## _ ## sz ## hv_ ## opt(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t dst_stride, \
+ ptrdiff_t src_stride, \
+ int h, int mx, int my) \
+{ \
+ LOCAL_ALIGNED_ ## align(uint8_t, temp, [71 * 64]); \
+ ff_vp9_put_8tap_1d_h_ ## sz ## _ ## opt(temp, src - 3 * src_stride, \
+ 64, src_stride, \
+ h + 7, \
+ ff_filters_ssse3[f][mx - 1]); \
+ ff_vp9_ ## op ## _8tap_1d_v_ ## sz ## _ ## opt(dst, temp + 3 * 64, \
+ dst_stride, 64, \
+ h, \
+ ff_filters_ssse3[f][my - 1]); \
}
-#define filters_8tap_2d_fn(op, sz) \
- filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, regular) \
- filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, sharp) \
- filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth)
-
-#define filters_8tap_2d_fn2(op) \
- filters_8tap_2d_fn(op, 64) \
- filters_8tap_2d_fn(op, 32) \
- filters_8tap_2d_fn(op, 16) \
- filters_8tap_2d_fn(op, 8) \
- filters_8tap_2d_fn(op, 4)
-
-filters_8tap_2d_fn2(put)
-filters_8tap_2d_fn2(avg)
+#define filters_8tap_2d_fn(op, sz, align, opt) \
+ filter_8tap_2d_fn(op, sz, FILTER_8TAP_REGULAR, regular, align, opt) \
+ filter_8tap_2d_fn(op, sz, FILTER_8TAP_SHARP, sharp, align, opt) \
+ filter_8tap_2d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth, align, opt)
+
+#define filters_8tap_2d_fn2(op, align, opt) \
+ filters_8tap_2d_fn(op, 64, align, opt) \
+ filters_8tap_2d_fn(op, 32, align, opt) \
+ filters_8tap_2d_fn(op, 16, align, opt) \
+ filters_8tap_2d_fn(op, 8, align, opt) \
+ filters_8tap_2d_fn(op, 4, align, opt)
+
+filters_8tap_2d_fn2(put, 16, ssse3)
+filters_8tap_2d_fn2(avg, 16, ssse3)
+#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+filters_8tap_2d_fn(put, 64, 32, avx2)
+filters_8tap_2d_fn(put, 32, 32, avx2)
+filters_8tap_2d_fn(avg, 64, 32, avx2)
+filters_8tap_2d_fn(avg, 32, 32, avx2)
+#endif
#undef filters_8tap_2d_fn2
#undef filters_8tap_2d_fn
#undef filter_8tap_2d_fn
-#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar) \
-static void \
-op ## _8tap_ ## fname ## _ ## sz ## dir ## _ssse3(uint8_t *dst, \
- const uint8_t *src, \
- ptrdiff_t dst_stride, \
- ptrdiff_t src_stride, \
- int h, int mx, \
- int my) \
-{ \
- ff_vp9_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ssse3(dst, src, \
- dst_stride, \
- src_stride, h,\
- ff_filters_ssse3[f][dvar - 1]); \
+#define filter_8tap_1d_fn(op, sz, f, fname, dir, dvar, opt) \
+static void \
+op ## _8tap_ ## fname ## _ ## sz ## dir ## _ ## opt(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t dst_stride, \
+ ptrdiff_t src_stride, \
+ int h, int mx, \
+ int my) \
+{ \
+ ff_vp9_ ## op ## _8tap_1d_ ## dir ## _ ## sz ## _ ## opt(dst, src, \
+ dst_stride, \
+ src_stride, h,\
+ ff_filters_ssse3[f][dvar - 1]); \
}
-#define filters_8tap_1d_fn(op, sz, dir, dvar) \
- filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, regular, dir, dvar) \
- filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, sharp, dir, dvar) \
- filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth, dir, dvar)
-
-#define filters_8tap_1d_fn2(op, sz) \
- filters_8tap_1d_fn(op, sz, h, mx) \
- filters_8tap_1d_fn(op, sz, v, my)
-
-#define filters_8tap_1d_fn3(op) \
- filters_8tap_1d_fn2(op, 64) \
- filters_8tap_1d_fn2(op, 32) \
- filters_8tap_1d_fn2(op, 16) \
- filters_8tap_1d_fn2(op, 8) \
- filters_8tap_1d_fn2(op, 4)
-
-filters_8tap_1d_fn3(put)
-filters_8tap_1d_fn3(avg)
+#define filters_8tap_1d_fn(op, sz, dir, dvar, opt) \
+ filter_8tap_1d_fn(op, sz, FILTER_8TAP_REGULAR, regular, dir, dvar, opt) \
+ filter_8tap_1d_fn(op, sz, FILTER_8TAP_SHARP, sharp, dir, dvar, opt) \
+ filter_8tap_1d_fn(op, sz, FILTER_8TAP_SMOOTH, smooth, dir, dvar, opt)
+
+#define filters_8tap_1d_fn2(op, sz, opt) \
+ filters_8tap_1d_fn(op, sz, h, mx, opt) \
+ filters_8tap_1d_fn(op, sz, v, my, opt)
+
+#define filters_8tap_1d_fn3(op, opt) \
+ filters_8tap_1d_fn2(op, 64, opt) \
+ filters_8tap_1d_fn2(op, 32, opt) \
+ filters_8tap_1d_fn2(op, 16, opt) \
+ filters_8tap_1d_fn2(op, 8, opt) \
+ filters_8tap_1d_fn2(op, 4, opt)
+
+filters_8tap_1d_fn3(put, ssse3)
+filters_8tap_1d_fn3(avg, ssse3)
+#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+filters_8tap_1d_fn2(put, 64, avx2)
+filters_8tap_1d_fn2(put, 32, avx2)
+filters_8tap_1d_fn2(avg, 64, avx2)
+filters_8tap_1d_fn2(avg, 32, avx2)
+#endif
#undef filters_8tap_1d_fn
#undef filters_8tap_1d_fn2
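
The 2D (hv) path defined by filter_8tap_2d_fn above is two 1D passes through a stack temp: the horizontal pass filters h + 7 rows starting 3 rows above the block, and the vertical pass then consumes the middle h rows. A plain-C sketch of that control flow, with hypothetical filter_1d_h/filter_1d_v stubs standing in for the ff_vp9_*_8tap_1d_* asm entry points:

    #include <stddef.h>
    #include <stdint.h>

    static void filter_1d_h(uint8_t *dst, const uint8_t *src, ptrdiff_t ds,
                            ptrdiff_t ss, int h, const int8_t *f) { /* stub */ }
    static void filter_1d_v(uint8_t *dst, const uint8_t *src, ptrdiff_t ds,
                            ptrdiff_t ss, int h, const int8_t *f) { /* stub */ }

    static void put_8tap_hv_sketch(uint8_t *dst, const uint8_t *src,
                                   ptrdiff_t dst_stride, ptrdiff_t src_stride,
                                   int h, const int8_t *fh, const int8_t *fv)
    {
        uint8_t temp[71 * 64];   /* up to 64 rows plus the 8-tap margin */
        /* Start 3 rows early: the vertical 8-tap needs 3 rows of context
         * above (and 4 below) each output row. */
        filter_1d_h(temp, src - 3 * src_stride, 64, src_stride, h + 7, fh);
        /* The vertical pass skips the 3 warm-up rows when writing out. */
        filter_1d_v(dst, temp + 3 * 64, dst_stride, 64, h, fv);
    }

The only AVX2-specific twist is the align parameter: the temp buffer must be 32-byte aligned so ymm loads and stores on it are valid, hence LOCAL_ALIGNED_32 for the avx2 instantiations.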
@@ -205,9 +225,12 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = type ## _8tap_regular_ ## sz ## dir ## _ ## opt; \
dsp->mc[idx1][FILTER_8TAP_SHARP][idx2][idxh][idxv] = type ## _8tap_sharp_ ## sz ## dir ## _ ## opt
+#define init_subpel2_32_64(idx, idxh, idxv, dir, type, opt) \
+ init_subpel1(0, idx, idxh, idxv, 64, dir, type, opt); \
+ init_subpel1(1, idx, idxh, idxv, 32, dir, type, opt)
+
#define init_subpel2(idx, idxh, idxv, dir, type, opt) \
- init_subpel1(0, idx, idxh, idxv, 64, dir, type, opt); \
- init_subpel1(1, idx, idxh, idxv, 32, dir, type, opt); \
+ init_subpel2_32_64(idx, idxh, idxv, dir, type, opt); \
init_subpel1(2, idx, idxh, idxv, 16, dir, type, opt); \
init_subpel1(3, idx, idxh, idxv, 8, dir, type, opt); \
init_subpel1(4, idx, idxh, idxv, 4, dir, type, opt)
@@ -244,6 +267,25 @@ av_cold void ff_vp9dsp_init_x86(VP9DSPContext *dsp)
init_subpel3(1, avg, ssse3);
}
+ if (EXTERNAL_AVX(cpu_flags)) {
+ init_fpel(1, 0, 32, put, avx);
+ init_fpel(0, 0, 64, put, avx);
+ }
+
+ if (EXTERNAL_AVX2(cpu_flags)) {
+ init_fpel(1, 1, 32, avg, avx2);
+ init_fpel(0, 1, 64, avg, avx2);
+
+#if ARCH_X86_64 && HAVE_AVX2_EXTERNAL
+ init_subpel2_32_64(0, 1, 1, hv, put, avx2);
+ init_subpel2_32_64(0, 0, 1, v, put, avx2);
+ init_subpel2_32_64(0, 1, 0, h, put, avx2);
+ init_subpel2_32_64(1, 1, 1, hv, avg, avx2);
+ init_subpel2_32_64(1, 0, 1, v, avg, avx2);
+ init_subpel2_32_64(1, 1, 0, h, avg, avx2);
+#endif /* ARCH_X86_64 && HAVE_AVX2_EXTERNAL */
+ }
+
#undef init_fpel
#undef init_subpel1
#undef init_subpel2
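
Working one of the new init calls through the macro layers may help. Assuming the mc array layout visible in the init_subpel1 context above ([size][filter][put/avg][idxh][idxv], with size index 0 = 64x64 and 1 = 32x32), init_subpel2_32_64(0, 1, 1, hv, put, avx2) should expand to roughly:

    /*
     * Hand expansion, inferred from the init_subpel1 body shown above.
     * The SMOOTH lines are not visible in the hunk context and are
     * assumed to follow the same pattern as REGULAR and SHARP.
     */
    dsp->mc[0][FILTER_8TAP_SMOOTH ][0][1][1] = put_8tap_smooth_64hv_avx2;
    dsp->mc[0][FILTER_8TAP_REGULAR][0][1][1] = put_8tap_regular_64hv_avx2;
    dsp->mc[0][FILTER_8TAP_SHARP  ][0][1][1] = put_8tap_sharp_64hv_avx2;
    dsp->mc[1][FILTER_8TAP_SMOOTH ][0][1][1] = put_8tap_smooth_32hv_avx2;
    dsp->mc[1][FILTER_8TAP_REGULAR][0][1][1] = put_8tap_regular_32hv_avx2;
    dsp->mc[1][FILTER_8TAP_SHARP  ][0][1][1] = put_8tap_sharp_32hv_avx2;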
diff --git a/libavcodec/x86/vp9mc.asm b/libavcodec/x86/vp9mc.asm
index 41b2220..4f66ea1 100644
--- a/libavcodec/x86/vp9mc.asm
+++ b/libavcodec/x86/vp9mc.asm
@@ -22,17 +22,17 @@
%include "libavutil/x86/x86util.asm"
-SECTION_RODATA
+SECTION_RODATA 32
cextern pw_256
%macro F8_TAPS 8
-times 8 db %1, %2
-times 8 db %3, %4
-times 8 db %5, %6
-times 8 db %7, %8
+times 16 db %1, %2
+times 16 db %3, %4
+times 16 db %5, %6
+times 16 db %7, %8
%endmacro
-; int8_t ff_filters_ssse3[3][15][4][16]
+; int8_t ff_filters_ssse3[3][15][4][32]
const filters_ssse3 ; smooth
F8_TAPS -3, -1, 32, 64, 38, 1, -3, 0
F8_TAPS -2, -2, 29, 63, 41, 2, -3, 0
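
Each F8_TAPS row now repeats its (even, odd) coefficient pair 16 times instead of 8, filling 32 bytes per row so that a single ymm load broadcasts the pair into every 16-bit pmaddubsw lane; this is also why every filteryq offset in the hunks below doubles from +16/+32/+48 to +32/+64/+96. A scalar model of the new layout (taps is a hypothetical 8-entry coefficient array):

    #include <stdint.h>

    /* Model of `times 16 db even, odd` for the four tap pairs of one
     * filter: each pair is smeared across a full 32-byte row. */
    static void expand_filter_row(int8_t dst[4][32], const int8_t taps[8])
    {
        for (int pair = 0; pair < 4; pair++)      /* 4 pair rows per filter */
            for (int i = 0; i < 16; i++) {        /* 16 repeats == 32 bytes */
                dst[pair][2 * i]     = taps[2 * pair];
                dst[pair][2 * i + 1] = taps[2 * pair + 1];
            }
    }

SSE-width callers simply keep reading the first 16 bytes of each 32-byte row, so the same table serves both register sizes.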
@@ -90,9 +90,9 @@ cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filt
mova m6, [pw_256]
mova m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
- mova m8, [filteryq+16]
- mova m9, [filteryq+32]
- mova m10, [filteryq+48]
+ mova m8, [filteryq+32]
+ mova m9, [filteryq+64]
+ mova m10, [filteryq+96]
%endif
.loop:
movh m0, [srcq-3]
@@ -114,9 +114,9 @@ cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 11, dst, src, dstride, sstride, h, filt
pmaddubsw m4, m9
pmaddubsw m1, m10
%else
- pmaddubsw m2, [filteryq+16]
- pmaddubsw m4, [filteryq+32]
- pmaddubsw m1, [filteryq+48]
+ pmaddubsw m2, [filteryq+32]
+ pmaddubsw m4, [filteryq+64]
+ pmaddubsw m1, [filteryq+96]
%endif
paddw m0, m2
paddw m4, m1
@@ -150,9 +150,9 @@ filter_h_fn avg
cglobal vp9_%1_8tap_1d_h_ %+ %%px, 6, 6, 14, dst, src, dstride, sstride, h, filtery
mova m13, [pw_256]
mova m8, [filteryq+ 0]
- mova m9, [filteryq+16]
- mova m10, [filteryq+32]
- mova m11, [filteryq+48]
+ mova m9, [filteryq+32]
+ mova m10, [filteryq+64]
+ mova m11, [filteryq+96]
.loop:
movu m0, [srcq-3]
movu m1, [srcq-2]
@@ -198,6 +198,12 @@ INIT_XMM ssse3
filter_hx2_fn put
filter_hx2_fn avg
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+filter_hx2_fn put
+filter_hx2_fn avg
+%endif
+
%endif ; ARCH_X86_64
%macro filter_v_fn 1
@@ -215,9 +221,9 @@ cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery
sub srcq, sstride3q
mova m7, [filteryq+ 0]
%if ARCH_X86_64 && mmsize > 8
- mova m8, [filteryq+16]
- mova m9, [filteryq+32]
- mova m10, [filteryq+48]
+ mova m8, [filteryq+32]
+ mova m9, [filteryq+64]
+ mova m10, [filteryq+96]
%endif
.loop:
; FIXME maybe reuse loads from previous rows, or just more generally
@@ -242,9 +248,9 @@ cglobal vp9_%1_8tap_1d_v_ %+ %%px, 4, 7, 11, dst, src, dstride, sstride, filtery
pmaddubsw m4, m9
pmaddubsw m1, m10
%else
- pmaddubsw m2, [filteryq+16]
- pmaddubsw m4, [filteryq+32]
- pmaddubsw m1, [filteryq+48]
+ pmaddubsw m2, [filteryq+32]
+ pmaddubsw m4, [filteryq+64]
+ pmaddubsw m1, [filteryq+96]
%endif
paddw m0, m2
paddw m4, m1
@@ -282,9 +288,9 @@ cglobal vp9_%1_8tap_1d_v_ %+ %%px, 6, 8, 14, dst, src, dstride, sstride, h, filt
lea src4q, [srcq+sstrideq]
sub srcq, sstride3q
mova m8, [filteryq+ 0]
- mova m9, [filteryq+16]
- mova m10, [filteryq+32]
- mova m11, [filteryq+48]
+ mova m9, [filteryq+32]
+ mova m10, [filteryq+64]
+ mova m11, [filteryq+96]
.loop:
; FIXME maybe reuse loads from previous rows, or just
; more generally unroll this to prevent multiple loads of
@@ -334,6 +340,12 @@ INIT_XMM ssse3
filter_vx2_fn put
filter_vx2_fn avg
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+filter_vx2_fn put
+filter_vx2_fn avg
+%endif
+
%endif ; ARCH_X86_64
%macro fpel_fn 6
@@ -345,7 +357,7 @@ filter_vx2_fn avg
%define %%dstfn mova
%endif
-%if %2 <= 16
+%if %2 <= mmsize
cglobal vp9_%1%2, 5, 7, 4, dst, src, dstride, sstride, h, dstride3, sstride3
lea sstride3q, [sstrideq*3]
lea dstride3q, [dstrideq*3]
@@ -376,6 +388,8 @@ cglobal vp9_%1%2, 5, 5, 4, dst, src, dstride, sstride, h
%define d16 16
%define s16 16
+%define d32 32
+%define s32 32
INIT_MMX mmx
fpel_fn put, 4, strideq, strideq*2, stride3q, 4
fpel_fn put, 8, strideq, strideq*2, stride3q, 4
@@ -390,5 +404,15 @@ INIT_XMM sse2
fpel_fn avg, 16, strideq, strideq*2, stride3q, 4
fpel_fn avg, 32, mmsize, strideq, strideq+mmsize, 2
fpel_fn avg, 64, mmsize, mmsize*2, mmsize*3, 1
+INIT_YMM avx
+fpel_fn put, 32, strideq, strideq*2, stride3q, 4
+fpel_fn put, 64, mmsize, strideq, strideq+mmsize, 2
+%if HAVE_AVX2_EXTERNAL
+INIT_YMM avx2
+fpel_fn avg, 32, strideq, strideq*2, stride3q, 4
+fpel_fn avg, 64, mmsize, strideq, strideq+mmsize, 2
+%endif
%undef s16
%undef d16
+%undef s32
+%undef d32