[FFmpeg-devel] [PATCH] avcodec/mips: [loongson] mmi optimizations for VP9 put and avg functions
gxw
guxiwei-hf at loongson.cn
Tue Feb 19 05:02:28 EET 2019
VP9 decoding speed improved by about 109.3% (from 32 fps to 67 fps, tested on Loongson 3A3000).
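
For reference, the put/avg paths added here follow the usual VP9 8-tap
sub-pixel MC: each output pixel is an 8-tap convolution of the source,
rounded with (sum + 64) >> 7 and saturated to 8 bits; the avg variants then
do a rounding average with the bytes already in dst. A minimal scalar sketch
of one row follows (illustrative only; these helper names are not part of
the patch):

    #include <stdint.h>

    /* Hypothetical scalar model of what the MMI code vectorizes. */
    static void put_8tap_row_c(uint8_t *dst, const uint8_t *src,
                               const int16_t *filter, int w)
    {
        for (int x = 0; x < w; x++) {
            int sum = 0;
            for (int k = 0; k < 8; k++)
                sum += src[x + k - 3] * filter[k];   /* 8 taps around x */
            sum = (sum + 64) >> 7;                   /* filters sum to 128 */
            dst[x] = sum < 0 ? 0 : sum > 255 ? 255 : sum; /* saturate */
        }
    }

    static void avg_8tap_row_c(uint8_t *dst, const uint8_t *src,
                               const int16_t *filter, int w)
    {
        uint8_t tmp[64];                             /* VP9 width <= 64 */
        put_8tap_row_c(tmp, src, filter, w);
        for (int x = 0; x < w; x++)
            dst[x] = (tmp[x] + dst[x] + 1) >> 1;     /* rounding average */
    }

The MMI routines compute the same result four pixels per iteration, using
pmaddhw for the paired 16-bit multiplies and packushb for the final
saturation.
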
---
libavcodec/mips/Makefile | 1 +
libavcodec/mips/vp9_mc_mmi.c | 680 +++++++++++++++++++++++++++++++++++++
libavcodec/mips/vp9dsp_init_mips.c | 42 +++
libavcodec/mips/vp9dsp_mips.h | 50 +++
4 files changed, 773 insertions(+)
create mode 100644 libavcodec/mips/vp9_mc_mmi.c
diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
index c827649..c5b54d5 100644
--- a/libavcodec/mips/Makefile
+++ b/libavcodec/mips/Makefile
@@ -88,3 +88,4 @@ MMI-OBJS-$(CONFIG_VC1_DECODER) += mips/vc1dsp_mmi.o
MMI-OBJS-$(CONFIG_WMV2DSP) += mips/wmv2dsp_mmi.o
MMI-OBJS-$(CONFIG_HEVC_DECODER) += mips/hevcdsp_mmi.o
MMI-OBJS-$(CONFIG_VP3DSP) += mips/vp3dsp_idct_mmi.o
+MMI-OBJS-$(CONFIG_VP9_DECODER) += mips/vp9_mc_mmi.o
diff --git a/libavcodec/mips/vp9_mc_mmi.c b/libavcodec/mips/vp9_mc_mmi.c
new file mode 100644
index 0000000..145bbff
--- /dev/null
+++ b/libavcodec/mips/vp9_mc_mmi.c
@@ -0,0 +1,680 @@
+/*
+ * Copyright (c) 2019 gxw <guxiwei-hf at loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/vp9dsp.h"
+#include "libavutil/mips/mmiutils.h"
+#include "vp9dsp_mips.h"
+
+#define GET_DATA_H_MMI \
+ "pmaddhw %[ftmp4], %[ftmp4], %[filter1] \n\t" \
+ "pmaddhw %[ftmp5], %[ftmp5], %[filter2] \n\t" \
+ "paddw %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
+ "punpckhwd %[ftmp5], %[ftmp4], %[ftmp0] \n\t" \
+ "paddw %[ftmp4], %[ftmp4], %[ftmp5] \n\t" \
+ "pmaddhw %[ftmp6], %[ftmp6], %[filter1] \n\t" \
+ "pmaddhw %[ftmp7], %[ftmp7], %[filter2] \n\t" \
+ "paddw %[ftmp6], %[ftmp6], %[ftmp7] \n\t" \
+ "punpckhwd %[ftmp7], %[ftmp6], %[ftmp0] \n\t" \
+ "paddw %[ftmp6], %[ftmp6], %[ftmp7] \n\t" \
+ "punpcklwd %[srcl], %[ftmp4], %[ftmp6] \n\t" \
+ "pmaddhw %[ftmp8], %[ftmp8], %[filter1] \n\t" \
+ "pmaddhw %[ftmp9], %[ftmp9], %[filter2] \n\t" \
+ "paddw %[ftmp8], %[ftmp8], %[ftmp9] \n\t" \
+ "punpckhwd %[ftmp9], %[ftmp8], %[ftmp0] \n\t" \
+ "paddw %[ftmp8], %[ftmp8], %[ftmp9] \n\t" \
+ "pmaddhw %[ftmp10], %[ftmp10], %[filter1] \n\t" \
+ "pmaddhw %[ftmp11], %[ftmp11], %[filter2] \n\t" \
+ "paddw %[ftmp10], %[ftmp10], %[ftmp11] \n\t" \
+ "punpckhwd %[ftmp11], %[ftmp10], %[ftmp0] \n\t" \
+ "paddw %[ftmp10], %[ftmp10], %[ftmp11] \n\t" \
+ "punpcklwd %[srch], %[ftmp8], %[ftmp10] \n\t"
+
+#define GET_DATA_V_MMI \
+ "punpcklhw %[srcl], %[ftmp4], %[ftmp5] \n\t" \
+ "pmaddhw %[srcl], %[srcl], %[filter10] \n\t" \
+ "punpcklhw %[ftmp12], %[ftmp6], %[ftmp7] \n\t" \
+ "pmaddhw %[ftmp12], %[ftmp12], %[filter32] \n\t" \
+ "paddw %[srcl], %[srcl], %[ftmp12] \n\t" \
+ "punpcklhw %[ftmp12], %[ftmp8], %[ftmp9] \n\t" \
+ "pmaddhw %[ftmp12], %[ftmp12], %[filter54] \n\t" \
+ "paddw %[srcl], %[srcl], %[ftmp12] \n\t" \
+ "punpcklhw %[ftmp12], %[ftmp10], %[ftmp11] \n\t" \
+ "pmaddhw %[ftmp12], %[ftmp12], %[filter76] \n\t" \
+ "paddw %[srcl], %[srcl], %[ftmp12] \n\t" \
+ "punpckhhw %[srch], %[ftmp4], %[ftmp5] \n\t" \
+ "pmaddhw %[srch], %[srch], %[filter10] \n\t" \
+ "punpckhhw %[ftmp12], %[ftmp6], %[ftmp7] \n\t" \
+ "pmaddhw %[ftmp12], %[ftmp12], %[filter32] \n\t" \
+ "paddw %[srch], %[srch], %[ftmp12] \n\t" \
+ "punpckhhw %[ftmp12], %[ftmp8], %[ftmp9] \n\t" \
+ "pmaddhw %[ftmp12], %[ftmp12], %[filter54] \n\t" \
+ "paddw %[srch], %[srch], %[ftmp12] \n\t" \
+ "punpckhhw %[ftmp12], %[ftmp10], %[ftmp11] \n\t" \
+ "pmaddhw %[ftmp12], %[ftmp12], %[filter76] \n\t" \
+ "paddw %[srch], %[srch], %[ftmp12] \n\t"
+
+#define ROUND_POWER_OF_TWO_MMI \
+ /* Add 64 */ \
+ "li %[tmp0], 0x40 \n\t" \
+ "dmtc1 %[tmp0], %[ftmp6] \n\t" \
+ "punpcklwd %[ftmp6], %[ftmp6], %[ftmp6] \n\t" \
+ "paddw %[srcl], %[srcl], %[ftmp6] \n\t" \
+ "paddw %[srch], %[srch], %[ftmp6] \n\t" \
+ /* Arithmetic right shift 7 bits */ \
+ "li %[tmp0], 0x07 \n\t" \
+ "dmtc1 %[tmp0], %[ftmp5] \n\t" \
+ "psraw %[srcl], %[srcl], %[ftmp5] \n\t" \
+ "psraw %[srch], %[srch], %[ftmp5] \n\t"
+
+#define CLIP_PIXEL_MMI \
+ /* Saturating pack to unsigned 8-bit */ \
+ "packsswh %[srcl], %[srcl], %[srch] \n\t" \
+ "packushb %[ftmp12], %[srcl], %[ftmp0] \n\t"
+
+static void convolve_horiz_mmi(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int16_t *filter_x, int32_t w,
+ int32_t h)
+{
+ double ftmp[14];
+ uint32_t tmp[2];
+ src -= 3;
+ src_stride -= w;
+ dst_stride -= w;
+ __asm__ volatile (
+ "move %[tmp1], %[width] \n\t"
+ "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "gsldlc1 %[filter1], 0x03(%[filter]) \n\t"
+ "gsldrc1 %[filter1], 0x00(%[filter]) \n\t"
+ "gsldlc1 %[filter2], 0x0b(%[filter]) \n\t"
+ "gsldrc1 %[filter2], 0x08(%[filter]) \n\t"
+ "1: \n\t"
+ /* Load the 8 source pixels for each of 4 outputs */
+ "gsldlc1 %[ftmp5], 0x07(%[src]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[src]) \n\t"
+ "gsldlc1 %[ftmp7], 0x08(%[src]) \n\t"
+ "gsldrc1 %[ftmp7], 0x01(%[src]) \n\t"
+ "gsldlc1 %[ftmp9], 0x09(%[src]) \n\t"
+ "gsldrc1 %[ftmp9], 0x02(%[src]) \n\t"
+ "gsldlc1 %[ftmp11], 0x0A(%[src]) \n\t"
+ "gsldrc1 %[ftmp11], 0x03(%[src]) \n\t"
+ "punpcklbh %[ftmp4], %[ftmp5], %[ftmp0] \n\t"
+ "punpckhbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp6], %[ftmp7], %[ftmp0] \n\t"
+ "punpckhbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp8], %[ftmp9], %[ftmp0] \n\t"
+ "punpckhbh %[ftmp9], %[ftmp9], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp10], %[ftmp11], %[ftmp0] \n\t"
+ "punpckhbh %[ftmp11], %[ftmp11], %[ftmp0] \n\t"
+ PTR_ADDIU "%[width], %[width], -0x04 \n\t"
+ /* Apply the 8-tap filter */
+ GET_DATA_H_MMI
+ ROUND_POWER_OF_TWO_MMI
+ CLIP_PIXEL_MMI
+ "swc1 %[ftmp12], 0x00(%[dst]) \n\t"
+ PTR_ADDIU "%[dst], %[dst], 0x04 \n\t"
+ PTR_ADDIU "%[src], %[src], 0x04 \n\t"
+ /* Loop count */
+ "bnez %[width], 1b \n\t"
+ "move %[width], %[tmp1] \n\t"
+ PTR_ADDU "%[src], %[src], %[src_stride] \n\t"
+ PTR_ADDU "%[dst], %[dst], %[dst_stride] \n\t"
+ PTR_ADDIU "%[height], %[height], -0x01 \n\t"
+ "bnez %[height], 1b \n\t"
+ : [srcl]"=&f"(ftmp[0]), [srch]"=&f"(ftmp[1]),
+ [filter1]"=&f"(ftmp[2]), [filter2]"=&f"(ftmp[3]),
+ [ftmp0]"=&f"(ftmp[4]), [ftmp4]"=&f"(ftmp[5]),
+ [ftmp5]"=&f"(ftmp[6]), [ftmp6]"=&f"(ftmp[7]),
+ [ftmp7]"=&f"(ftmp[8]), [ftmp8]"=&f"(ftmp[9]),
+ [ftmp9]"=&f"(ftmp[10]), [ftmp10]"=&f"(ftmp[11]),
+ [ftmp11]"=&f"(ftmp[12]), [ftmp12]"=&f"(ftmp[13]),
+ [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]),
+ [src]"+&r"(src), [width]"+&r"(w),
+ [dst]"+&r"(dst), [height]"+&r"(h)
+ : [filter]"r"(filter_x),
+ [src_stride]"r"((mips_reg)src_stride),
+ [dst_stride]"r"((mips_reg)dst_stride)
+ : "memory"
+ );
+}
+
+static void convolve_vert_mmi(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int16_t *filter_y, int32_t w,
+ int32_t h)
+{
+ double ftmp[16];
+ uint32_t tmp[1];
+ ptrdiff_t addr = src_stride;
+ src_stride -= w;
+ dst_stride -= w;
+
+ __asm__ volatile (
+ "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "gsldlc1 %[ftmp4], 0x03(%[filter]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[filter]) \n\t"
+ "gsldlc1 %[ftmp5], 0x0b(%[filter]) \n\t"
+ "gsldrc1 %[ftmp5], 0x08(%[filter]) \n\t"
+ "punpcklwd %[filter10], %[ftmp4], %[ftmp4] \n\t"
+ "punpckhwd %[filter32], %[ftmp4], %[ftmp4] \n\t"
+ "punpcklwd %[filter54], %[ftmp5], %[ftmp5] \n\t"
+ "punpckhwd %[filter76], %[ftmp5], %[ftmp5] \n\t"
+ "1: \n\t"
+ /* Load 8 bytes from each of the 8 tap rows */
+ "gsldlc1 %[ftmp4], 0x07(%[src]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[src]) \n\t"
+ PTR_ADDU "%[tmp0], %[src], %[addr] \n\t"
+ "gsldlc1 %[ftmp5], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp6], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp6], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp7], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp8], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp9], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp9], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp10], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp10], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp11], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp11], 0x00(%[tmp0]) \n\t"
+ "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp8], %[ftmp8], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp9], %[ftmp9], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp10], %[ftmp10], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp11], %[ftmp11], %[ftmp0] \n\t"
+ PTR_ADDIU "%[width], %[width], -0x04 \n\t"
+ /* Apply the 8-tap filter */
+ GET_DATA_V_MMI
+ ROUND_POWER_OF_TWO_MMI
+ CLIP_PIXEL_MMI
+ "swc1 %[ftmp12], 0x00(%[dst]) \n\t"
+ PTR_ADDIU "%[dst], %[dst], 0x04 \n\t"
+ PTR_ADDIU "%[src], %[src], 0x04 \n\t"
+ /* Loop count */
+ "bnez %[width], 1b \n\t"
+ PTR_SUBU "%[width], %[addr], %[src_stride] \n\t"
+ PTR_ADDU "%[src], %[src], %[src_stride] \n\t"
+ PTR_ADDU "%[dst], %[dst], %[dst_stride] \n\t"
+ PTR_ADDIU "%[height], %[height], -0x01 \n\t"
+ "bnez %[height], 1b \n\t"
+ : [srcl]"=&f"(ftmp[0]), [srch]"=&f"(ftmp[1]),
+ [filter10]"=&f"(ftmp[2]), [filter32]"=&f"(ftmp[3]),
+ [filter54]"=&f"(ftmp[4]), [filter76]"=&f"(ftmp[5]),
+ [ftmp0]"=&f"(ftmp[6]), [ftmp4]"=&f"(ftmp[7]),
+ [ftmp5]"=&f"(ftmp[8]), [ftmp6]"=&f"(ftmp[9]),
+ [ftmp7]"=&f"(ftmp[10]), [ftmp8]"=&f"(ftmp[11]),
+ [ftmp9]"=&f"(ftmp[12]), [ftmp10]"=&f"(ftmp[13]),
+ [ftmp11]"=&f"(ftmp[14]), [ftmp12]"=&f"(ftmp[15]),
+ [src]"+&r"(src), [dst]"+&r"(dst),
+ [width]"+&r"(w), [height]"+&r"(h),
+ [tmp0]"=&r"(tmp[0])
+ : [filter]"r"(filter_y),
+ [src_stride]"r"((mips_reg)src_stride),
+ [dst_stride]"r"((mips_reg)dst_stride),
+ [addr]"r"((mips_reg)addr)
+ : "memory"
+ );
+}
+
+static void convolve_avg_horiz_mmi(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int16_t *filter_x, int32_t w,
+ int32_t h)
+{
+ double ftmp[14];
+ uint32_t tmp[2];
+ src -= 3;
+ src_stride -= w;
+ dst_stride -= w;
+
+ __asm__ volatile (
+ "move %[tmp1], %[width] \n\t"
+ "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "gsldlc1 %[filter1], 0x03(%[filter]) \n\t"
+ "gsldrc1 %[filter1], 0x00(%[filter]) \n\t"
+ "gsldlc1 %[filter2], 0x0b(%[filter]) \n\t"
+ "gsldrc1 %[filter2], 0x08(%[filter]) \n\t"
+ "1: \n\t"
+ /* Load the 8 source pixels for each of 4 outputs */
+ "gsldlc1 %[ftmp5], 0x07(%[src]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[src]) \n\t"
+ "gsldlc1 %[ftmp7], 0x08(%[src]) \n\t"
+ "gsldrc1 %[ftmp7], 0x01(%[src]) \n\t"
+ "gsldlc1 %[ftmp9], 0x09(%[src]) \n\t"
+ "gsldrc1 %[ftmp9], 0x02(%[src]) \n\t"
+ "gsldlc1 %[ftmp11], 0x0A(%[src]) \n\t"
+ "gsldrc1 %[ftmp11], 0x03(%[src]) \n\t"
+ "punpcklbh %[ftmp4], %[ftmp5], %[ftmp0] \n\t"
+ "punpckhbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp6], %[ftmp7], %[ftmp0] \n\t"
+ "punpckhbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp8], %[ftmp9], %[ftmp0] \n\t"
+ "punpckhbh %[ftmp9], %[ftmp9], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp10], %[ftmp11], %[ftmp0] \n\t"
+ "punpckhbh %[ftmp11], %[ftmp11], %[ftmp0] \n\t"
+ PTR_ADDIU "%[width], %[width], -0x04 \n\t"
+ /* Apply the 8-tap filter */
+ GET_DATA_H_MMI
+ ROUND_POWER_OF_TWO_MMI
+ CLIP_PIXEL_MMI
+ "punpcklbh %[ftmp12], %[ftmp12], %[ftmp0] \n\t"
+ "gsldlc1 %[ftmp4], 0x07(%[dst]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[dst]) \n\t"
+ "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
+ "paddh %[ftmp12], %[ftmp12], %[ftmp4] \n\t"
+ "li %[tmp0], 0x10001 \n\t"
+ "dmtc1 %[tmp0], %[ftmp5] \n\t"
+ "punpcklhw %[ftmp5], %[ftmp5], %[ftmp5] \n\t"
+ "paddh %[ftmp12], %[ftmp12], %[ftmp5] \n\t"
+ "psrah %[ftmp12], %[ftmp12], %[ftmp5] \n\t"
+ "packushb %[ftmp12], %[ftmp12], %[ftmp0] \n\t"
+ "swc1 %[ftmp12], 0x00(%[dst]) \n\t"
+ PTR_ADDIU "%[dst], %[dst], 0x04 \n\t"
+ PTR_ADDIU "%[src], %[src], 0x04 \n\t"
+ /* Loop count */
+ "bnez %[width], 1b \n\t"
+ "move %[width], %[tmp1] \n\t"
+ PTR_ADDU "%[src], %[src], %[src_stride] \n\t"
+ PTR_ADDU "%[dst], %[dst], %[dst_stride] \n\t"
+ PTR_ADDIU "%[height], %[height], -0x01 \n\t"
+ "bnez %[height], 1b \n\t"
+ : [srcl]"=&f"(ftmp[0]), [srch]"=&f"(ftmp[1]),
+ [filter1]"=&f"(ftmp[2]), [filter2]"=&f"(ftmp[3]),
+ [ftmp0]"=&f"(ftmp[4]), [ftmp4]"=&f"(ftmp[5]),
+ [ftmp5]"=&f"(ftmp[6]), [ftmp6]"=&f"(ftmp[7]),
+ [ftmp7]"=&f"(ftmp[8]), [ftmp8]"=&f"(ftmp[9]),
+ [ftmp9]"=&f"(ftmp[10]), [ftmp10]"=&f"(ftmp[11]),
+ [ftmp11]"=&f"(ftmp[12]), [ftmp12]"=&f"(ftmp[13]),
+ [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]),
+ [src]"+&r"(src), [width]"+&r"(w),
+ [dst]"+&r"(dst), [height]"+&r"(h)
+ : [filter]"r"(filter_x),
+ [src_stride]"r"((mips_reg)src_stride),
+ [dst_stride]"r"((mips_reg)dst_stride)
+ : "memory"
+ );
+}
+
+static void convolve_avg_vert_mmi(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int16_t *filter_y, int32_t w,
+ int32_t h)
+{
+ double ftmp[16];
+ uint32_t tmp[1];
+ ptrdiff_t addr = src_stride;
+ src_stride -= w;
+ dst_stride -= w;
+
+ __asm__ volatile (
+ "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "gsldlc1 %[ftmp4], 0x03(%[filter]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[filter]) \n\t"
+ "gsldlc1 %[ftmp5], 0x0b(%[filter]) \n\t"
+ "gsldrc1 %[ftmp5], 0x08(%[filter]) \n\t"
+ "punpcklwd %[filter10], %[ftmp4], %[ftmp4] \n\t"
+ "punpckhwd %[filter32], %[ftmp4], %[ftmp4] \n\t"
+ "punpcklwd %[filter54], %[ftmp5], %[ftmp5] \n\t"
+ "punpckhwd %[filter76], %[ftmp5], %[ftmp5] \n\t"
+ "1: \n\t"
+ /* Load 8 bytes from each of the 8 tap rows */
+ "gsldlc1 %[ftmp4], 0x07(%[src]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[src]) \n\t"
+ PTR_ADDU "%[tmp0], %[src], %[addr] \n\t"
+ "gsldlc1 %[ftmp5], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp5], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp6], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp6], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp7], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp7], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp8], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp8], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp9], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp9], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp10], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp10], 0x00(%[tmp0]) \n\t"
+ PTR_ADDU "%[tmp0], %[tmp0], %[addr] \n\t"
+ "gsldlc1 %[ftmp11], 0x07(%[tmp0]) \n\t"
+ "gsldrc1 %[ftmp11], 0x00(%[tmp0]) \n\t"
+ "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp5], %[ftmp5], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp6], %[ftmp6], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp7], %[ftmp7], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp8], %[ftmp8], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp9], %[ftmp9], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp10], %[ftmp10], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp11], %[ftmp11], %[ftmp0] \n\t"
+ PTR_ADDIU "%[width], %[width], -0x04 \n\t"
+ /* Apply the 8-tap filter */
+ GET_DATA_V_MMI
+ ROUND_POWER_OF_TWO_MMI
+ CLIP_PIXEL_MMI
+ "punpcklbh %[ftmp12], %[ftmp12], %[ftmp0] \n\t"
+ "gsldlc1 %[ftmp4], 0x07(%[dst]) \n\t"
+ "gsldrc1 %[ftmp4], 0x00(%[dst]) \n\t"
+ "punpcklbh %[ftmp4], %[ftmp4], %[ftmp0] \n\t"
+ "paddh %[ftmp12], %[ftmp12], %[ftmp4] \n\t"
+ "li %[tmp0], 0x10001 \n\t"
+ "dmtc1 %[tmp0], %[ftmp5] \n\t"
+ "punpcklhw %[ftmp5], %[ftmp5], %[ftmp5] \n\t"
+ "paddh %[ftmp12], %[ftmp12], %[ftmp5] \n\t"
+ "psrah %[ftmp12], %[ftmp12], %[ftmp5] \n\t"
+ "packushb %[ftmp12], %[ftmp12], %[ftmp0] \n\t"
+ "swc1 %[ftmp12], 0x00(%[dst]) \n\t"
+ PTR_ADDIU "%[dst], %[dst], 0x04 \n\t"
+ PTR_ADDIU "%[src], %[src], 0x04 \n\t"
+ /* Loop count */
+ "bnez %[width], 1b \n\t"
+ PTR_SUBU "%[width], %[addr], %[src_stride] \n\t"
+ PTR_ADDU "%[src], %[src], %[src_stride] \n\t"
+ PTR_ADDU "%[dst], %[dst], %[dst_stride] \n\t"
+ PTR_ADDIU "%[height], %[height], -0x01 \n\t"
+ "bnez %[height], 1b \n\t"
+ : [srcl]"=&f"(ftmp[0]), [srch]"=&f"(ftmp[1]),
+ [filter10]"=&f"(ftmp[2]), [filter32]"=&f"(ftmp[3]),
+ [filter54]"=&f"(ftmp[4]), [filter76]"=&f"(ftmp[5]),
+ [ftmp0]"=&f"(ftmp[6]), [ftmp4]"=&f"(ftmp[7]),
+ [ftmp5]"=&f"(ftmp[8]), [ftmp6]"=&f"(ftmp[9]),
+ [ftmp7]"=&f"(ftmp[10]), [ftmp8]"=&f"(ftmp[11]),
+ [ftmp9]"=&f"(ftmp[12]), [ftmp10]"=&f"(ftmp[13]),
+ [ftmp11]"=&f"(ftmp[14]), [ftmp12]"=&f"(ftmp[15]),
+ [src]"+&r"(src), [dst]"+&r"(dst),
+ [width]"+&r"(w), [height]"+&r"(h),
+ [tmp0]"=&r"(tmp[0])
+ : [filter]"r"(filter_y),
+ [src_stride]"r"((mips_reg)src_stride),
+ [dst_stride]"r"((mips_reg)dst_stride),
+ [addr]"r"((mips_reg)addr)
+ : "memory"
+ );
+}
+
+static void convolve_avg_mmi(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t w, int32_t h)
+{
+ double ftmp[4];
+ uint32_t tmp[2];
+ src_stride -= w;
+ dst_stride -= w;
+
+ __asm__ volatile (
+ "move %[tmp1], %[width] \n\t"
+ "xor %[ftmp0], %[ftmp0], %[ftmp0] \n\t"
+ "li %[tmp0], 0x10001 \n\t"
+ "dmtc1 %[tmp0], %[ftmp3] \n\t"
+ "punpcklhw %[ftmp3], %[ftmp3], %[ftmp3] \n\t"
+ "1: \n\t"
+ "gsldlc1 %[ftmp1], 0x07(%[src]) \n\t"
+ "gsldrc1 %[ftmp1], 0x00(%[src]) \n\t"
+ "gsldlc1 %[ftmp2], 0x07(%[dst]) \n\t"
+ "gsldrc1 %[ftmp2], 0x00(%[dst]) \n\t"
+ "punpcklbh %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
+ "punpcklbh %[ftmp2], %[ftmp2], %[ftmp0] \n\t"
+ "paddh %[ftmp1], %[ftmp1], %[ftmp2] \n\t"
+ "paddh %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
+ "psrah %[ftmp1], %[ftmp1], %[ftmp3] \n\t"
+ "packushb %[ftmp1], %[ftmp1], %[ftmp0] \n\t"
+ "swc1 %[ftmp1], 0x00(%[dst]) \n\t"
+ PTR_ADDIU "%[width], %[width], -0x04 \n\t"
+ PTR_ADDIU "%[dst], %[dst], 0x04 \n\t"
+ PTR_ADDIU "%[src], %[src], 0x04 \n\t"
+ "bnez %[width], 1b \n\t"
+ "move %[width], %[tmp1] \n\t"
+ PTR_ADDU "%[dst], %[dst], %[dst_stride] \n\t"
+ PTR_ADDU "%[src], %[src], %[src_stride] \n\t"
+ PTR_ADDIU "%[height], %[height], -0x01 \n\t"
+ "bnez %[height], 1b \n\t"
+ : [ftmp0]"=&f"(ftmp[0]), [ftmp1]"=&f"(ftmp[1]),
+ [ftmp2]"=&f"(ftmp[2]), [ftmp3]"=&f"(ftmp[3]),
+ [tmp0]"=&r"(tmp[0]), [tmp1]"=&r"(tmp[1]),
+ [src]"+&r"(src), [dst]"+&r"(dst),
+ [width]"+&r"(w), [height]"+&r"(h)
+ : [src_stride]"r"((mips_reg)src_stride),
+ [dst_stride]"r"((mips_reg)dst_stride)
+ : "memory"
+ );
+}
+
+#define VP9_COMMON_MIPS_MMI_FUNC(SIZE) \
+static void common_hz_8t_##SIZE##w_mmi(const uint8_t *src, int32_t src_stride, \
+ uint8_t *dst, int32_t dst_stride, \
+ const int16_t *filter, int32_t h) \
+{ \
+ convolve_horiz_mmi(src, src_stride, dst, dst_stride, filter, SIZE, h); \
+} \
+ \
+static void common_vt_8t_##SIZE##w_mmi(const uint8_t *src, int32_t src_stride, \
+ uint8_t *dst, int32_t dst_stride, \
+ const int16_t *filter, int32_t h) \
+{ \
+ src -= (3 * src_stride); \
+ convolve_vert_mmi(src, src_stride, dst, dst_stride, filter, SIZE, h); \
+} \
+ \
+static void common_hv_8ht_8vt_##SIZE##w_mmi(const uint8_t *src, \
+ int32_t src_stride, uint8_t *dst, \
+ int32_t dst_stride, \
+ const int16_t *filter_horiz, \
+ const int16_t *filter_vert, \
+ int32_t h) \
+{ \
+ int tmp_h = h + 7; \
+ uint8_t temp[64 * 71]; \
+ src -= (3 * src_stride); \
+ convolve_horiz_mmi(src, src_stride, temp, 64, filter_horiz, SIZE, tmp_h); \
+ convolve_vert_mmi(temp, 64, dst, dst_stride, filter_vert, SIZE, h); \
+} \
+ \
+static void common_hz_8t_and_aver_dst_##SIZE##w_mmi(const uint8_t *src, \
+ int32_t src_stride, \
+ uint8_t *dst, \
+ int32_t dst_stride, \
+ const int16_t *filter, \
+ int32_t h) \
+{ \
+ convolve_avg_horiz_mmi(src, src_stride, dst, dst_stride, filter, SIZE, h); \
+} \
+ \
+static void common_vt_8t_and_aver_dst_##SIZE##w_mmi(const uint8_t *src, \
+ int32_t src_stride, \
+ uint8_t *dst, \
+ int32_t dst_stride, \
+ const int16_t *filter, \
+ int32_t h) \
+{ \
+ src -= (3 * src_stride); \
+ convolve_avg_vert_mmi(src, src_stride, dst, dst_stride, filter, SIZE, h); \
+} \
+ \
+static void common_hv_8ht_8vt_and_aver_dst_##SIZE##w_mmi(const uint8_t *src, \
+ int32_t src_stride, \
+ uint8_t *dst, \
+ int32_t dst_stride, \
+ const int16_t *filter_horiz, \
+ const int16_t *filter_vert, \
+ int32_t h) \
+{ \
+ uint8_t temp[64 * 64]; \
+ common_hv_8ht_8vt_##SIZE##w_mmi(src, src_stride, temp, 64, \
+ filter_horiz, filter_vert, h); \
+ convolve_avg_mmi(temp, 64, dst, dst_stride, SIZE, h); \
+}
+
+VP9_COMMON_MIPS_MMI_FUNC(64);
+VP9_COMMON_MIPS_MMI_FUNC(32);
+VP9_COMMON_MIPS_MMI_FUNC(16);
+VP9_COMMON_MIPS_MMI_FUNC(8);
+VP9_COMMON_MIPS_MMI_FUNC(4);
+
+static const int16_t vp9_subpel_filters_mmi[3][15][8] = {
+ [FILTER_8TAP_REGULAR] = {
+ {0, 1, -5, 126, 8, -3, 1, 0},
+ {-1, 3, -10, 122, 18, -6, 2, 0},
+ {-1, 4, -13, 118, 27, -9, 3, -1},
+ {-1, 4, -16, 112, 37, -11, 4, -1},
+ {-1, 5, -18, 105, 48, -14, 4, -1},
+ {-1, 5, -19, 97, 58, -16, 5, -1},
+ {-1, 6, -19, 88, 68, -18, 5, -1},
+ {-1, 6, -19, 78, 78, -19, 6, -1},
+ {-1, 5, -18, 68, 88, -19, 6, -1},
+ {-1, 5, -16, 58, 97, -19, 5, -1},
+ {-1, 4, -14, 48, 105, -18, 5, -1},
+ {-1, 4, -11, 37, 112, -16, 4, -1},
+ {-1, 3, -9, 27, 118, -13, 4, -1},
+ {0, 2, -6, 18, 122, -10, 3, -1},
+ {0, 1, -3, 8, 126, -5, 1, 0},
+ }, [FILTER_8TAP_SHARP] = {
+ {-1, 3, -7, 127, 8, -3, 1, 0},
+ {-2, 5, -13, 125, 17, -6, 3, -1},
+ {-3, 7, -17, 121, 27, -10, 5, -2},
+ {-4, 9, -20, 115, 37, -13, 6, -2},
+ {-4, 10, -23, 108, 48, -16, 8, -3},
+ {-4, 10, -24, 100, 59, -19, 9, -3},
+ {-4, 11, -24, 90, 70, -21, 10, -4},
+ {-4, 11, -23, 80, 80, -23, 11, -4},
+ {-4, 10, -21, 70, 90, -24, 11, -4},
+ {-3, 9, -19, 59, 100, -24, 10, -4},
+ {-3, 8, -16, 48, 108, -23, 10, -4},
+ {-2, 6, -13, 37, 115, -20, 9, -4},
+ {-2, 5, -10, 27, 121, -17, 7, -3},
+ {-1, 3, -6, 17, 125, -13, 5, -2},
+ {0, 1, -3, 8, 127, -7, 3, -1},
+ }, [FILTER_8TAP_SMOOTH] = {
+ {-3, -1, 32, 64, 38, 1, -3, 0},
+ {-2, -2, 29, 63, 41, 2, -3, 0},
+ {-2, -2, 26, 63, 43, 4, -4, 0},
+ {-2, -3, 24, 62, 46, 5, -4, 0},
+ {-2, -3, 21, 60, 49, 7, -4, 0},
+ {-1, -4, 18, 59, 51, 9, -4, 0},
+ {-1, -4, 16, 57, 53, 12, -4, -1},
+ {-1, -4, 14, 55, 55, 14, -4, -1},
+ {-1, -4, 12, 53, 57, 16, -4, -1},
+ {0, -4, 9, 51, 59, 18, -4, -1},
+ {0, -4, 7, 49, 60, 21, -3, -2},
+ {0, -4, 5, 46, 62, 24, -3, -2},
+ {0, -4, 4, 43, 63, 26, -2, -2},
+ {0, -3, 2, 41, 63, 29, -2, -2},
+ {0, -3, 1, 38, 64, 32, -1, -3},
+ }
+};
+
+#define VP9_8TAP_MIPS_MMI_FUNC(SIZE, TYPE, TYPE_IDX) \
+void ff_put_8tap_##TYPE##_##SIZE##h_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int16_t *filter = vp9_subpel_filters_mmi[TYPE_IDX][mx-1]; \
+ \
+ common_hz_8t_##SIZE##w_mmi(src, srcstride, dst, dststride, filter, h); \
+} \
+ \
+void ff_put_8tap_##TYPE##_##SIZE##v_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int16_t *filter = vp9_subpel_filters_mmi[TYPE_IDX][my-1]; \
+ \
+ common_vt_8t_##SIZE##w_mmi(src, srcstride, dst, dststride, filter, h); \
+} \
+ \
+void ff_put_8tap_##TYPE##_##SIZE##hv_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int16_t *hfilter = vp9_subpel_filters_mmi[TYPE_IDX][mx-1]; \
+ const int16_t *vfilter = vp9_subpel_filters_mmi[TYPE_IDX][my-1]; \
+ \
+ common_hv_8ht_8vt_##SIZE##w_mmi(src, srcstride, dst, dststride, hfilter, \
+ vfilter, h); \
+} \
+ \
+void ff_avg_8tap_##TYPE##_##SIZE##h_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int16_t *filter = vp9_subpel_filters_mmi[TYPE_IDX][mx-1]; \
+ \
+ common_hz_8t_and_aver_dst_##SIZE##w_mmi(src, srcstride, dst, \
+ dststride, filter, h); \
+} \
+ \
+void ff_avg_8tap_##TYPE##_##SIZE##v_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int16_t *filter = vp9_subpel_filters_mmi[TYPE_IDX][my-1]; \
+ \
+ common_vt_8t_and_aver_dst_##SIZE##w_mmi(src, srcstride, dst, dststride, \
+ filter, h); \
+} \
+ \
+void ff_avg_8tap_##TYPE##_##SIZE##hv_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int16_t *hfilter = vp9_subpel_filters_mmi[TYPE_IDX][mx-1]; \
+ const int16_t *vfilter = vp9_subpel_filters_mmi[TYPE_IDX][my-1]; \
+ \
+ common_hv_8ht_8vt_and_aver_dst_##SIZE##w_mmi(src, srcstride, dst, \
+ dststride, hfilter, \
+ vfilter, h); \
+}
+
+VP9_8TAP_MIPS_MMI_FUNC(64, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_MIPS_MMI_FUNC(32, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_MIPS_MMI_FUNC(16, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_MIPS_MMI_FUNC(8, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_MIPS_MMI_FUNC(4, regular, FILTER_8TAP_REGULAR);
+
+VP9_8TAP_MIPS_MMI_FUNC(64, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_MIPS_MMI_FUNC(32, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_MIPS_MMI_FUNC(16, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_MIPS_MMI_FUNC(8, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_MIPS_MMI_FUNC(4, sharp, FILTER_8TAP_SHARP);
+
+VP9_8TAP_MIPS_MMI_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_MIPS_MMI_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_MIPS_MMI_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_MIPS_MMI_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_MIPS_MMI_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
+
+#undef VP9_8TAP_MIPS_MMI_FUNC
diff --git a/libavcodec/mips/vp9dsp_init_mips.c b/libavcodec/mips/vp9dsp_init_mips.c
index c8a4890..5990fa6 100644
--- a/libavcodec/mips/vp9dsp_init_mips.c
+++ b/libavcodec/mips/vp9dsp_init_mips.c
@@ -168,8 +168,50 @@ static av_cold void vp9dsp_init_msa(VP9DSPContext *dsp, int bpp)
}
#endif // #if HAVE_MSA
+#if HAVE_MMI
+static av_cold void vp9dsp_mc_init_mmi(VP9DSPContext *dsp)
+{
+#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = \
+ ff_##type##_8tap_smooth_##sz##dir##_mmi; \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = \
+ ff_##type##_8tap_regular_##sz##dir##_mmi; \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = \
+ ff_##type##_8tap_sharp_##sz##dir##_mmi;
+
+#define init_subpel2(idx, idxh, idxv, dir, type) \
+ init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
+ init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
+ init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
+ init_subpel1(3, idx, idxh, idxv, 8, dir, type); \
+ init_subpel1(4, idx, idxh, idxv, 4, dir, type)
+
+#define init_subpel3(idx, type) \
+ init_subpel2(idx, 1, 1, hv, type); \
+ init_subpel2(idx, 0, 1, v, type); \
+ init_subpel2(idx, 1, 0, h, type)
+
+ init_subpel3(0, put);
+ init_subpel3(1, avg);
+
+#undef init_subpel1
+#undef init_subpel2
+#undef init_subpel3
+}
+
+static av_cold void vp9dsp_init_mmi(VP9DSPContext *dsp, int bpp)
+{
+ if (bpp == 8) {
+ vp9dsp_mc_init_mmi(dsp);
+ }
+}
+#endif // #if HAVE_MMI
+
av_cold void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp)
{
+#if HAVE_MMI
+ vp9dsp_init_mmi(dsp, bpp);
+#endif // #if HAVE_MMI
#if HAVE_MSA
vp9dsp_init_msa(dsp, bpp);
#endif // #if HAVE_MSA
diff --git a/libavcodec/mips/vp9dsp_mips.h b/libavcodec/mips/vp9dsp_mips.h
index 4d73038..0b6ce7c 100644
--- a/libavcodec/mips/vp9dsp_mips.h
+++ b/libavcodec/mips/vp9dsp_mips.h
@@ -234,4 +234,54 @@ void ff_tm_16x16_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
void ff_tm_32x32_msa(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
const uint8_t *top);
+#define VP9_8TAP_MIPS_MMI_FUNC(SIZE, type, type_idx) \
+void ff_put_8tap_##type##_##SIZE##h_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_put_8tap_##type##_##SIZE##v_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_put_8tap_##type##_##SIZE##hv_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_avg_8tap_##type##_##SIZE##h_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_avg_8tap_##type##_##SIZE##v_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_avg_8tap_##type##_##SIZE##hv_mmi(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my);
+
+VP9_8TAP_MIPS_MMI_FUNC(64, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_MIPS_MMI_FUNC(32, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_MIPS_MMI_FUNC(16, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_MIPS_MMI_FUNC(8, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_MIPS_MMI_FUNC(4, regular, FILTER_8TAP_REGULAR);
+
+VP9_8TAP_MIPS_MMI_FUNC(64, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_MIPS_MMI_FUNC(32, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_MIPS_MMI_FUNC(16, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_MIPS_MMI_FUNC(8, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_MIPS_MMI_FUNC(4, sharp, FILTER_8TAP_SHARP);
+
+VP9_8TAP_MIPS_MMI_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_MIPS_MMI_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_MIPS_MMI_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_MIPS_MMI_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_MIPS_MMI_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
+#undef VP9_8TAP_MIPS_MMI_FUNC
+
#endif // #ifndef AVCODEC_MIPS_VP9DSP_MIPS_H
--
2.1.0