[FFmpeg-devel] [PATCH] avcodec/mips: MSA (MIPS-SIMD-Arch) optimizations for HEVC uni copy, uni horizontal and uni vertical mc functions

shivraj.patil at imgtec.com shivraj.patil at imgtec.com
Mon May 4 14:21:34 CEST 2015


From: Shivraj Patil <shivraj.patil at imgtec.com>

Signed-off-by: Shivraj Patil <shivraj.patil at imgtec.com>
---
 libavcodec/mips/hevcdsp_init_mips.c |   26 +
 libavcodec/mips/hevcdsp_mips.h      |   40 +
 libavcodec/mips/hevcdsp_msa.c       | 1822 +++++++++++++++++++++++++++++++++++
 libavutil/mips/generic_macros_msa.h |  533 ++++++++++
 4 files changed, 2421 insertions(+)
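
For reference, the decoder reaches these functions through the
put_hevc_qpel_uni function-pointer table: the first index is derived from
the block width, the other two from whether the fractional motion vector
has a vertical and/or horizontal component. A minimal sketch of that
dispatch (illustrative only, not part of the patch; the width table
mirrors ff_hevc_pel_weight[] from hevcdec.c, and uni_mc_fn/run_uni_mc are
made-up names):

#include <stddef.h>
#include <stdint.h>

typedef void (*uni_mc_fn)(uint8_t *dst, ptrdiff_t dst_stride,
                          uint8_t *src, ptrdiff_t src_stride,
                          int height, intptr_t mx, intptr_t my, int width);

/* width -> first table index, as in ff_hevc_pel_weight[] */
static const uint8_t pel_weight_idx[65] = {
    [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5,
    [24] = 6, [32] = 7, [48] = 8, [64] = 9
};

static void run_uni_mc(uni_mc_fn tab[10][2][2], uint8_t *dst,
                       ptrdiff_t dst_stride, uint8_t *src,
                       ptrdiff_t src_stride, int height, int width,
                       intptr_t mx, intptr_t my)
{
    /* my selects vertical filtering, mx horizontal; when both fractions
     * are zero this falls through to the pel_pixels (copy) entries */
    tab[pel_weight_idx[width]][my ? 1 : 0][mx ? 1 : 0](
        dst, dst_stride, src, src_stride, height, mx, my, width);
}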

diff --git a/libavcodec/mips/hevcdsp_init_mips.c b/libavcodec/mips/hevcdsp_init_mips.c
index 4fec336..1e22f35 100644
--- a/libavcodec/mips/hevcdsp_init_mips.c
+++ b/libavcodec/mips/hevcdsp_init_mips.c
@@ -61,6 +61,32 @@ static av_cold void hevc_dsp_init_msa(HEVCDSPContext *c,
         c->put_hevc_qpel[7][1][1] = ff_hevc_put_hevc_qpel_hv32_8_msa;
         c->put_hevc_qpel[8][1][1] = ff_hevc_put_hevc_qpel_hv48_8_msa;
         c->put_hevc_qpel[9][1][1] = ff_hevc_put_hevc_qpel_hv64_8_msa;
+
+        c->put_hevc_qpel_uni[3][0][0] = ff_hevc_put_hevc_uni_pel_pixels8_8_msa;
+        c->put_hevc_qpel_uni[4][0][0] = ff_hevc_put_hevc_uni_pel_pixels12_8_msa;
+        c->put_hevc_qpel_uni[5][0][0] = ff_hevc_put_hevc_uni_pel_pixels16_8_msa;
+        c->put_hevc_qpel_uni[6][0][0] = ff_hevc_put_hevc_uni_pel_pixels24_8_msa;
+        c->put_hevc_qpel_uni[7][0][0] = ff_hevc_put_hevc_uni_pel_pixels32_8_msa;
+        c->put_hevc_qpel_uni[8][0][0] = ff_hevc_put_hevc_uni_pel_pixels48_8_msa;
+        c->put_hevc_qpel_uni[9][0][0] = ff_hevc_put_hevc_uni_pel_pixels64_8_msa;
+
+        c->put_hevc_qpel_uni[1][0][1] = ff_hevc_put_hevc_uni_qpel_h4_8_msa;
+        c->put_hevc_qpel_uni[3][0][1] = ff_hevc_put_hevc_uni_qpel_h8_8_msa;
+        c->put_hevc_qpel_uni[4][0][1] = ff_hevc_put_hevc_uni_qpel_h12_8_msa;
+        c->put_hevc_qpel_uni[5][0][1] = ff_hevc_put_hevc_uni_qpel_h16_8_msa;
+        c->put_hevc_qpel_uni[6][0][1] = ff_hevc_put_hevc_uni_qpel_h24_8_msa;
+        c->put_hevc_qpel_uni[7][0][1] = ff_hevc_put_hevc_uni_qpel_h32_8_msa;
+        c->put_hevc_qpel_uni[8][0][1] = ff_hevc_put_hevc_uni_qpel_h48_8_msa;
+        c->put_hevc_qpel_uni[9][0][1] = ff_hevc_put_hevc_uni_qpel_h64_8_msa;
+
+        c->put_hevc_qpel_uni[1][1][0] = ff_hevc_put_hevc_uni_qpel_v4_8_msa;
+        c->put_hevc_qpel_uni[3][1][0] = ff_hevc_put_hevc_uni_qpel_v8_8_msa;
+        c->put_hevc_qpel_uni[4][1][0] = ff_hevc_put_hevc_uni_qpel_v12_8_msa;
+        c->put_hevc_qpel_uni[5][1][0] = ff_hevc_put_hevc_uni_qpel_v16_8_msa;
+        c->put_hevc_qpel_uni[6][1][0] = ff_hevc_put_hevc_uni_qpel_v24_8_msa;
+        c->put_hevc_qpel_uni[7][1][0] = ff_hevc_put_hevc_uni_qpel_v32_8_msa;
+        c->put_hevc_qpel_uni[8][1][0] = ff_hevc_put_hevc_uni_qpel_v48_8_msa;
+        c->put_hevc_qpel_uni[9][1][0] = ff_hevc_put_hevc_uni_qpel_v64_8_msa;
     }
 }
 #endif  // #if HAVE_MSA
diff --git a/libavcodec/mips/hevcdsp_mips.h b/libavcodec/mips/hevcdsp_mips.h
index 4f7f273..76a6784 100644
--- a/libavcodec/mips/hevcdsp_mips.h
+++ b/libavcodec/mips/hevcdsp_mips.h
@@ -67,3 +67,43 @@ MC(qpel, hv, 48);
 MC(qpel, hv, 64);
 
 #undef MC
+
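+/* UNI_MC(qpel, h, 4) expands to the prototype of
+ * ff_hevc_put_hevc_uni_qpel_h4_8_msa(); the signature matches the
+ * HEVCDSPContext put_hevc_qpel_uni function pointers filled in above. */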
+#define UNI_MC(PEL, DIR, WIDTH)                                                \
+void ff_hevc_put_hevc_uni_##PEL##_##DIR##WIDTH##_8_msa(uint8_t *dst,          \
+                                                       ptrdiff_t dst_stride,  \
+                                                       uint8_t *src,          \
+                                                       ptrdiff_t src_stride,  \
+                                                       int height,            \
+                                                       intptr_t mx,           \
+                                                       intptr_t my,           \
+                                                       int width)
+
+UNI_MC(pel, pixels, 4);
+UNI_MC(pel, pixels, 6);
+UNI_MC(pel, pixels, 8);
+UNI_MC(pel, pixels, 12);
+UNI_MC(pel, pixels, 16);
+UNI_MC(pel, pixels, 24);
+UNI_MC(pel, pixels, 32);
+UNI_MC(pel, pixels, 48);
+UNI_MC(pel, pixels, 64);
+
+UNI_MC(qpel, h, 4);
+UNI_MC(qpel, h, 8);
+UNI_MC(qpel, h, 12);
+UNI_MC(qpel, h, 16);
+UNI_MC(qpel, h, 24);
+UNI_MC(qpel, h, 32);
+UNI_MC(qpel, h, 48);
+UNI_MC(qpel, h, 64);
+
+UNI_MC(qpel, v, 4);
+UNI_MC(qpel, v, 8);
+UNI_MC(qpel, v, 12);
+UNI_MC(qpel, v, 16);
+UNI_MC(qpel, v, 24);
+UNI_MC(qpel, v, 32);
+UNI_MC(qpel, v, 48);
+UNI_MC(qpel, v, 64);
+
+#undef UNI_MC
diff --git a/libavcodec/mips/hevcdsp_msa.c b/libavcodec/mips/hevcdsp_msa.c
index fcc344b..d0e6f64 100644
--- a/libavcodec/mips/hevcdsp_msa.c
+++ b/libavcodec/mips/hevcdsp_msa.c
@@ -2270,6 +2270,1767 @@ static void hevc_hv_8t_64w_msa(uint8_t * __restrict src, int32_t src_stride,
                                filter_x, filter_y, height, 64);
 }
 
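+/* vshf.b index tables for the horizontal 8-tap filters: each entry pair
+ * gathers the overlapping (pixel n, pixel n+1) bytes consumed by one
+ * dotp_s_h step.  The second and third rows serve the 4-wide cases, where
+ * two 4-pixel rows share one vector and indices >= 16 select bytes from
+ * the second source register of the shuffle. */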
+static const uint8_t mc_filt_mask_arr[16 * 3] = {
+    /* 8 width cases */
+    0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+    /* 4 width cases */
+    0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
+    /* 4 width cases */
+    8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
+};
+
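+/* 8-tap filter over four pre-interleaved byte-pair vectors: taps 0-3 and
+ * taps 4-7 accumulate in two dotp/dpadd chains that are merged with a
+ * saturating add so the sum stays within 16 bits. */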
+#define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3,                 \
+                            filt0, filt1, filt2, filt3)             \
+( {                                                                 \
+    v8i16 tmp0, tmp1;                                               \
+                                                                    \
+    tmp0 = __msa_dotp_s_h((v16i8) (vec0), (v16i8) (filt0));         \
+    tmp0 = __msa_dpadd_s_h(tmp0, (v16i8) (vec1), (v16i8) (filt1));  \
+    tmp1 = __msa_dotp_s_h((v16i8) (vec2), (v16i8) (filt2));         \
+    tmp1 = __msa_dpadd_s_h(tmp1, (v16i8) (vec3), (v16i8) (filt3));  \
+    tmp0 = __msa_adds_s_h(tmp0, tmp1);                              \
+                                                                    \
+    tmp0;                                                           \
+} )
+
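+/* Horizontal 8-tap filter for four 4-pixel rows: each vshf.b pulls the
+ * byte pairs for one tap pair out of two packed rows at once, so the
+ * four input rows reduce to just two v8i16 result vectors (out0, out1). */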
+#define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3,                   \
+                                   mask0, mask1, mask2, mask3,               \
+                                   filt0, filt1, filt2, filt3,               \
+                                   out0, out1)                               \
+{                                                                            \
+    v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;    \
+    v8i16 res0_m, res1_m, res2_m, res3_m;                                    \
+                                                                             \
+    vec0_m = __msa_vshf_b((v16i8) (mask0), (v16i8) (src1), (v16i8) (src0));  \
+    vec1_m = __msa_vshf_b((v16i8) (mask0), (v16i8) (src3), (v16i8) (src2));  \
+                                                                             \
+    res0_m = __msa_dotp_s_h(vec0_m, (v16i8) (filt0));                        \
+    res1_m = __msa_dotp_s_h(vec1_m, (v16i8) (filt0));                        \
+                                                                             \
+    vec2_m = __msa_vshf_b((v16i8) (mask1), (v16i8) (src1), (v16i8) (src0));  \
+    vec3_m = __msa_vshf_b((v16i8) (mask1), (v16i8) (src3), (v16i8) (src2));  \
+                                                                             \
+    res0_m = __msa_dpadd_s_h(res0_m, (v16i8) (filt1), vec2_m);              \
+    res1_m = __msa_dpadd_s_h(res1_m, (v16i8) (filt1), vec3_m);              \
+                                                                             \
+    vec4_m = __msa_vshf_b((v16i8) (mask2), (v16i8) (src1), (v16i8) (src0));  \
+    vec5_m = __msa_vshf_b((v16i8) (mask2), (v16i8) (src3), (v16i8) (src2));  \
+                                                                             \
+    res2_m = __msa_dotp_s_h((v16i8) (filt2), vec4_m);                        \
+    res3_m = __msa_dotp_s_h((v16i8) (filt2), vec5_m);                        \
+                                                                             \
+    vec6_m = __msa_vshf_b((v16i8) (mask3), (v16i8) (src1), (v16i8) (src0));  \
+    vec7_m = __msa_vshf_b((v16i8) (mask3), (v16i8) (src3), (v16i8) (src2));  \
+                                                                             \
+    res2_m = __msa_dpadd_s_h(res2_m, (v16i8) (filt3), vec6_m);               \
+    res3_m = __msa_dpadd_s_h(res3_m, (v16i8) (filt3), vec7_m);               \
+                                                                             \
+    out0 = __msa_adds_s_h(res0_m, res2_m);                                   \
+    out1 = __msa_adds_s_h(res1_m, res3_m);                                   \
+}
+
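+/* Horizontal 8-tap filter for four 8-pixel rows, one v8i16 result per
+ * row: taps 0-3 accumulate into res0_m..res3_m and taps 4-7 into
+ * res4_m..res7_m before the final saturating add. */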
+#define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3,                   \
+                                   mask0, mask1, mask2, mask3,               \
+                                   filt0, filt1, filt2, filt3,               \
+                                   out0, out1, out2, out3)                   \
+{                                                                            \
+    v16i8 vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m;    \
+    v8i16 res0_m, res1_m, res2_m, res3_m, res4_m, res5_m, res6_m, res7_m;    \
+                                                                             \
+    vec0_m = __msa_vshf_b((v16i8) (mask0), (v16i8) (src0), (v16i8) (src0));  \
+    vec1_m = __msa_vshf_b((v16i8) (mask0), (v16i8) (src1), (v16i8) (src1));  \
+    vec2_m = __msa_vshf_b((v16i8) (mask0), (v16i8) (src2), (v16i8) (src2));  \
+    vec3_m = __msa_vshf_b((v16i8) (mask0), (v16i8) (src3), (v16i8) (src3));  \
+                                                                             \
+    res0_m = __msa_dotp_s_h(vec0_m, (v16i8) (filt0));                        \
+    res1_m = __msa_dotp_s_h(vec1_m, (v16i8) (filt0));                        \
+    res2_m = __msa_dotp_s_h(vec2_m, (v16i8) (filt0));                        \
+    res3_m = __msa_dotp_s_h(vec3_m, (v16i8) (filt0));                        \
+                                                                             \
+    vec0_m = __msa_vshf_b((v16i8) (mask2), (v16i8) (src0), (v16i8) (src0));  \
+    vec1_m = __msa_vshf_b((v16i8) (mask2), (v16i8) (src1), (v16i8) (src1));  \
+    vec2_m = __msa_vshf_b((v16i8) (mask2), (v16i8) (src2), (v16i8) (src2));  \
+    vec3_m = __msa_vshf_b((v16i8) (mask2), (v16i8) (src3), (v16i8) (src3));  \
+                                                                             \
+    res4_m = __msa_dotp_s_h(vec0_m, (v16i8) (filt2));                        \
+    res5_m = __msa_dotp_s_h(vec1_m, (v16i8) (filt2));                        \
+    res6_m = __msa_dotp_s_h(vec2_m, (v16i8) (filt2));                        \
+    res7_m = __msa_dotp_s_h(vec3_m, (v16i8) (filt2));                        \
+                                                                             \
+    vec4_m = __msa_vshf_b((v16i8) (mask1), (v16i8) (src0), (v16i8) (src0));  \
+    vec5_m = __msa_vshf_b((v16i8) (mask1), (v16i8) (src1), (v16i8) (src1));  \
+    vec6_m = __msa_vshf_b((v16i8) (mask1), (v16i8) (src2), (v16i8) (src2));  \
+    vec7_m = __msa_vshf_b((v16i8) (mask1), (v16i8) (src3), (v16i8) (src3));  \
+                                                                             \
+    res0_m = __msa_dpadd_s_h(res0_m, (v16i8) (filt1), vec4_m);               \
+    res1_m = __msa_dpadd_s_h(res1_m, (v16i8) (filt1), vec5_m);               \
+    res2_m = __msa_dpadd_s_h(res2_m, (v16i8) (filt1), vec6_m);               \
+    res3_m = __msa_dpadd_s_h(res3_m, (v16i8) (filt1), vec7_m);               \
+                                                                             \
+    vec4_m = __msa_vshf_b((v16i8) (mask3), (v16i8) (src0), (v16i8) (src0));  \
+    vec5_m = __msa_vshf_b((v16i8) (mask3), (v16i8) (src1), (v16i8) (src1));  \
+    vec6_m = __msa_vshf_b((v16i8) (mask3), (v16i8) (src2), (v16i8) (src2));  \
+    vec7_m = __msa_vshf_b((v16i8) (mask3), (v16i8) (src3), (v16i8) (src3));  \
+                                                                             \
+    res4_m = __msa_dpadd_s_h(res4_m, (v16i8) (filt3), vec4_m);               \
+    res5_m = __msa_dpadd_s_h(res5_m, (v16i8) (filt3), vec5_m);               \
+    res6_m = __msa_dpadd_s_h(res6_m, (v16i8) (filt3), vec6_m);               \
+    res7_m = __msa_dpadd_s_h(res7_m, (v16i8) (filt3), vec7_m);               \
+                                                                             \
+    out0 = __msa_adds_s_h(res0_m, res4_m);                                   \
+    out1 = __msa_adds_s_h(res1_m, res5_m);                                   \
+    out2 = __msa_adds_s_h(res2_m, res6_m);                                   \
+    out3 = __msa_adds_s_h(res3_m, res7_m);                                   \
+}
+
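+/* Uni-prediction horizontal filters: the filtered value is shift-rounded
+ * by rnd_val, saturated and packed straight back to 8-bit pixels, with
+ * no 16-bit intermediate buffer.  Sources are xor'ed with 128 so the
+ * unsigned pixels can go through the signed dot-product instructions;
+ * the pack/store macros xor again to undo the bias. */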
+static void common_hz_8t_4x4_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, uint8_t rnd_val)
+{
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 src0, src1, src2, src3;
+    v16u8 mask0, mask1, mask2, mask3;
+    v8i16 filt, out0, out1;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_UB(&mc_filt_mask_arr[16]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+    HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                               mask3, filt0, filt1, filt2, filt3, out0, out1);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+    out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+    out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+
+    PCKEV_2B_XORI128_STORE_4_BYTES_4(out0, out1, dst, dst_stride);
+}
+
+static void common_hz_8t_4x8_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, uint8_t rnd_val)
+{
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 src0, src1, src2, src3;
+    v16u8 mask0, mask1, mask2, mask3;
+    v8i16 filt, out0, out1, out2, out3;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_UB(&mc_filt_mask_arr[16]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+    src += (4 * src_stride);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+    HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                               mask3, filt0, filt1, filt2, filt3, out0, out1);
+
+    LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+    HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                               mask3, filt0, filt1, filt2, filt3, out2, out3);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+    out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+    out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+    out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+    out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+    PCKEV_2B_XORI128_STORE_4_BYTES_4(out0, out1, dst, dst_stride);
+    dst += (4 * dst_stride);
+    PCKEV_2B_XORI128_STORE_4_BYTES_4(out2, out3, dst, dst_stride);
+}
+
+static void common_hz_8t_4x16_msa(uint8_t *src, int32_t src_stride,
+                                  uint8_t *dst, int32_t dst_stride,
+                                  const int8_t *filter, uint8_t rnd_val)
+{
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 src0, src1, src2, src3;
+    v16u8 mask0, mask1, mask2, mask3;
+    v8i16 filt, out0, out1, out2, out3;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_UB(&mc_filt_mask_arr[16]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+    src += (4 * src_stride);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+    HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                               mask3, filt0, filt1, filt2, filt3, out0, out1);
+
+    LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+    src += (4 * src_stride);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+    HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                               mask3, filt0, filt1, filt2, filt3, out2, out3);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+    out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+    out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+    out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+    out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+    PCKEV_2B_XORI128_STORE_4_BYTES_4(out0, out1, dst, dst_stride);
+    dst += (4 * dst_stride);
+    PCKEV_2B_XORI128_STORE_4_BYTES_4(out2, out3, dst, dst_stride);
+    dst += (4 * dst_stride);
+
+    LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+    src += (4 * src_stride);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+    HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                               mask3, filt0, filt1, filt2, filt3, out0, out1);
+
+    LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+    src += (4 * src_stride);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+    HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                               mask3, filt0, filt1, filt2, filt3, out2, out3);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+    out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+    out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+    out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+    out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+    PCKEV_2B_XORI128_STORE_4_BYTES_4(out0, out1, dst, dst_stride);
+    dst += (4 * dst_stride);
+    PCKEV_2B_XORI128_STORE_4_BYTES_4(out2, out3, dst, dst_stride);
+}
+
+static void common_hz_8t_4w_msa(uint8_t *src, int32_t src_stride,
+                                uint8_t *dst, int32_t dst_stride,
+                                const int8_t *filter, int32_t height,
+                                uint8_t rnd_val)
+{
+    if (4 == height) {
+        common_hz_8t_4x4_msa(src, src_stride, dst, dst_stride, filter, rnd_val);
+    } else if (8 == height) {
+        common_hz_8t_4x8_msa(src, src_stride, dst, dst_stride, filter, rnd_val);
+    } else if (16 == height) {
+        common_hz_8t_4x16_msa(src, src_stride, dst, dst_stride, filter,
+                              rnd_val);
+    }
+}
+
+static void common_hz_8t_8x4_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, uint8_t rnd_val)
+{
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 src0, src1, src2, src3;
+    v16u8 mask0, mask1, mask2, mask3;
+    v8i16 filt, out0, out1, out2, out3;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+    HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                               mask3, filt0, filt1, filt2, filt3, out0, out1,
+                               out2, out3);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+    out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+    out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+    out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+    out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+    PCKEV_B_4_XORI128_STORE_8_BYTES_4(out0, out1, out2, out3, dst, dst_stride);
+}
+
+static void common_hz_8t_8x8mult_msa(uint8_t *src, int32_t src_stride,
+                                     uint8_t *dst, int32_t dst_stride,
+                                     const int8_t *filter, int32_t height,
+                                     uint8_t rnd_val)
+{
+    uint32_t loop_cnt;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 src0, src1, src2, src3;
+    v16u8 mask0, mask1, mask2, mask3;
+    v8i16 filt, out0, out1, out2, out3;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+        src += (4 * src_stride);
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                                   mask3, filt0, filt1, filt2, filt3, out0,
+                                   out1, out2, out3);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+        PCKEV_B_4_XORI128_STORE_8_BYTES_4(out0, out1, out2, out3,
+                                          dst, dst_stride);
+        dst += (4 * dst_stride);
+    }
+}
+
+static void common_hz_8t_8w_msa(uint8_t *src, int32_t src_stride,
+                                uint8_t *dst, int32_t dst_stride,
+                                const int8_t *filter, int32_t height,
+                                uint8_t rnd_val)
+{
+    if (4 == height) {
+        common_hz_8t_8x4_msa(src, src_stride, dst, dst_stride, filter, rnd_val);
+    } else {
+        common_hz_8t_8x8mult_msa(src, src_stride, dst, dst_stride,
+                                 filter, height, rnd_val);
+    }
+}
+
+static void common_hz_8t_12w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    uint8_t *src1_ptr, *dst1;
+    uint32_t loop_cnt;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 src0, src1, src2, src3;
+    v8u16 rnd_vec;
+    v16u8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask00;
+    v8i16 filt, out0, out1, out2, out3;
+
+    mask00 = LOAD_UB(&mc_filt_mask_arr[0]);
+    mask0 = LOAD_UB(&mc_filt_mask_arr[16]);
+
+    src1_ptr = src - 3;
+    dst1 = dst;
+
+    dst = dst1 + 8;
+    src = src1_ptr + 8;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask00 + 2;
+    mask2 = mask00 + 4;
+    mask3 = mask00 + 6;
+    mask4 = mask0 + 2;
+    mask5 = mask0 + 4;
+    mask6 = mask0 + 6;
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        /* 8 width */
+        LOAD_4VECS_SB(src1_ptr, src_stride, src0, src1, src2, src3);
+        src1_ptr += (4 * src_stride);
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask00, mask1, mask2,
+                                   mask3, filt0, filt1, filt2, filt3, out0,
+                                   out1, out2, out3);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+        PCKEV_B_4_XORI128_STORE_8_BYTES_4(out0, out1, out2, out3,
+                                          dst1, dst_stride);
+        dst1 += (4 * dst_stride);
+
+        /* 4 width */
+        LOAD_4VECS_SB(src, src_stride, src0, src1, src2, src3);
+        src += (4 * src_stride);
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask4, mask5,
+                                   mask6, filt0, filt1, filt2, filt3, out0,
+                                   out1);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+
+        PCKEV_2B_XORI128_STORE_4_BYTES_4(out0, out1, dst, dst_stride);
+        dst += (4 * dst_stride);
+    }
+}
+
+static void common_hz_8t_16w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16u8 mask0, mask1, mask2, mask3;
+    v8i16 filt, out0, out1, out2, out3;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    for (loop_cnt = (height >> 1); loop_cnt--;) {
+        src0 = LOAD_SB(src);
+        src1 = LOAD_SB(src + 8);
+        src += src_stride;
+        src2 = LOAD_SB(src);
+        src3 = LOAD_SB(src + 8);
+        src += src_stride;
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                                   mask3, filt0, filt1, filt2, filt3, out0,
+                                   out1, out2, out3);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+        PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
+        dst += dst_stride;
+        PCKEV_B_XORI128_STORE_VEC(out3, out2, dst);
+        dst += dst_stride;
+    }
+}
+
+static void common_hz_8t_24w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+    v16i8 vec0, vec1, vec2, vec3, vec4, vec5;
+    v16i8 vec6, vec7, vec8, vec9, vec10, vec11;
+    v8i16 out0, out1, out2, out3, out4, out5;
+    v8i16 out6, out7, out8, out9, out10, out11;
+    v8i16 filt;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_SB(&mc_filt_mask_arr[0]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+    mask4 = mask0 + 8;
+    mask5 = mask0 + 10;
+    mask6 = mask0 + 12;
+    mask7 = mask0 + 14;
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    for (loop_cnt = (height >> 1); loop_cnt--;) {
+        src0 = LOAD_SB(src);
+        src1 = LOAD_SB(src + 16);
+        src += src_stride;
+        src2 = LOAD_SB(src);
+        src3 = LOAD_SB(src + 16);
+        src += src_stride;
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        vec0 = __msa_vshf_b(mask0, src0, src0);
+        vec8 = __msa_vshf_b(mask0, src1, src1);
+        vec2 = __msa_vshf_b(mask0, src2, src2);
+        vec9 = __msa_vshf_b(mask0, src3, src3);
+        vec1 = __msa_vshf_b(mask4, src1, src0);
+        vec3 = __msa_vshf_b(mask4, src3, src2);
+
+        out0 = __msa_dotp_s_h(vec0, filt0);
+        out8 = __msa_dotp_s_h(vec8, filt0);
+        out2 = __msa_dotp_s_h(vec2, filt0);
+        out9 = __msa_dotp_s_h(vec9, filt0);
+        out1 = __msa_dotp_s_h(vec1, filt0);
+        out3 = __msa_dotp_s_h(vec3, filt0);
+
+        vec0 = __msa_vshf_b(mask2, src0, src0);
+        vec8 = __msa_vshf_b(mask2, src1, src1);
+        vec2 = __msa_vshf_b(mask2, src2, src2);
+        vec9 = __msa_vshf_b(mask2, src3, src3);
+        vec1 = __msa_vshf_b(mask6, src1, src0);
+        vec3 = __msa_vshf_b(mask6, src3, src2);
+
+        out4 = __msa_dotp_s_h(vec0, filt2);
+        out10 = __msa_dotp_s_h(vec8, filt2);
+        out6 = __msa_dotp_s_h(vec2, filt2);
+        out11 = __msa_dotp_s_h(vec9, filt2);
+        out5 = __msa_dotp_s_h(vec1, filt2);
+        out7 = __msa_dotp_s_h(vec3, filt2);
+
+        vec4 = __msa_vshf_b(mask1, src0, src0);
+        vec10 = __msa_vshf_b(mask1, src1, src1);
+        vec6 = __msa_vshf_b(mask1, src2, src2);
+        vec11 = __msa_vshf_b(mask1, src3, src3);
+        vec5 = __msa_vshf_b(mask5, src1, src0);
+        vec7 = __msa_vshf_b(mask5, src3, src2);
+
+        out0 = __msa_dpadd_s_h(out0, vec4, filt1);
+        out8 = __msa_dpadd_s_h(out8, vec10, filt1);
+        out2 = __msa_dpadd_s_h(out2, vec6, filt1);
+        out9 = __msa_dpadd_s_h(out9, vec11, filt1);
+        out1 = __msa_dpadd_s_h(out1, vec5, filt1);
+        out3 = __msa_dpadd_s_h(out3, vec7, filt1);
+
+        vec4 = __msa_vshf_b(mask3, src0, src0);
+        vec10 = __msa_vshf_b(mask3, src1, src1);
+        vec6 = __msa_vshf_b(mask3, src2, src2);
+        vec11 = __msa_vshf_b(mask3, src3, src3);
+        vec5 = __msa_vshf_b(mask7, src1, src0);
+        vec7 = __msa_vshf_b(mask7, src3, src2);
+
+        out4 = __msa_dpadd_s_h(out4, vec4, filt3);
+        out10 = __msa_dpadd_s_h(out10, vec10, filt3);
+        out6 = __msa_dpadd_s_h(out6, vec6, filt3);
+        out11 = __msa_dpadd_s_h(out11, vec11, filt3);
+        out5 = __msa_dpadd_s_h(out5, vec5, filt3);
+        out7 = __msa_dpadd_s_h(out7, vec7, filt3);
+
+        out0 = __msa_adds_s_h(out0, out4);
+        out8 = __msa_adds_s_h(out8, out10);
+        out2 = __msa_adds_s_h(out2, out6);
+        out9 = __msa_adds_s_h(out9, out11);
+        out1 = __msa_adds_s_h(out1, out5);
+        out3 = __msa_adds_s_h(out3, out7);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out8 = SRAR_SATURATE_SIGNED_H(out8, rnd_vec, 7);
+        out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+        out9 = SRAR_SATURATE_SIGNED_H(out9, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+        PCKEV_B_XORI128_STORE_8_BYTES_2(out8, out9, dst + 16, dst_stride);
+
+        PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
+        dst += dst_stride;
+        PCKEV_B_XORI128_STORE_VEC(out3, out2, dst);
+        dst += dst_stride;
+    }
+}
+
+static void common_hz_8t_32w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16u8 mask0, mask1, mask2, mask3;
+    v8i16 filt, out0, out1, out2, out3;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    for (loop_cnt = (height >> 1); loop_cnt--;) {
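+        /* src1 (pixels 8..23) is assembled by sliding the {src2, src0}
+         * register pair by 8 bytes (sld.b) instead of issuing a fourth
+         * load per row. */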
+        src0 = LOAD_SB(src);
+        src2 = LOAD_SB(src + 16);
+        src3 = LOAD_SB(src + 24);
+        src1 = __msa_sld_b(src2, src0, 8);
+        src += src_stride;
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                                   mask3, filt0, filt1, filt2, filt3, out0,
+                                   out1, out2, out3);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+        src0 = LOAD_SB(src);
+        src2 = LOAD_SB(src + 16);
+        src3 = LOAD_SB(src + 24);
+        src1 = __msa_sld_b(src2, src0, 8);
+        src += src_stride;
+
+        PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
+        PCKEV_B_XORI128_STORE_VEC(out3, out2, (dst + 16));
+        dst += dst_stride;
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+                                   mask3, filt0, filt1, filt2, filt3, out0,
+                                   out1, out2, out3);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+        PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
+        PCKEV_B_XORI128_STORE_VEC(out3, out2, (dst + 16));
+        dst += dst_stride;
+    }
+}
+
+static void common_hz_8t_48w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3;
+    v16i8 vec0, vec1, vec2;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
+    v8i16 filt, out0, out1, out2, out3, out4, out5, out6;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_SB(&mc_filt_mask_arr[0]);
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+    mask4 = mask0 + 8;
+    mask5 = mask0 + 10;
+    mask6 = mask0 + 12;
+    mask7 = mask0 + 14;
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    src -= 3;
+
+    for (loop_cnt = height; loop_cnt--;) {
+        src0 = LOAD_SB(src);
+        src2 = LOAD_SB(src + 16);
+        src3 = LOAD_SB(src + 32);
+        src1 = __msa_sld_b(src2, src0, 8);
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        vec0 = __msa_vshf_b(mask0, src0, src0);
+        vec1 = __msa_vshf_b(mask0, src1, src1);
+        vec2 = __msa_vshf_b(mask0, src2, src2);
+        out0 = __msa_dotp_s_h(vec0, filt0);
+        out1 = __msa_dotp_s_h(vec1, filt0);
+        out2 = __msa_dotp_s_h(vec2, filt0);
+
+        vec0 = __msa_vshf_b(mask1, src0, src0);
+        vec1 = __msa_vshf_b(mask1, src1, src1);
+        vec2 = __msa_vshf_b(mask1, src2, src2);
+        out0 = __msa_dpadd_s_h(out0, vec0, filt1);
+        out1 = __msa_dpadd_s_h(out1, vec1, filt1);
+        out2 = __msa_dpadd_s_h(out2, vec2, filt1);
+
+        vec0 = __msa_vshf_b(mask2, src0, src0);
+        vec1 = __msa_vshf_b(mask2, src1, src1);
+        vec2 = __msa_vshf_b(mask2, src2, src2);
+        out3 = __msa_dotp_s_h(vec0, filt2);
+        out4 = __msa_dotp_s_h(vec1, filt2);
+        out5 = __msa_dotp_s_h(vec2, filt2);
+
+        vec0 = __msa_vshf_b(mask3, src0, src0);
+        vec1 = __msa_vshf_b(mask3, src1, src1);
+        vec2 = __msa_vshf_b(mask3, src2, src2);
+        out3 = __msa_dpadd_s_h(out3, vec0, filt3);
+        out4 = __msa_dpadd_s_h(out4, vec1, filt3);
+        out5 = __msa_dpadd_s_h(out5, vec2, filt3);
+
+        out0 = __msa_adds_s_h(out0, out3);
+        out1 = __msa_adds_s_h(out1, out4);
+        out2 = __msa_adds_s_h(out2, out5);
+
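+        /* The rounded out2 is parked in out6 because out0..out5 are
+         * recycled for the remaining columns; out6 is only consumed by
+         * the dst + 16 store at the end of the row. */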
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out6 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+
+        PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
+
+        src1 = LOAD_SB(src + 40);
+        src1 = (v16i8) __msa_xori_b((v16u8) src1, 128);
+
+        vec0 = __msa_vshf_b(mask4, src3, src2);
+        vec1 = __msa_vshf_b(mask0, src3, src3);
+        vec2 = __msa_vshf_b(mask0, src1, src1);
+        out0 = __msa_dotp_s_h(vec0, filt0);
+        out1 = __msa_dotp_s_h(vec1, filt0);
+        out2 = __msa_dotp_s_h(vec2, filt0);
+
+        vec0 = __msa_vshf_b(mask5, src3, src2);
+        vec1 = __msa_vshf_b(mask1, src3, src3);
+        vec2 = __msa_vshf_b(mask1, src1, src1);
+        out0 = __msa_dpadd_s_h(out0, vec0, filt1);
+        out1 = __msa_dpadd_s_h(out1, vec1, filt1);
+        out2 = __msa_dpadd_s_h(out2, vec2, filt1);
+
+        vec0 = __msa_vshf_b(mask6, src3, src2);
+        vec1 = __msa_vshf_b(mask2, src3, src3);
+        vec2 = __msa_vshf_b(mask2, src1, src1);
+        out3 = __msa_dotp_s_h(vec0, filt2);
+        out4 = __msa_dotp_s_h(vec1, filt2);
+        out5 = __msa_dotp_s_h(vec2, filt2);
+
+        vec0 = __msa_vshf_b(mask7, src3, src2);
+        vec1 = __msa_vshf_b(mask3, src3, src3);
+        vec2 = __msa_vshf_b(mask3, src1, src1);
+        out3 = __msa_dpadd_s_h(out3, vec0, filt3);
+        out4 = __msa_dpadd_s_h(out4, vec1, filt3);
+        out5 = __msa_dpadd_s_h(out5, vec2, filt3);
+
+        out3 = __msa_adds_s_h(out0, out3);
+        out4 = __msa_adds_s_h(out1, out4);
+        out5 = __msa_adds_s_h(out2, out5);
+
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+        out4 = SRAR_SATURATE_SIGNED_H(out4, rnd_vec, 7);
+        out5 = SRAR_SATURATE_SIGNED_H(out5, rnd_vec, 7);
+
+        PCKEV_B_XORI128_STORE_VEC(out3, out6, (dst + 16));
+        PCKEV_B_XORI128_STORE_VEC(out5, out4, (dst + 32));
+
+        src += src_stride;
+        dst += dst_stride;
+    }
+}
+
+static void common_hz_8t_64w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    int32_t loop_cnt;
+    v16i8 src0, src1, src2, src3;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16u8 mask0, mask1, mask2, mask3;
+    v8i16 filt, out0, out1, out2, out3;
+    v8u16 rnd_vec;
+
+    mask0 = LOAD_UB(&mc_filt_mask_arr[0]);
+
+    src -= 3;
+
+    /* rearranging filter */
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    mask1 = mask0 + 2;
+    mask2 = mask0 + 4;
+    mask3 = mask0 + 6;
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    for (loop_cnt = height; loop_cnt--;) {
+        src0 = LOAD_SB(src);
+        src2 = LOAD_SB(src + 16);
+        src3 = LOAD_SB(src + 24);
+        src1 = __msa_sld_b(src2, src0, 8);
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
+                                   mask2, mask3, filt0, filt1, filt2, filt3,
+                                   out0, out1, out2, out3);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+        PCKEV_B_XORI128_STORE_VEC(out1, out0, dst);
+        PCKEV_B_XORI128_STORE_VEC(out3, out2, dst + 16);
+
+        src0 = LOAD_SB(src + 32);
+        src2 = LOAD_SB(src + 48);
+        src3 = LOAD_SB(src + 56);
+        src1 = __msa_sld_b(src2, src0, 8);
+
+        XORI_B_4VECS_SB(src0, src1, src2, src3, src0, src1, src2, src3, 128);
+
+        HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
+                                   mask2, mask3, filt0, filt1, filt2, filt3,
+                                   out0, out1, out2, out3);
+
+        out0 = SRAR_SATURATE_SIGNED_H(out0, rnd_vec, 7);
+        out1 = SRAR_SATURATE_SIGNED_H(out1, rnd_vec, 7);
+        out2 = SRAR_SATURATE_SIGNED_H(out2, rnd_vec, 7);
+        out3 = SRAR_SATURATE_SIGNED_H(out3, rnd_vec, 7);
+
+        PCKEV_B_XORI128_STORE_VEC(out1, out0, dst + 32);
+        PCKEV_B_XORI128_STORE_VEC(out3, out2, dst + 48);
+
+        src += src_stride;
+        dst += dst_stride;
+    }
+}
+
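+/* Vertical 8-tap filter, 4 columns: adjacent rows are byte-interleaved
+ * (ilvr_b) and two interleaved 4-wide rows merged per vector (ilvr_d),
+ * so each FILT_8TAP_DPADD_S_H covers two output rows.  The interleaved
+ * history slides down the block between iterations instead of being
+ * reloaded. */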
+static void common_vt_8t_4w_msa(uint8_t *src, int32_t src_stride,
+                                uint8_t *dst, int32_t dst_stride,
+                                const int8_t *filter, int32_t height,
+                                uint8_t rnd_val)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
+    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
+    v16i8 src2110, src4332, src6554, src8776, src10998;
+    v16i8 filt0, filt1, filt2, filt3;
+    v8i16 filt, out10, out32;
+    v8u16 rnd_vec;
+
+    src -= (3 * src_stride);
+
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    LOAD_7VECS_SB(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+    src += (7 * src_stride);
+
+    ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                    src1, src3, src5, src2, src4, src6,
+                    src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+
+    ILVR_D_3VECS_SB(src2110, src21_r, src10_r, src4332, src43_r, src32_r,
+                    src6554, src65_r, src54_r);
+
+    XORI_B_3VECS_SB(src2110, src4332, src6554, src2110, src4332, src6554, 128);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+        src += (4 * src_stride);
+
+        ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                        src76_r, src87_r, src98_r, src109_r);
+
+        ILVR_D_2VECS_SB(src8776, src87_r, src76_r, src10998, src109_r, src98_r);
+
+        XORI_B_2VECS_SB(src8776, src10998, src8776, src10998, 128);
+
+        out10 = FILT_8TAP_DPADD_S_H(src2110, src4332, src6554, src8776,
+                                    filt0, filt1, filt2, filt3);
+        out32 = FILT_8TAP_DPADD_S_H(src4332, src6554, src8776, src10998,
+                                    filt0, filt1, filt2, filt3);
+
+        out10 = SRAR_SATURATE_SIGNED_H(out10, rnd_vec, 7);
+        out32 = SRAR_SATURATE_SIGNED_H(out32, rnd_vec, 7);
+
+        PCKEV_2B_XORI128_STORE_4_BYTES_4(out10, out32, dst, dst_stride);
+        dst += (4 * dst_stride);
+
+        src2110 = src6554;
+        src4332 = src8776;
+        src6554 = src10998;
+
+        src6 = src10;
+    }
+}
+
+static void common_vt_8t_8w_msa(uint8_t *src, int32_t src_stride,
+                                uint8_t *dst, int32_t dst_stride,
+                                const int8_t *filter, int32_t height,
+                                uint8_t rnd_val)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
+    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
+    v16i8 filt0, filt1, filt2, filt3;
+    v8i16 filt, out0_r, out1_r, out2_r, out3_r;
+    v8u16 rnd_vec;
+
+    src -= (3 * src_stride);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    LOAD_7VECS_SB(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+    src += (7 * src_stride);
+
+    XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
+                    src0, src1, src2, src3, src4, src5, src6, 128);
+
+    ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                    src1, src3, src5, src2, src4, src6,
+                    src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+        src += (4 * src_stride);
+
+        XORI_B_4VECS_SB(src7, src8, src9, src10, src7, src8, src9, src10, 128);
+
+        ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                        src76_r, src87_r, src98_r, src109_r);
+
+        out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r,
+                                     filt0, filt1, filt2, filt3);
+        out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r,
+                                     filt0, filt1, filt2, filt3);
+        out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r,
+                                     filt0, filt1, filt2, filt3);
+        out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r,
+                                     filt0, filt1, filt2, filt3);
+
+        out0_r = SRAR_SATURATE_SIGNED_H(out0_r, rnd_vec, 7);
+        out1_r = SRAR_SATURATE_SIGNED_H(out1_r, rnd_vec, 7);
+        out2_r = SRAR_SATURATE_SIGNED_H(out2_r, rnd_vec, 7);
+        out3_r = SRAR_SATURATE_SIGNED_H(out3_r, rnd_vec, 7);
+
+        PCKEV_B_4_XORI128_STORE_8_BYTES_4(out0_r, out1_r, out2_r, out3_r,
+                                          dst, dst_stride);
+        dst += (4 * dst_stride);
+
+        src10_r = src54_r;
+        src32_r = src76_r;
+        src54_r = src98_r;
+        src21_r = src65_r;
+        src43_r = src87_r;
+        src65_r = src109_r;
+
+        src6 = src10;
+    }
+}
+
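+/* Vertical 8-tap filter, 12 columns: the left 8 columns use plain
+ * right-half interleaves, while vshf.w with mask { 2, 6, 2, 6 } packs
+ * columns 8..11 of two consecutive rows into one vector so the odd 4
+ * columns can share a dot-product chain. */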
+static void common_vt_8t_12w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    int32_t loop_cnt;
+    uint32_t out2, out3;
+    uint64_t out0, out1;
+    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
+    v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+    v16i8 vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15, vec16;
+    v16i8 res0, res1, res2;
+    v8i16 vec01, vec23, vec45, vec67;
+    v8i16 tmp0, tmp1, tmp2;
+    v8i16 filt, filt0, filt1, filt2, filt3;
+    v8u16 rnd_vec;
+    v4i32 mask = { 2, 6, 2, 6 };
+
+    src -= (3 * src_stride);
+
+    LOAD_7VECS_UB(src, src_stride, src0, src1, src2, src3, src4, src5, src6);
+    src += (7 * src_stride);
+
+    XORI_B_4VECS_SB(src0, src1, src2, src3, vec0, vec1, vec2, vec3, 128);
+    vec4 = (v16i8) __msa_xori_b((v16u8) src4, 128);
+    vec5 = (v16i8) __msa_xori_b((v16u8) src5, 128);
+    vec6 = (v16i8) __msa_xori_b((v16u8) src6, 128);
+
+    /* 4 width */
+    vec9 = (v16i8) __msa_vshf_w(mask, (v4i32) vec1, (v4i32) vec0);
+    vec10 = (v16i8) __msa_vshf_w(mask, (v4i32) vec2, (v4i32) vec1);
+    vec11 = (v16i8) __msa_vshf_w(mask, (v4i32) vec3, (v4i32) vec2);
+    vec12 = (v16i8) __msa_vshf_w(mask, (v4i32) vec4, (v4i32) vec3);
+    vec13 = (v16i8) __msa_vshf_w(mask, (v4i32) vec5, (v4i32) vec4);
+    vec14 = (v16i8) __msa_vshf_w(mask, (v4i32) vec6, (v4i32) vec5);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    /* rearranging filter_y */
+    filt = LOAD_SH(filter);
+    filt0 = (v8i16) __msa_splati_h(filt, 0);
+    filt1 = (v8i16) __msa_splati_h(filt, 1);
+    filt2 = (v8i16) __msa_splati_h(filt, 2);
+    filt3 = (v8i16) __msa_splati_h(filt, 3);
+
+    for (loop_cnt = (height >> 1); loop_cnt--;) {
+        LOAD_2VECS_UB(src, src_stride, src7, src8);
+        src += (2 * src_stride);
+
+        XORI_B_2VECS_SB(src7, src8, vec7, vec8, 128);
+
+        ILVR_B_4VECS_SH(vec0, vec2, vec4, vec6, vec1, vec3, vec5, vec7,
+                        vec01, vec23, vec45, vec67);
+
+        tmp0 = FILT_8TAP_DPADD_S_H(vec01, vec23, vec45, vec67, filt0, filt1,
+                                   filt2, filt3);
+
+        ILVR_B_4VECS_SH(vec1, vec3, vec5, vec7, vec2, vec4, vec6, vec8,
+                        vec01, vec23, vec45, vec67);
+
+        tmp1 = FILT_8TAP_DPADD_S_H(vec01, vec23, vec45, vec67, filt0, filt1,
+                                   filt2, filt3);
+
+        /* 4 width */
+        vec15 = (v16i8) __msa_vshf_w(mask, (v4i32) vec7, (v4i32) vec6);
+        vec16 = (v16i8) __msa_vshf_w(mask, (v4i32) vec8, (v4i32) vec7);
+
+        ILVR_B_4VECS_SH(vec9, vec11, vec13, vec15, vec10, vec12, vec14, vec16,
+                        vec01, vec23, vec45, vec67);
+
+        tmp2 = FILT_8TAP_DPADD_S_H(vec01, vec23, vec45, vec67, filt0, filt1,
+                                   filt2, filt3);
+
+        tmp0 = SRAR_SATURATE_SIGNED_H(tmp0, rnd_vec, 7);
+        tmp1 = SRAR_SATURATE_SIGNED_H(tmp1, rnd_vec, 7);
+        tmp2 = SRAR_SATURATE_SIGNED_H(tmp2, rnd_vec, 7);
+
+        res0 = __msa_pckev_b((v16i8) tmp0, (v16i8) tmp0);
+        res1 = __msa_pckev_b((v16i8) tmp1, (v16i8) tmp1);
+        res2 = __msa_pckev_b((v16i8) tmp2, (v16i8) tmp2);
+
+        XORI_B_3VECS_SB(res0, res1, res2, res0, res1, res2, 128);
+
+        out0 = __msa_copy_u_d((v2i64) res0, 0);
+        out1 = __msa_copy_u_d((v2i64) res1, 0);
+        out2 = __msa_copy_u_w((v4i32) res2, 0);
+        out3 = __msa_copy_u_w((v4i32) res2, 1);
+
+        STORE_DWORD(dst, out0);
+        STORE_WORD((dst + 8), out2);
+        dst += dst_stride;
+        STORE_DWORD(dst, out1);
+        STORE_WORD((dst + 8), out3);
+        dst += dst_stride;
+
+        vec0 = vec2;
+        vec1 = vec3;
+        vec2 = vec4;
+        vec3 = vec5;
+        vec4 = vec6;
+        vec5 = vec7;
+        vec6 = vec8;
+        vec9 = vec11;
+        vec10 = vec12;
+        vec11 = vec13;
+        vec12 = vec14;
+        vec13 = vec15;
+        vec14 = vec16;
+    }
+}
+
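+/* Vertical 8-tap filter, 16 columns: both right (ilvr_b) and left
+ * (ilvl_b) interleaves of adjacent rows are kept, giving two 8-column
+ * dot-product chains per output row that are packed together on store. */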
+static void common_vt_8t_16w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    uint32_t loop_cnt;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
+    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
+    v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
+    v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
+    v8i16 out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
+    v8i16 filt;
+    v8u16 rnd_vec;
+    v16u8 tmp0, tmp1, tmp2, tmp3;
+
+    src -= (3 * src_stride);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    LOAD_7VECS_SB(src, src_stride,
+                  src0, src1, src2, src3, src4, src5, src6);
+    src += (7 * src_stride);
+
+    XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
+                    src0, src1, src2, src3, src4, src5, src6, 128);
+
+    ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                    src1, src3, src5, src2, src4, src6,
+                    src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+
+    ILVL_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                    src1, src3, src5, src2, src4, src6,
+                    src10_l, src32_l, src54_l, src21_l, src43_l, src65_l);
+
+    for (loop_cnt = (height >> 2); loop_cnt--;) {
+        LOAD_4VECS_SB(src, src_stride, src7, src8, src9, src10);
+        src += (4 * src_stride);
+
+        XORI_B_4VECS_SB(src7, src8, src9, src10,
+                        src7, src8, src9, src10, 128);
+
+        ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                        src76_r, src87_r, src98_r, src109_r);
+
+        ILVL_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                        src76_l, src87_l, src98_l, src109_l);
+
+        out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r,
+                                     filt0, filt1, filt2, filt3);
+        out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r,
+                                     filt0, filt1, filt2, filt3);
+        out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r,
+                                     filt0, filt1, filt2, filt3);
+        out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r,
+                                     filt0, filt1, filt2, filt3);
+
+        out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l,
+                                     filt0, filt1, filt2, filt3);
+        out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l,
+                                     filt0, filt1, filt2, filt3);
+        out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l,
+                                     filt0, filt1, filt2, filt3);
+        out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l,
+                                     filt0, filt1, filt2, filt3);
+
+        out0_r = SRAR_SATURATE_SIGNED_H(out0_r, rnd_vec, 7);
+        out1_r = SRAR_SATURATE_SIGNED_H(out1_r, rnd_vec, 7);
+        out2_r = SRAR_SATURATE_SIGNED_H(out2_r, rnd_vec, 7);
+        out3_r = SRAR_SATURATE_SIGNED_H(out3_r, rnd_vec, 7);
+        out0_l = SRAR_SATURATE_SIGNED_H(out0_l, rnd_vec, 7);
+        out1_l = SRAR_SATURATE_SIGNED_H(out1_l, rnd_vec, 7);
+        out2_l = SRAR_SATURATE_SIGNED_H(out2_l, rnd_vec, 7);
+        out3_l = SRAR_SATURATE_SIGNED_H(out3_l, rnd_vec, 7);
+
+        PCKEV_B_4VECS_UB(out0_l, out1_l, out2_l, out3_l, out0_r, out1_r, out2_r,
+                         out3_r, tmp0, tmp1, tmp2, tmp3);
+
+        XORI_B_4VECS_UB(tmp0, tmp1, tmp2, tmp3, tmp0, tmp1, tmp2, tmp3, 128);
+
+        STORE_4VECS_UB(dst, dst_stride, tmp0, tmp1, tmp2, tmp3);
+        dst += (4 * dst_stride);
+
+        src10_r = src54_r;
+        src32_r = src76_r;
+        src54_r = src98_r;
+        src21_r = src65_r;
+        src43_r = src87_r;
+        src65_r = src109_r;
+        src10_l = src54_l;
+        src32_l = src76_l;
+        src54_l = src98_l;
+        src21_l = src65_l;
+        src43_l = src87_l;
+        src65_l = src109_l;
+
+        src6 = src10;
+    }
+}
+
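+/* Same scheme as common_vt_8t_16w_msa, but iterating over the width in
+ * 16-column stripes so one inner loop serves the wider block sizes. */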
+static void common_vt_8t_16w_mult_msa(uint8_t *src, int32_t src_stride,
+                                      uint8_t *dst, int32_t dst_stride,
+                                      const int8_t *filter, int32_t height,
+                                      uint8_t rnd_val, int32_t width)
+{
+    uint8_t *src_tmp;
+    uint8_t *dst_tmp;
+    uint32_t loop_cnt, cnt;
+    v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+    v16i8 filt0, filt1, filt2, filt3;
+    v16i8 src10_r, src32_r, src54_r, src76_r, src98_r;
+    v16i8 src21_r, src43_r, src65_r, src87_r, src109_r;
+    v16i8 src10_l, src32_l, src54_l, src76_l, src98_l;
+    v16i8 src21_l, src43_l, src65_l, src87_l, src109_l;
+    v8i16 filt, out0_r, out1_r, out2_r, out3_r, out0_l, out1_l, out2_l, out3_l;
+    v16u8 tmp0, tmp1, tmp2, tmp3;
+    v8u16 rnd_vec;
+
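+    /* Point three rows above the block: the 8-tap filter spans rows
+     * -3 .. +4 around each output row. */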
+    src -= (3 * src_stride);
+
+    rnd_vec = (v8u16) __msa_fill_h(rnd_val);
+
+    filt = LOAD_SH(filter);
+    filt0 = (v16i8) __msa_splati_h(filt, 0);
+    filt1 = (v16i8) __msa_splati_h(filt, 1);
+    filt2 = (v16i8) __msa_splati_h(filt, 2);
+    filt3 = (v16i8) __msa_splati_h(filt, 3);
+
+    for (cnt = (width >> 4); cnt--;) {
+        src_tmp = src;
+        dst_tmp = dst;
+
+        LOAD_7VECS_SB(src_tmp, src_stride,
+                      src0, src1, src2, src3, src4, src5, src6);
+        src_tmp += (7 * src_stride);
+
+        XORI_B_7VECS_SB(src0, src1, src2, src3, src4, src5, src6,
+                        src0, src1, src2, src3, src4, src5, src6, 128);
+
+        ILVR_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                        src1, src3, src5, src2, src4, src6,
+                        src10_r, src32_r, src54_r, src21_r, src43_r, src65_r);
+
+        ILVL_B_6VECS_SB(src0, src2, src4, src1, src3, src5,
+                        src1, src3, src5, src2, src4, src6,
+                        src10_l, src32_l, src54_l, src21_l, src43_l, src65_l);
+
+        for (loop_cnt = (height >> 2); loop_cnt--;) {
+            LOAD_4VECS_SB(src_tmp, src_stride, src7, src8, src9, src10);
+            src_tmp += (4 * src_stride);
+
+            XORI_B_4VECS_SB(src7, src8, src9, src10,
+                            src7, src8, src9, src10, 128);
+
+            ILVR_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                            src76_r, src87_r, src98_r, src109_r);
+
+            ILVL_B_4VECS_SB(src6, src7, src8, src9, src7, src8, src9, src10,
+                            src76_l, src87_l, src98_l, src109_l);
+
+            out0_r = FILT_8TAP_DPADD_S_H(src10_r, src32_r, src54_r, src76_r,
+                                         filt0, filt1, filt2, filt3);
+            out1_r = FILT_8TAP_DPADD_S_H(src21_r, src43_r, src65_r, src87_r,
+                                         filt0, filt1, filt2, filt3);
+            out2_r = FILT_8TAP_DPADD_S_H(src32_r, src54_r, src76_r, src98_r,
+                                         filt0, filt1, filt2, filt3);
+            out3_r = FILT_8TAP_DPADD_S_H(src43_r, src65_r, src87_r, src109_r,
+                                         filt0, filt1, filt2, filt3);
+
+            out0_l = FILT_8TAP_DPADD_S_H(src10_l, src32_l, src54_l, src76_l,
+                                         filt0, filt1, filt2, filt3);
+            out1_l = FILT_8TAP_DPADD_S_H(src21_l, src43_l, src65_l, src87_l,
+                                         filt0, filt1, filt2, filt3);
+            out2_l = FILT_8TAP_DPADD_S_H(src32_l, src54_l, src76_l, src98_l,
+                                         filt0, filt1, filt2, filt3);
+            out3_l = FILT_8TAP_DPADD_S_H(src43_l, src65_l, src87_l, src109_l,
+                                         filt0, filt1, filt2, filt3);
+
+            out0_r = SRAR_SATURATE_SIGNED_H(out0_r, rnd_vec, 7);
+            out1_r = SRAR_SATURATE_SIGNED_H(out1_r, rnd_vec, 7);
+            out2_r = SRAR_SATURATE_SIGNED_H(out2_r, rnd_vec, 7);
+            out3_r = SRAR_SATURATE_SIGNED_H(out3_r, rnd_vec, 7);
+            out0_l = SRAR_SATURATE_SIGNED_H(out0_l, rnd_vec, 7);
+            out1_l = SRAR_SATURATE_SIGNED_H(out1_l, rnd_vec, 7);
+            out2_l = SRAR_SATURATE_SIGNED_H(out2_l, rnd_vec, 7);
+            out3_l = SRAR_SATURATE_SIGNED_H(out3_l, rnd_vec, 7);
+
+            PCKEV_B_4VECS_UB(out0_l, out1_l, out2_l, out3_l, out0_r, out1_r,
+                             out2_r, out3_r, tmp0, tmp1, tmp2, tmp3);
+
+            XORI_B_4VECS_UB(tmp0, tmp1, tmp2, tmp3,
+                            tmp0, tmp1, tmp2, tmp3, 128);
+
+            STORE_4VECS_UB(dst_tmp, dst_stride, tmp0, tmp1, tmp2, tmp3);
+            dst_tmp += (4 * dst_stride);
+
+            src10_r = src54_r;
+            src32_r = src76_r;
+            src54_r = src98_r;
+            src21_r = src65_r;
+            src43_r = src87_r;
+            src65_r = src109_r;
+            src10_l = src54_l;
+            src32_l = src76_l;
+            src54_l = src98_l;
+            src21_l = src65_l;
+            src43_l = src87_l;
+            src65_l = src109_l;
+
+            src6 = src10;
+        }
+
+        src += 16;
+        dst += 16;
+    }
+}
+
+static void common_vt_8t_24w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride, filter,
+                              height, rnd_val, 16);
+
+    common_vt_8t_8w_msa(src + 16, src_stride, dst + 16, dst_stride,
+                        filter, height, rnd_val);
+}
+
+static void common_vt_8t_32w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride,
+                              filter, height, rnd_val, 32);
+}
+
+static void common_vt_8t_48w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride,
+                              filter, height, rnd_val, 48);
+}
+
+static void common_vt_8t_64w_msa(uint8_t *src, int32_t src_stride,
+                                 uint8_t *dst, int32_t dst_stride,
+                                 const int8_t *filter, int32_t height,
+                                 uint8_t rnd_val)
+{
+    common_vt_8t_16w_mult_msa(src, src_stride, dst, dst_stride,
+                              filter, height, rnd_val, 64);
+}
+
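+/* Width-8 copy: each row is moved as a single 64-bit element extracted from
+ * a vector register; the height loop is unrolled by 12, 8, 4 or 2 rows. */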
+static void copy_width8_msa(uint8_t *src, int32_t src_stride,
+                            uint8_t *dst, int32_t dst_stride,
+                            int32_t height)
+{
+    int32_t cnt;
+    uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
+    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+    if (0 == height % 12) {
+        for (cnt = (height / 12); cnt--;) {
+            LOAD_8VECS_UB(src, src_stride,
+                          src0, src1, src2, src3, src4, src5, src6, src7);
+            src += (8 * src_stride);
+
+            out0 = __msa_copy_u_d((v2i64) src0, 0);
+            out1 = __msa_copy_u_d((v2i64) src1, 0);
+            out2 = __msa_copy_u_d((v2i64) src2, 0);
+            out3 = __msa_copy_u_d((v2i64) src3, 0);
+            out4 = __msa_copy_u_d((v2i64) src4, 0);
+            out5 = __msa_copy_u_d((v2i64) src5, 0);
+            out6 = __msa_copy_u_d((v2i64) src6, 0);
+            out7 = __msa_copy_u_d((v2i64) src7, 0);
+
+            STORE_DWORD(dst, out0);
+            dst += dst_stride;
+            STORE_DWORD(dst, out1);
+            dst += dst_stride;
+            STORE_DWORD(dst, out2);
+            dst += dst_stride;
+            STORE_DWORD(dst, out3);
+            dst += dst_stride;
+            STORE_DWORD(dst, out4);
+            dst += dst_stride;
+            STORE_DWORD(dst, out5);
+            dst += dst_stride;
+            STORE_DWORD(dst, out6);
+            dst += dst_stride;
+            STORE_DWORD(dst, out7);
+            dst += dst_stride;
+
+            LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+            src += (4 * src_stride);
+
+            out0 = __msa_copy_u_d((v2i64) src0, 0);
+            out1 = __msa_copy_u_d((v2i64) src1, 0);
+            out2 = __msa_copy_u_d((v2i64) src2, 0);
+            out3 = __msa_copy_u_d((v2i64) src3, 0);
+
+            STORE_DWORD(dst, out0);
+            dst += dst_stride;
+            STORE_DWORD(dst, out1);
+            dst += dst_stride;
+            STORE_DWORD(dst, out2);
+            dst += dst_stride;
+            STORE_DWORD(dst, out3);
+            dst += dst_stride;
+        }
+    } else if (0 == height % 8) {
+        for (cnt = height >> 3; cnt--;) {
+            LOAD_8VECS_UB(src, src_stride,
+                          src0, src1, src2, src3, src4, src5, src6, src7);
+            src += (8 * src_stride);
+
+            out0 = __msa_copy_u_d((v2i64) src0, 0);
+            out1 = __msa_copy_u_d((v2i64) src1, 0);
+            out2 = __msa_copy_u_d((v2i64) src2, 0);
+            out3 = __msa_copy_u_d((v2i64) src3, 0);
+            out4 = __msa_copy_u_d((v2i64) src4, 0);
+            out5 = __msa_copy_u_d((v2i64) src5, 0);
+            out6 = __msa_copy_u_d((v2i64) src6, 0);
+            out7 = __msa_copy_u_d((v2i64) src7, 0);
+
+            STORE_DWORD(dst, out0);
+            dst += dst_stride;
+            STORE_DWORD(dst, out1);
+            dst += dst_stride;
+            STORE_DWORD(dst, out2);
+            dst += dst_stride;
+            STORE_DWORD(dst, out3);
+            dst += dst_stride;
+            STORE_DWORD(dst, out4);
+            dst += dst_stride;
+            STORE_DWORD(dst, out5);
+            dst += dst_stride;
+            STORE_DWORD(dst, out6);
+            dst += dst_stride;
+            STORE_DWORD(dst, out7);
+            dst += dst_stride;
+        }
+    } else if (0 == height % 4) {
+        for (cnt = (height / 4); cnt--;) {
+            LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+            src += (4 * src_stride);
+
+            out0 = __msa_copy_u_d((v2i64) src0, 0);
+            out1 = __msa_copy_u_d((v2i64) src1, 0);
+            out2 = __msa_copy_u_d((v2i64) src2, 0);
+            out3 = __msa_copy_u_d((v2i64) src3, 0);
+
+            STORE_DWORD(dst, out0);
+            dst += dst_stride;
+            STORE_DWORD(dst, out1);
+            dst += dst_stride;
+            STORE_DWORD(dst, out2);
+            dst += dst_stride;
+            STORE_DWORD(dst, out3);
+            dst += dst_stride;
+        }
+    } else if (0 == height % 2) {
+        for (cnt = (height / 2); cnt--;) {
+            LOAD_2VECS_UB(src, src_stride, src0, src1);
+            src += (2 * src_stride);
+
+            out0 = __msa_copy_u_d((v2i64) src0, 0);
+            out1 = __msa_copy_u_d((v2i64) src1, 0);
+
+            STORE_DWORD(dst, out0);
+            dst += dst_stride;
+            STORE_DWORD(dst, out1);
+            dst += dst_stride;
+        }
+    }
+}
+
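+/* Width-12 copy: each row is stored as an 8-byte doubleword plus a 4-byte
+ * word.  The loop is fixed at 2 x 8 = 16 rows; the height argument is
+ * unused, which appears to assume 12x16 blocks. */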
+static void copy_width12_msa(uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height)
+{
+    int32_t cnt;
+    uint64_t out0, out1, out2, out3, out4, out5, out6, out7;
+    uint32_t out8, out9, out10, out11, out12, out13, out14, out15;
+    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+    for (cnt = 2; cnt--;) {
+        LOAD_8VECS_UB(src, src_stride,
+                      src0, src1, src2, src3, src4, src5, src6, src7);
+        src += (8 * src_stride);
+
+        out0 = __msa_copy_u_d((v2i64) src0, 0);
+        out1 = __msa_copy_u_d((v2i64) src1, 0);
+        out2 = __msa_copy_u_d((v2i64) src2, 0);
+        out3 = __msa_copy_u_d((v2i64) src3, 0);
+        out4 = __msa_copy_u_d((v2i64) src4, 0);
+        out5 = __msa_copy_u_d((v2i64) src5, 0);
+        out6 = __msa_copy_u_d((v2i64) src6, 0);
+        out7 = __msa_copy_u_d((v2i64) src7, 0);
+
+        out8 = __msa_copy_u_w((v4i32) src0, 2);
+        out9 = __msa_copy_u_w((v4i32) src1, 2);
+        out10 = __msa_copy_u_w((v4i32) src2, 2);
+        out11 = __msa_copy_u_w((v4i32) src3, 2);
+        out12 = __msa_copy_u_w((v4i32) src4, 2);
+        out13 = __msa_copy_u_w((v4i32) src5, 2);
+        out14 = __msa_copy_u_w((v4i32) src6, 2);
+        out15 = __msa_copy_u_w((v4i32) src7, 2);
+
+        STORE_DWORD(dst, out0);
+        STORE_WORD(dst + 8, out8);
+        dst += dst_stride;
+        STORE_DWORD(dst, out1);
+        STORE_WORD(dst + 8, out9);
+        dst += dst_stride;
+        STORE_DWORD(dst, out2);
+        STORE_WORD(dst + 8, out10);
+        dst += dst_stride;
+        STORE_DWORD(dst, out3);
+        STORE_WORD(dst + 8, out11);
+        dst += dst_stride;
+        STORE_DWORD(dst, out4);
+        STORE_WORD(dst + 8, out12);
+        dst += dst_stride;
+        STORE_DWORD(dst, out5);
+        STORE_WORD(dst + 8, out13);
+        dst += dst_stride;
+        STORE_DWORD(dst, out6);
+        STORE_WORD(dst + 8, out14);
+        dst += dst_stride;
+        STORE_DWORD(dst, out7);
+        STORE_WORD(dst + 8, out15);
+        dst += dst_stride;
+    }
+}
+
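+/* Generic copy for widths that are multiples of 16 and heights that are
+ * multiples of 8: full-vector loads and stores in 16-column stripes. */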
+static void copy_16multx8mult_msa(uint8_t *src, int32_t src_stride,
+                                  uint8_t *dst, int32_t dst_stride,
+                                  int32_t height, int32_t width)
+{
+    int32_t cnt, loop_cnt;
+    uint8_t *src_tmp, *dst_tmp;
+    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+    for (cnt = (width >> 4); cnt--;) {
+        src_tmp = src;
+        dst_tmp = dst;
+
+        for (loop_cnt = (height >> 3); loop_cnt--;) {
+            LOAD_8VECS_UB(src_tmp, src_stride,
+                          src0, src1, src2, src3, src4, src5, src6, src7);
+            src_tmp += (8 * src_stride);
+
+            STORE_8VECS_UB(dst_tmp, dst_stride,
+                           src0, src1, src2, src3, src4, src5, src6, src7);
+            dst_tmp += (8 * dst_stride);
+        }
+
+        src += 16;
+        dst += 16;
+    }
+}
+
+static void copy_width16_msa(uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height)
+{
+    int32_t cnt;
+    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+    if (0 == height % 12) {
+        for (cnt = (height / 12); cnt--;) {
+            LOAD_8VECS_UB(src, src_stride,
+                          src0, src1, src2, src3, src4, src5, src6, src7);
+            src += (8 * src_stride);
+
+            STORE_8VECS_UB(dst, dst_stride,
+                           src0, src1, src2, src3, src4, src5, src6, src7);
+            dst += (8 * dst_stride);
+
+            LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+            src += (4 * src_stride);
+
+            STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+            dst += (4 * dst_stride);
+        }
+    } else if (0 == height % 8) {
+        copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
+    } else if (0 == height % 4) {
+        for (cnt = (height >> 2); cnt--;) {
+            LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+            src += (4 * src_stride);
+
+            STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+            dst += (4 * dst_stride);
+        }
+    }
+}
+
+static void copy_width24_msa(uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height)
+{
+    copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 16);
+
+    copy_width8_msa(src + 16, src_stride, dst + 16, dst_stride, height);
+}
+
+static void copy_width32_msa(uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height)
+{
+    int32_t cnt;
+    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
+
+    if (0 == height % 12) {
+        for (cnt = (height / 12); cnt--;) {
+            LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+            LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7);
+            src += (4 * src_stride);
+
+            STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+            STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
+            dst += (4 * dst_stride);
+
+            LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+            LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7);
+            src += (4 * src_stride);
+
+            STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+            STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
+            dst += (4 * dst_stride);
+
+            LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+            LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7);
+            src += (4 * src_stride);
+
+            STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+            STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
+            dst += (4 * dst_stride);
+        }
+    } else if (0 == height % 8) {
+        copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 32);
+    } else if (0 == height % 4) {
+        for (cnt = (height >> 2); cnt--;) {
+            LOAD_4VECS_UB(src, src_stride, src0, src1, src2, src3);
+            LOAD_4VECS_UB(src + 16, src_stride, src4, src5, src6, src7);
+            src += (4 * src_stride);
+
+            STORE_4VECS_UB(dst, dst_stride, src0, src1, src2, src3);
+            STORE_4VECS_UB(dst + 16, dst_stride, src4, src5, src6, src7);
+            dst += (4 * dst_stride);
+        }
+    }
+}
+
+static void copy_width48_msa(uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height)
+{
+    copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 48);
+}
+
+static void copy_width64_msa(uint8_t *src, int32_t src_stride,
+                             uint8_t *dst, int32_t dst_stride,
+                             int32_t height)
+{
+    copy_16multx8mult_msa(src, src_stride, dst, dst_stride, height, 64);
+}
+
 #define MC_COPY(WIDTH)                                                    \
 void ff_hevc_put_hevc_pel_pixels##WIDTH##_8_msa(int16_t *dst,             \
                                                 uint8_t *src,             \
@@ -2355,3 +4116,64 @@ MC_HV(qpel, hv, 48, 8, hv);
 MC_HV(qpel, hv, 64, 8, hv);
 
 #undef MC_HV
+
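+/* Thin wrappers with the put_hevc_qpel_uni function-pointer signature;
+ * mx, my and width are unused for a plain copy. */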
+#define UNI_MC_COPY(WIDTH)                                                 \
+void ff_hevc_put_hevc_uni_pel_pixels##WIDTH##_8_msa(uint8_t *dst,          \
+                                                    ptrdiff_t dst_stride,  \
+                                                    uint8_t *src,          \
+                                                    ptrdiff_t src_stride,  \
+                                                    int height,            \
+                                                    intptr_t mx,           \
+                                                    intptr_t my,           \
+                                                    int width)             \
+{                                                                          \
+    copy_width##WIDTH##_msa(src, src_stride, dst, dst_stride, height);     \
+}
+
+UNI_MC_COPY(8);
+UNI_MC_COPY(12);
+UNI_MC_COPY(16);
+UNI_MC_COPY(24);
+UNI_MC_COPY(32);
+UNI_MC_COPY(48);
+UNI_MC_COPY(64);
+
+#undef UNI_MC_COPY
+
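+/* DIR1/FILT_DIR bind the direction: mx selects the horizontal fraction,
+ * my the vertical one; the qpel filter table is indexed by fraction - 1.
+ * The rounding shift of 6 undoes the 8-tap filter weights, which sum to 64. */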
+#define UNI_MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR)                           \
+void ff_hevc_put_hevc_uni_##PEL##_##DIR####WIDTH##_8_msa(uint8_t *dst,         \
+                                                         ptrdiff_t             \
+                                                         dst_stride,           \
+                                                         uint8_t *src,         \
+                                                         ptrdiff_t             \
+                                                         src_stride,           \
+                                                         int height,           \
+                                                         intptr_t mx,          \
+                                                         intptr_t my,          \
+                                                         int width)            \
+{                                                                              \
+    const int8_t *filter = ff_hevc_##PEL##_filters[FILT_DIR - 1];              \
+                                                                               \
+    common_##DIR1##_##TAP##t_##WIDTH##w_msa(src, src_stride, dst, dst_stride,  \
+                                            filter, height, 6);                \
+}
+
+UNI_MC(qpel, h, 4, 8, hz, mx);
+UNI_MC(qpel, h, 8, 8, hz, mx);
+UNI_MC(qpel, h, 12, 8, hz, mx);
+UNI_MC(qpel, h, 16, 8, hz, mx);
+UNI_MC(qpel, h, 24, 8, hz, mx);
+UNI_MC(qpel, h, 32, 8, hz, mx);
+UNI_MC(qpel, h, 48, 8, hz, mx);
+UNI_MC(qpel, h, 64, 8, hz, mx);
+
+UNI_MC(qpel, v, 4, 8, vt, my);
+UNI_MC(qpel, v, 8, 8, vt, my);
+UNI_MC(qpel, v, 12, 8, vt, my);
+UNI_MC(qpel, v, 16, 8, vt, my);
+UNI_MC(qpel, v, 24, 8, vt, my);
+UNI_MC(qpel, v, 32, 8, vt, my);
+UNI_MC(qpel, v, 48, 8, vt, my);
+UNI_MC(qpel, v, 64, 8, vt, my);
+
+#undef UNI_MC
diff --git a/libavutil/mips/generic_macros_msa.h b/libavutil/mips/generic_macros_msa.h
index 4f41b6e..48dc78e 100644
--- a/libavutil/mips/generic_macros_msa.h
+++ b/libavutil/mips/generic_macros_msa.h
@@ -38,6 +38,8 @@
     out_m;                        \
 } )
 
+#define LOAD_UH(psrc) *((const v8u16 *)(psrc))
+
 #define LOAD_SH(psrc)             \
 ( {                               \
     v8i16 out_m;                  \
@@ -45,6 +47,8 @@
     out_m;                        \
 } )
 
+#define LOAD_SW(psrc) *((const v4i32 *)(psrc))
+
 #define STORE_UB(vec, pdest) *((v16u8 *)(pdest)) = (vec)
 #define STORE_SB(vec, pdest) *((v16i8 *)(pdest)) = (vec)
 
@@ -273,6 +277,13 @@
     src3 = LOAD_WORD(psrc + 3 * src_stride);             \
 }
 
+#define LOAD_2VECS_UB(psrc, stride,     \
+                      val0, val1)       \
+{                                       \
+    val0 = LOAD_UB(psrc + 0 * stride);  \
+    val1 = LOAD_UB(psrc + 1 * stride);  \
+}
+
 #define LOAD_2VECS_SB(psrc, stride,     \
                       val0, val1)       \
 {                                       \
@@ -280,6 +291,22 @@
     val1 = LOAD_SB(psrc + 1 * stride);  \
 }
 
+#define LOAD_3VECS_UB(psrc, stride,      \
+                      val0, val1, val2)  \
+{                                        \
+    val0 = LOAD_UB(psrc + 0 * stride);   \
+    val1 = LOAD_UB(psrc + 1 * stride);   \
+    val2 = LOAD_UB(psrc + 2 * stride);   \
+}
+
+#define LOAD_3VECS_SB(psrc, stride,      \
+                      val0, val1, val2)  \
+{                                        \
+    val0 = LOAD_SB(psrc + 0 * stride);   \
+    val1 = LOAD_SB(psrc + 1 * stride);   \
+    val2 = LOAD_SB(psrc + 2 * stride);   \
+}
+
 #define LOAD_4VECS_UB(psrc, stride,            \
                       val0, val1, val2, val3)  \
 {                                              \
@@ -298,6 +325,22 @@
     val3 = LOAD_SB(psrc + 3 * stride);         \
 }
 
+#define LOAD_5VECS_UB(psrc, stride,                  \
+                      out0, out1, out2, out3, out4)  \
+{                                                    \
+    LOAD_4VECS_UB((psrc), (stride),                  \
+                  (out0), (out1), (out2), (out3));   \
+    out4 = LOAD_UB((psrc) + 4 * (stride));           \
+}
+
+#define LOAD_5VECS_SB(psrc, stride,                  \
+                      out0, out1, out2, out3, out4)  \
+{                                                    \
+    LOAD_4VECS_SB((psrc), (stride),                  \
+                  (out0), (out1), (out2), (out3));   \
+    out4 = LOAD_SB((psrc) + 4 * (stride));           \
+}
+
 #define LOAD_6VECS_SB(psrc, stride,                        \
                       out0, out1, out2, out3, out4, out5)  \
 {                                                          \
@@ -307,6 +350,19 @@
                   (out4), (out5));                         \
 }
 
+#define LOAD_7VECS_UB(psrc, stride,            \
+                      val0, val1, val2, val3,  \
+                      val4, val5, val6)        \
+{                                              \
+    val0 = LOAD_UB((psrc) + 0 * (stride));     \
+    val1 = LOAD_UB((psrc) + 1 * (stride));     \
+    val2 = LOAD_UB((psrc) + 2 * (stride));     \
+    val3 = LOAD_UB((psrc) + 3 * (stride));     \
+    val4 = LOAD_UB((psrc) + 4 * (stride));     \
+    val5 = LOAD_UB((psrc) + 5 * (stride));     \
+    val6 = LOAD_UB((psrc) + 6 * (stride));     \
+}
+
 #define LOAD_7VECS_SB(psrc, stride,            \
                       val0, val1, val2, val3,  \
                       val4, val5, val6)        \
@@ -340,6 +396,76 @@
                   (out4), (out5), (out6), (out7));  \
 }
 
+#define LOAD_2VECS_UH(psrc, stride,         \
+                      val0, val1)           \
+{                                           \
+    val0 = LOAD_UH((psrc) + 0 * (stride));  \
+    val1 = LOAD_UH((psrc) + 1 * (stride));  \
+}
+
+#define LOAD_2VECS_SH(psrc, stride,         \
+                      val0, val1)           \
+{                                           \
+    val0 = LOAD_SH((psrc) + 0 * (stride));  \
+    val1 = LOAD_SH((psrc) + 1 * (stride));  \
+}
+
+#define LOAD_4VECS_UH(psrc, stride,                                \
+                      val0, val1, val2, val3)                      \
+{                                                                  \
+    LOAD_2VECS_UH((psrc), (stride), val0, val1);                   \
+    LOAD_2VECS_UH(((psrc) + 2 * (stride)), (stride), val2, val3);  \
+}
+
+#define LOAD_4VECS_SH(psrc, stride,                                \
+                      val0, val1, val2, val3)                      \
+{                                                                  \
+    LOAD_2VECS_SH((psrc), (stride), val0, val1);                   \
+    LOAD_2VECS_SH(((psrc) + 2 * (stride)), (stride), val2, val3);  \
+}
+
+#define LOAD_6VECS_SH(psrc, stride,                                \
+                      val0, val1, val2, val3, val4, val5)          \
+{                                                                  \
+    LOAD_2VECS_SH((psrc), (stride), val0, val1);                   \
+    LOAD_2VECS_SH(((psrc) + 2 * (stride)), (stride), val2, val3);  \
+    LOAD_2VECS_SH(((psrc) + 4 * (stride)), (stride), val4, val5);  \
+}
+
+#define LOAD_8VECS_UH(psrc, stride,                    \
+                      val0, val1, val2, val3,          \
+                      val4, val5, val6, val7)          \
+{                                                      \
+    LOAD_4VECS_UH((psrc), (stride),                    \
+                  val0, val1, val2, val3);             \
+    LOAD_4VECS_UH(((psrc) + 4 * (stride)), (stride),   \
+                  val4, val5, val6, val7);             \
+}
+
+#define LOAD_8VECS_SH(psrc, stride,                    \
+                      val0, val1, val2, val3,          \
+                      val4, val5, val6, val7)          \
+{                                                      \
+    LOAD_4VECS_SH((psrc), (stride),                    \
+                  val0, val1, val2, val3);             \
+    LOAD_4VECS_SH(((psrc) + 4 * (stride)), (stride),   \
+                  val4, val5, val6, val7);             \
+}
+
+#define LOAD_16VECS_SH(psrc, stride,                   \
+                       val0, val1, val2, val3,         \
+                       val4, val5, val6, val7,         \
+                       val8, val9, val10, val11,       \
+                       val12, val13, val14, val15)     \
+{                                                      \
+    LOAD_8VECS_SH((psrc), (stride),                    \
+                  val0, val1, val2, val3,              \
+                  val4, val5, val6, val7);             \
+    LOAD_8VECS_SH(((psrc) + 8 * (stride)), (stride),   \
+                  val8, val9, val10, val11,            \
+                  val12, val13, val14, val15);         \
+}
+
 #define STORE_4VECS_UB(dst_out, pitch,           \
                        in0, in1, in2, in3)       \
 {                                                \
@@ -358,6 +484,16 @@
     STORE_SB((in3), ((dst_out) + 3 * (pitch)));  \
 }
 
+#define STORE_8VECS_UB(dst_out, pitch_in,                      \
+                       in0, in1, in2, in3,                     \
+                       in4, in5, in6, in7)                     \
+{                                                              \
+    STORE_4VECS_UB((dst_out), (pitch_in),                      \
+                   in0, in1, in2, in3);                        \
+    STORE_4VECS_UB(((dst_out) + 4 * (pitch_in)), (pitch_in),   \
+                   in4, in5, in6, in7);                        \
+}
+
 #define STORE_2VECS_SH(ptr, stride,       \
                        in0, in1)          \
 {                                         \
@@ -419,6 +555,16 @@
     out_m;                                                \
 } )
 
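+/* Clamp each signed 32-bit lane to the unsigned 8-bit range [0, 255]. */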
+#define CLIP_UNSIGNED_CHAR_W(in)                          \
+( {                                                       \
+    v4i32 max_m = __msa_ldi_w(255);                       \
+    v4i32 out_m;                                          \
+                                                          \
+    out_m = __msa_maxi_s_w((v4i32) (in), 0);              \
+    out_m = __msa_min_s_w((v4i32) max_m, (v4i32) out_m);  \
+    out_m;                                                \
+} )
+
 #define TRANSPOSE4x4_B_UB(in0, in1, in2, in3,                   \
                           out0, out1, out2, out3)               \
 {                                                               \
@@ -458,6 +604,87 @@
     out3 = (v16u8) __msa_ilvl_d((v2i64) out0, (v2i64) out2);        \
 }
 
+#define TRANSPOSE8x4_B_UH(in0, in1, in2, in3,                       \
+                          in4, in5, in6, in7,                       \
+                          out0, out1, out2, out3)                   \
+{                                                                   \
+    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                           \
+                                                                    \
+    tmp0_m = (v16i8) __msa_ilvev_w((v4i32) (in4), (v4i32) (in0));   \
+    tmp1_m = (v16i8) __msa_ilvev_w((v4i32) (in5), (v4i32) (in1));   \
+    tmp2_m = __msa_ilvr_b(tmp1_m, tmp0_m);                          \
+    tmp0_m = (v16i8) __msa_ilvev_w((v4i32) (in6), (v4i32) (in2));   \
+    tmp1_m = (v16i8) __msa_ilvev_w((v4i32) (in7), (v4i32) (in3));   \
+                                                                    \
+    tmp3_m = __msa_ilvr_b(tmp1_m, tmp0_m);                          \
+    tmp0_m = (v16i8) __msa_ilvr_h((v8i16) tmp3_m, (v8i16) tmp2_m);  \
+    tmp1_m = (v16i8) __msa_ilvl_h((v8i16) tmp3_m, (v8i16) tmp2_m);  \
+                                                                    \
+    out0 = (v8u16) __msa_ilvr_w((v4i32) tmp1_m, (v4i32) tmp0_m);    \
+    out2 = (v8u16) __msa_ilvl_w((v4i32) tmp1_m, (v4i32) tmp0_m);    \
+    out1 = (v8u16) __msa_ilvl_d((v2i64) out2, (v2i64) out0);        \
+    out3 = (v8u16) __msa_ilvl_d((v2i64) out0, (v2i64) out2);        \
+}
+
+#define TRANSPOSE8x8_B_UB(in0, in1, in2, in3,                     \
+                          in4, in5, in6, in7,                     \
+                          out0, out1, out2, out3,                 \
+                          out4, out5, out6, out7)                 \
+{                                                                 \
+    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                         \
+    v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                         \
+    v16i8 zero_m = { 0 };                                         \
+                                                                  \
+    tmp0_m = __msa_ilvr_b((v16i8) (in2), (v16i8) (in0));          \
+    tmp1_m = __msa_ilvr_b((v16i8) (in3), (v16i8) (in1));          \
+    tmp2_m = __msa_ilvr_b((v16i8) (in6), (v16i8) (in4));          \
+    tmp3_m = __msa_ilvr_b((v16i8) (in7), (v16i8) (in5));          \
+                                                                  \
+    tmp4_m = __msa_ilvr_b((v16i8) tmp1_m, (v16i8) tmp0_m);        \
+    tmp5_m = __msa_ilvl_b((v16i8) tmp1_m, (v16i8) tmp0_m);        \
+    tmp6_m = __msa_ilvr_b((v16i8) tmp3_m, (v16i8) tmp2_m);        \
+    tmp7_m = __msa_ilvl_b((v16i8) tmp3_m, (v16i8) tmp2_m);        \
+                                                                  \
+    out0 = (v16u8) __msa_ilvr_w((v4i32) tmp6_m, (v4i32) tmp4_m);  \
+    out2 = (v16u8) __msa_ilvl_w((v4i32) tmp6_m, (v4i32) tmp4_m);  \
+    out4 = (v16u8) __msa_ilvr_w((v4i32) tmp7_m, (v4i32) tmp5_m);  \
+    out6 = (v16u8) __msa_ilvl_w((v4i32) tmp7_m, (v4i32) tmp5_m);  \
+                                                                  \
+    out1 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out0, 8);         \
+    out3 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out2, 8);         \
+    out5 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out4, 8);         \
+    out7 = (v16u8) __msa_sldi_b(zero_m, (v16i8) out6, 8);         \
+}
+
+#define TRANSPOSE8x8_B_UH(in0, in1, in2, in3,                     \
+                          in4, in5, in6, in7,                     \
+                          out0, out1, out2, out3,                 \
+                          out4, out5, out6, out7)                 \
+{                                                                 \
+    v16i8 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                         \
+    v16i8 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                         \
+    v16i8 zero_m = { 0 };                                         \
+                                                                  \
+    tmp0_m = __msa_ilvr_b((v16i8) (in2), (v16i8) (in0));          \
+    tmp1_m = __msa_ilvr_b((v16i8) (in3), (v16i8) (in1));          \
+    tmp2_m = __msa_ilvr_b((v16i8) (in6), (v16i8) (in4));          \
+    tmp3_m = __msa_ilvr_b((v16i8) (in7), (v16i8) (in5));          \
+                                                                  \
+    tmp4_m = __msa_ilvr_b((v16i8) tmp1_m, (v16i8) tmp0_m);        \
+    tmp5_m = __msa_ilvl_b((v16i8) tmp1_m, (v16i8) tmp0_m);        \
+    tmp6_m = __msa_ilvr_b((v16i8) tmp3_m, (v16i8) tmp2_m);        \
+    tmp7_m = __msa_ilvl_b((v16i8) tmp3_m, (v16i8) tmp2_m);        \
+                                                                  \
+    out0 = (v8u16) __msa_ilvr_w((v4i32) tmp6_m, (v4i32) tmp4_m);  \
+    out2 = (v8u16) __msa_ilvl_w((v4i32) tmp6_m, (v4i32) tmp4_m);  \
+    out4 = (v8u16) __msa_ilvr_w((v4i32) tmp7_m, (v4i32) tmp5_m);  \
+    out6 = (v8u16) __msa_ilvl_w((v4i32) tmp7_m, (v4i32) tmp5_m);  \
+    out1 = (v8u16) __msa_sldi_b(zero_m, (v16i8) out0, 8);         \
+    out3 = (v8u16) __msa_sldi_b(zero_m, (v16i8) out2, 8);         \
+    out5 = (v8u16) __msa_sldi_b(zero_m, (v16i8) out4, 8);         \
+    out7 = (v8u16) __msa_sldi_b(zero_m, (v16i8) out6, 8);         \
+}
+
 #define TRANSPOSE16x8_B_UB(in0, in1, in2, in3,                       \
                            in4, in5, in6, in7,                       \
                            in8, in9, in10, in11,                     \
@@ -509,6 +736,61 @@
     (out7) = (v16u8) __msa_ilvod_w((v4i32) tmp3_m, (v4i32) tmp2_m);  \
 }
 
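+/* 8x8 transpose of halfword elements, built from interleaves followed by
+ * even/odd doubleword packs. */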
+#define TRANSPOSE8x8_H_SH(in0, in1, in2, in3,                      \
+                          in4, in5, in6, in7,                      \
+                          out0, out1, out2, out3,                  \
+                          out4, out5, out6, out7)                  \
+{                                                                  \
+    v8i16 s0_m, s1_m;                                              \
+    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m;                          \
+    v8i16 tmp4_m, tmp5_m, tmp6_m, tmp7_m;                          \
+                                                                   \
+    s0_m = __msa_ilvr_h((v8i16) (in6), (v8i16) (in4));             \
+    s1_m = __msa_ilvr_h((v8i16) (in7), (v8i16) (in5));             \
+    tmp0_m = __msa_ilvr_h((v8i16) s1_m, (v8i16) s0_m);             \
+    tmp1_m = __msa_ilvl_h((v8i16) s1_m, (v8i16) s0_m);             \
+                                                                   \
+    s0_m = __msa_ilvl_h((v8i16) (in6), (v8i16) (in4));             \
+    s1_m = __msa_ilvl_h((v8i16) (in7), (v8i16) (in5));             \
+    tmp2_m = __msa_ilvr_h((v8i16) s1_m, (v8i16) s0_m);             \
+    tmp3_m = __msa_ilvl_h((v8i16) s1_m, (v8i16) s0_m);             \
+                                                                   \
+    s0_m = __msa_ilvr_h((v8i16) (in2), (v8i16) (in0));             \
+    s1_m = __msa_ilvr_h((v8i16) (in3), (v8i16) (in1));             \
+    tmp4_m = __msa_ilvr_h((v8i16) s1_m, (v8i16) s0_m);             \
+    tmp5_m = __msa_ilvl_h((v8i16) s1_m, (v8i16) s0_m);             \
+                                                                   \
+    s0_m = __msa_ilvl_h((v8i16) (in2), (v8i16) (in0));             \
+    s1_m = __msa_ilvl_h((v8i16) (in3), (v8i16) (in1));             \
+    tmp6_m = __msa_ilvr_h((v8i16) s1_m, (v8i16) s0_m);             \
+    tmp7_m = __msa_ilvl_h((v8i16) s1_m, (v8i16) s0_m);             \
+                                                                   \
+    out0 = (v8i16) __msa_pckev_d((v2i64) tmp0_m, (v2i64) tmp4_m);  \
+    out1 = (v8i16) __msa_pckod_d((v2i64) tmp0_m, (v2i64) tmp4_m);  \
+    out2 = (v8i16) __msa_pckev_d((v2i64) tmp1_m, (v2i64) tmp5_m);  \
+    out3 = (v8i16) __msa_pckod_d((v2i64) tmp1_m, (v2i64) tmp5_m);  \
+    out4 = (v8i16) __msa_pckev_d((v2i64) tmp2_m, (v2i64) tmp6_m);  \
+    out5 = (v8i16) __msa_pckod_d((v2i64) tmp2_m, (v2i64) tmp6_m);  \
+    out6 = (v8i16) __msa_pckev_d((v2i64) tmp3_m, (v2i64) tmp7_m);  \
+    out7 = (v8i16) __msa_pckod_d((v2i64) tmp3_m, (v2i64) tmp7_m);  \
+}
+
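+/* 4x4 transpose of word elements. */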
+#define TRANSPOSE4x4_W(in0, in1, in2, in3,                    \
+                       out0, out1, out2, out3)                \
+{                                                             \
+    v4i32 s0_m, s1_m, s2_m, s3_m;                             \
+                                                              \
+    s0_m = __msa_ilvr_w((v4i32) (in1), (v4i32) (in0));        \
+    s1_m = __msa_ilvl_w((v4i32) (in1), (v4i32) (in0));        \
+    s2_m = __msa_ilvr_w((v4i32) (in3), (v4i32) (in2));        \
+    s3_m = __msa_ilvl_w((v4i32) (in3), (v4i32) (in2));        \
+                                                              \
+    out0 = (v4i32) __msa_ilvr_d((v2i64) s2_m, (v2i64) s0_m);  \
+    out1 = (v4i32) __msa_ilvl_d((v2i64) s2_m, (v2i64) s0_m);  \
+    out2 = (v4i32) __msa_ilvr_d((v2i64) s3_m, (v2i64) s1_m);  \
+    out3 = (v4i32) __msa_ilvl_d((v2i64) s3_m, (v2i64) s1_m);  \
+}
+
 #define ILV_B_LRLR_SB(in0, in1, in2, in3,               \
                       out0, out1, out2, out3)           \
 {                                                       \
@@ -527,6 +809,24 @@
     out3 = (v8u16) __msa_ilvr_b((v16i8) (in3), (v16i8) (in2));  \
 }
 
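+/* The "LRLR" variants emit left- and right-half interleaves alternately:
+ * out0/out2 take the upper halves, out1/out3 the lower halves. */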
+#define ILV_B_LRLR_SH(in0, in1, in2, in3,                       \
+                      out0, out1, out2, out3)                   \
+{                                                               \
+    out0 = (v8i16) __msa_ilvl_b((v16i8) (in1), (v16i8) (in0));  \
+    out1 = (v8i16) __msa_ilvr_b((v16i8) (in1), (v16i8) (in0));  \
+    out2 = (v8i16) __msa_ilvl_b((v16i8) (in3), (v16i8) (in2));  \
+    out3 = (v8i16) __msa_ilvr_b((v16i8) (in3), (v16i8) (in2));  \
+}
+
+#define ILV_H_LRLR_SW(in0, in1, in2, in3,                       \
+                      out0, out1, out2, out3)                   \
+{                                                               \
+    out0 = (v4i32) __msa_ilvl_h((v8i16) (in1), (v8i16) (in0));  \
+    out1 = (v4i32) __msa_ilvr_h((v8i16) (in1), (v8i16) (in0));  \
+    out2 = (v4i32) __msa_ilvl_h((v8i16) (in3), (v8i16) (in2));  \
+    out3 = (v4i32) __msa_ilvr_h((v8i16) (in3), (v8i16) (in2));  \
+}
+
 #define ILVR_B_2VECS_UB(in0_r, in1_r, in0_l, in1_l,                 \
                         out0, out1)                                 \
 {                                                                   \
@@ -597,6 +897,13 @@
     out1 = (v8i16) __msa_ilvr_b((v16i8) (in1_l), (v16i8) (in1_r));  \
 }
 
+#define ILVR_B_3VECS_SH(in0_r, in1_r, in2_r, in0_l, in1_l, in2_l,   \
+                        out0, out1, out2)                           \
+{                                                                   \
+    ILVR_B_2VECS_SH(in0_r, in1_r, in0_l, in1_l, out0, out1);        \
+    out2 = (v8i16) __msa_ilvr_b((v16i8) (in2_l), (v16i8) (in2_r));  \
+}
+
 #define ILVR_B_4VECS_UH(in0_r, in1_r, in2_r, in3_r,  \
                         in0_l, in1_l, in2_l, in3_l,  \
                         out0, out1, out2, out3)      \
@@ -624,6 +931,16 @@
     out1 = __msa_ilvr_h((v8i16) (in1_l), (v8i16) (in1_r));  \
 }
 
+#define ILVR_H_4VECS_SH(in0_r, in1_r, in2_r, in3_r,  \
+                        in0_l, in1_l, in2_l, in3_l,  \
+                        out0, out1, out2, out3)      \
+{                                                    \
+    ILVR_H_2VECS_SH(in0_r, in1_r, in0_l, in1_l,      \
+                    out0, out1);                     \
+    ILVR_H_2VECS_SH(in2_r, in3_r, in2_l, in3_l,      \
+                    out2, out3);                     \
+}
+
 #define ILVR_H_6VECS_SH(in0_r, in1_r, in2_r,     \
                         in3_r, in4_r, in5_r,     \
                         in0_l, in1_l, in2_l,     \
@@ -639,6 +956,23 @@
                     out4, out5);                 \
 }
 
+#define ILVR_H_8VECS_SH(in0_r, in1_r, in2_r, in3_r,  \
+                        in4_r, in5_r, in6_r, in7_r,  \
+                        in0_l, in1_l, in2_l, in3_l,  \
+                        in4_l, in5_l, in6_l, in7_l,  \
+                        out0, out1, out2, out3,      \
+                        out4, out5, out6, out7)      \
+{                                                    \
+    ILVR_H_2VECS_SH(in0_r, in1_r, in0_l, in1_l,      \
+                    out0, out1);                     \
+    ILVR_H_2VECS_SH(in2_r, in3_r, in2_l, in3_l,      \
+                    out2, out3);                     \
+    ILVR_H_2VECS_SH(in4_r, in5_r, in4_l, in5_l,      \
+                    out4, out5);                     \
+    ILVR_H_2VECS_SH(in6_r, in7_r, in6_l, in7_l,      \
+                    out6, out7);                     \
+}
+
 #define ILVL_B_2VECS_SB(in0_r, in1_r, in0_l, in1_l,         \
                         out0, out1)                         \
 {                                                           \
@@ -678,6 +1012,16 @@
     out1 = __msa_ilvl_h((v8i16) (in1_l), (v8i16) (in1_r));  \
 }
 
+#define ILVL_H_4VECS_SH(in0_r, in1_r, in2_r, in3_r,  \
+                        in0_l, in1_l, in2_l, in3_l,  \
+                        out0, out1, out2, out3)      \
+{                                                    \
+    ILVL_H_2VECS_SH(in0_r, in1_r, in0_l, in1_l,      \
+                    out0, out1);                     \
+    ILVL_H_2VECS_SH(in2_r, in3_r, in2_l, in3_l,      \
+                    out2, out3);                     \
+}
+
 #define ILVL_H_6VECS_SH(in0_r, in1_r, in2_r,     \
                         in3_r, in4_r, in5_r,     \
                         in0_l, in1_l, in2_l,     \
@@ -693,6 +1037,23 @@
                     out4, out5);                 \
 }
 
+#define ILVL_H_8VECS_SH(in0_r, in1_r, in2_r, in3_r,  \
+                        in4_r, in5_r, in6_r, in7_r,  \
+                        in0_l, in1_l, in2_l, in3_l,  \
+                        in4_l, in5_l, in6_l, in7_l,  \
+                        out0, out1, out2, out3,      \
+                        out4, out5, out6, out7)      \
+{                                                    \
+    ILVL_H_2VECS_SH(in0_r, in1_r, in0_l, in1_l,      \
+                    out0, out1);                     \
+    ILVL_H_2VECS_SH(in2_r, in3_r, in2_l, in3_l,      \
+                    out2, out3);                     \
+    ILVL_H_2VECS_SH(in4_r, in5_r, in4_l, in5_l,      \
+                    out4, out5);                     \
+    ILVL_H_2VECS_SH(in6_r, in7_r, in6_l, in7_l,      \
+                    out6, out7);                     \
+}
+
 #define ILVR_D_2VECS_SB(out0, in0_l, in0_r,                         \
                         out1, in1_l, in1_r)                         \
 {                                                                   \
@@ -756,6 +1117,12 @@
     out3 = __msa_pckev_b((v16i8) (in3_l), (v16i8) (in3_r));  \
 }
 
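+/* XORI with 128 toggles the MSB of each byte, converting unsigned pixels
+ * to the signed domain expected by the dot-product filters and back. */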
+#define XORI_B_2VECS_UB(val0, val1, out0, out1, xor_val)  \
+{                                                         \
+    out0 = __msa_xori_b((v16u8) (val0), (xor_val));       \
+    out1 = __msa_xori_b((v16u8) (val1), (xor_val));       \
+}
+
 #define XORI_B_2VECS_SB(val0, val1,                          \
                         out0, out1, xor_val)                 \
 {                                                            \
@@ -772,6 +1139,13 @@
     out2 = (v16i8) __msa_xori_b((v16u8) (val2), (xor_val));  \
 }
 
+#define XORI_B_4VECS_UB(val0, val1, val2, val3,           \
+                        out0, out1, out2, out3, xor_val)  \
+{                                                         \
+    XORI_B_2VECS_UB(val0, val1, out0, out1, xor_val);     \
+    XORI_B_2VECS_UB(val2, val3, out2, out3, xor_val);     \
+}
+
 #define XORI_B_4VECS_SB(val0, val1, val2, val3,  \
                         out0, out1, out2, out3,  \
                         xor_val)                 \
@@ -792,6 +1166,15 @@
                     out3, out4, xor_val);              \
 }
 
+#define XORI_B_6VECS_SB(val0, val1, val2, val3, val4, val5,  \
+                        out0, out1, out2, out3, out4, out5,  \
+                        xor_val)                             \
+{                                                            \
+    XORI_B_4VECS_SB(val0, val1, val2, val3,                  \
+                    out0, out1, out2, out3, xor_val);        \
+    XORI_B_2VECS_SB(val4, val5, out4, out5, xor_val);        \
+}
+
 #define XORI_B_7VECS_SB(val0, val1, val2, val3,        \
                         val4, val5, val6,              \
                         out0, out1, out2, out3,        \
@@ -842,6 +1225,140 @@
     out3 = (v8u16) __msa_srl_h((v8i16) (in3), (v8i16) (shift_right_vec));  \
 }
 
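+/* Rounding arithmetic shift right followed by signed saturation to
+ * (sat_val + 1) bits.  For example, with right_shift_vec filled with 6 and
+ * sat_val 7, each lane becomes (x + 32) >> 6 clamped to [-128, 127],
+ * ready for the xori-128 conversion back to unsigned pixels. */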
+#define SRAR_SATURATE_SIGNED_H(input, right_shift_vec, sat_val)        \
+( {                                                                    \
+    v8i16 out_m;                                                       \
+                                                                       \
+    out_m = __msa_srar_h((v8i16) (input), (v8i16) (right_shift_vec));  \
+    out_m = __msa_sat_s_h(out_m, (sat_val));                           \
+    out_m;                                                             \
+} )
+
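+/* The PCKEV_*_XORI128_STORE_* helpers pack the even (low) bytes of filtered
+ * halfword results, xor with 128 to return to unsigned pixel values, and
+ * store the requested number of bytes per row. */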
+#define PCKEV_2B_XORI128_STORE_4_BYTES_4(in1, in2,         \
+                                         pdst, stride)     \
+{                                                          \
+    uint32_t out0_m, out1_m, out2_m, out3_m;               \
+    v16i8 tmp0_m;                                          \
+    uint8_t *dst_m = (uint8_t *) (pdst);                   \
+                                                           \
+    tmp0_m = __msa_pckev_b((v16i8) (in2), (v16i8) (in1));  \
+    tmp0_m = (v16i8) __msa_xori_b((v16u8) tmp0_m, 128);    \
+                                                           \
+    out0_m = __msa_copy_u_w((v4i32) tmp0_m, 0);            \
+    out1_m = __msa_copy_u_w((v4i32) tmp0_m, 1);            \
+    out2_m = __msa_copy_u_w((v4i32) tmp0_m, 2);            \
+    out3_m = __msa_copy_u_w((v4i32) tmp0_m, 3);            \
+                                                           \
+    STORE_WORD(dst_m, out0_m);                             \
+    dst_m += stride;                                       \
+    STORE_WORD(dst_m, out1_m);                             \
+    dst_m += stride;                                       \
+    STORE_WORD(dst_m, out2_m);                             \
+    dst_m += stride;                                       \
+    STORE_WORD(dst_m, out3_m);                             \
+}
+
+#define PCKEV_B_XORI128_STORE_8_BYTES(in1, in2, pdest)    \
+{                                                         \
+    uint64_t out_m;                                       \
+    v16i8 tmp_m;                                          \
+                                                          \
+    tmp_m = __msa_pckev_b((v16i8) (in1), (v16i8) (in2));  \
+    tmp_m = (v16i8) __msa_xori_b((v16u8) tmp_m, 128);     \
+    out_m = __msa_copy_u_d((v2i64) tmp_m, 0);             \
+    STORE_DWORD((pdest), out_m);                          \
+}
+
+#define PCKEV_B_XORI128_STORE_8_BYTES_2(in1, in2,          \
+                                        pdst, stride)      \
+{                                                          \
+    uint64_t out0_m, out1_m;                               \
+    v16i8 tmp0_m;                                          \
+    uint8_t *dst_m = (uint8_t *) (pdst);                   \
+                                                           \
+    tmp0_m = __msa_pckev_b((v16i8) (in2), (v16i8) (in1));  \
+    tmp0_m = (v16i8) __msa_xori_b((v16u8) tmp0_m, 128);    \
+                                                           \
+    out0_m = __msa_copy_u_d((v2i64) tmp0_m, 0);            \
+    out1_m = __msa_copy_u_d((v2i64) tmp0_m, 1);            \
+                                                           \
+    STORE_DWORD(dst_m, out0_m);                            \
+    dst_m += stride;                                       \
+    STORE_DWORD(dst_m, out1_m);                            \
+}
+
+#define PCKEV_B_XORI128_STORE_6_BYTES_4(in1, in2, in3, in4,  \
+                                        pdst, stride)        \
+{                                                            \
+    uint32_t out0_m, out1_m, out2_m, out3_m;                 \
+    uint16_t out4_m, out5_m, out6_m, out7_m;                 \
+    v16i8 tmp0_m, tmp1_m;                                    \
+    uint8_t *dst_m = (uint8_t *) (pdst);                     \
+                                                             \
+    tmp0_m = __msa_pckev_b((v16i8) (in2), (v16i8) (in1));    \
+    tmp1_m = __msa_pckev_b((v16i8) (in4), (v16i8) (in3));    \
+                                                             \
+    tmp0_m = (v16i8) __msa_xori_b((v16u8) tmp0_m, 128);      \
+    tmp1_m = (v16i8) __msa_xori_b((v16u8) tmp1_m, 128);      \
+                                                             \
+    out0_m = __msa_copy_u_w((v4i32) tmp0_m, 0);              \
+    out1_m = __msa_copy_u_w((v4i32) tmp0_m, 2);              \
+    out2_m = __msa_copy_u_w((v4i32) tmp1_m, 0);              \
+    out3_m = __msa_copy_u_w((v4i32) tmp1_m, 2);              \
+                                                             \
+    out4_m = __msa_copy_u_h((v8i16) tmp0_m, 2);              \
+    out5_m = __msa_copy_u_h((v8i16) tmp0_m, 6);              \
+    out6_m = __msa_copy_u_h((v8i16) tmp1_m, 2);              \
+    out7_m = __msa_copy_u_h((v8i16) tmp1_m, 6);              \
+                                                             \
+    STORE_WORD(dst_m, out0_m);                               \
+    STORE_HWORD((dst_m + 4), out4_m);                        \
+    dst_m += stride;                                         \
+    STORE_WORD(dst_m, out1_m);                               \
+    STORE_HWORD((dst_m + 4), out5_m);                        \
+    dst_m += stride;                                         \
+    STORE_WORD(dst_m, out2_m);                               \
+    STORE_HWORD((dst_m + 4), out6_m);                        \
+    dst_m += stride;                                         \
+    STORE_WORD(dst_m, out3_m);                               \
+    STORE_HWORD((dst_m + 4), out7_m);                        \
+}
+
+#define PCKEV_B_4_XORI128_STORE_8_BYTES_4(in1, in2, in3, in4,  \
+                                          pdst, stride)        \
+{                                                              \
+    uint64_t out0_m, out1_m, out2_m, out3_m;                   \
+    v16i8 tmp0_m, tmp1_m;                                      \
+    uint8_t *dst_m = (uint8_t *) (pdst);                       \
+                                                               \
+    tmp0_m = __msa_pckev_b((v16i8) (in2), (v16i8) (in1));      \
+    tmp1_m = __msa_pckev_b((v16i8) (in4), (v16i8) (in3));      \
+                                                               \
+    tmp0_m = (v16i8) __msa_xori_b((v16u8) tmp0_m, 128);        \
+    tmp1_m = (v16i8) __msa_xori_b((v16u8) tmp1_m, 128);        \
+                                                               \
+    out0_m = __msa_copy_u_d((v2i64) tmp0_m, 0);                \
+    out1_m = __msa_copy_u_d((v2i64) tmp0_m, 1);                \
+    out2_m = __msa_copy_u_d((v2i64) tmp1_m, 0);                \
+    out3_m = __msa_copy_u_d((v2i64) tmp1_m, 1);                \
+                                                               \
+    STORE_DWORD(dst_m, out0_m);                                \
+    dst_m += stride;                                           \
+    STORE_DWORD(dst_m, out1_m);                                \
+    dst_m += stride;                                           \
+    STORE_DWORD(dst_m, out2_m);                                \
+    dst_m += stride;                                           \
+    STORE_DWORD(dst_m, out3_m);                                \
+}
+
+#define PCKEV_B_XORI128_STORE_VEC(in1, in2, pdest)        \
+{                                                         \
+    v16i8 tmp_m;                                          \
+                                                          \
+    tmp_m = __msa_pckev_b((v16i8) (in1), (v16i8) (in2));  \
+    tmp_m = (v16i8) __msa_xori_b((v16u8) tmp_m, 128);     \
+    STORE_SB(tmp_m, (pdest));                             \
+}
+
 #define PCKEV_B_STORE_4_BYTES_4(in1, in2, in3, in4,        \
                                 pdst, stride)              \
 {                                                          \
@@ -890,4 +1407,20 @@
     STORE_DWORD(dst_m, out3_m);                            \
 }
 
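+/* Sign-extend bytes to halfwords: clti_s_b(in, 0) yields an all-ones mask
+ * for negative lanes, which the interleaves place in the high bytes. */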
+#define UNPCK_SIGNED_B_TO_H(in, out1, out2)            \
+{                                                      \
+    v16i8 tmp_m;                                       \
+                                                       \
+    tmp_m = __msa_clti_s_b((v16i8) (in), 0);           \
+    out1 = (v8i16) __msa_ilvr_b(tmp_m, (v16i8) (in));  \
+    out2 = (v8i16) __msa_ilvl_b(tmp_m, (v16i8) (in));  \
+}
+
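+/* Classic XOR swap; note it zeroes both operands if Vec0 and Vec1 name the
+ * same variable. */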
+#define SWAP_VECS(Vec0, Vec1)  \
+{                              \
+    Vec0 = Vec0 ^ Vec1;        \
+    Vec1 = Vec0 ^ Vec1;        \
+    Vec0 = Vec0 ^ Vec1;        \
+}
+
 #endif  /* AVUTIL_MIPS_GENERIC_MACROS_MSA_H */
-- 
2.3.2
