[FFmpeg-cvslog] lavc/rv40dsp: R-V V chroma_mc
sunyuechi
git at videolan.org
Fri May 3 18:01:57 EEST 2024
ffmpeg | branch: master | sunyuechi <sunyuechi at iscas.ac.cn> | Tue Apr 30 18:24:00 2024 +0800| [5bc3b7f51308b8027e5468ef60d8336a960193e2] | committer: Rémi Denis-Courmont
lavc/rv40dsp: R-V V chroma_mc
This is similar to h264, but here we use manual_avg instead of vaaddu
because rv40's OP differs from h264. If we use vaaddu,
rv40 would need to repeatedly switch between vxrm=0 and vxrm=2,
and switching vxrm is very slow.
C908:
avg_chroma_mc4_c: 2330.0
avg_chroma_mc4_rvv_i32: 602.7
avg_chroma_mc8_c: 1211.0
avg_chroma_mc8_rvv_i32: 602.7
put_chroma_mc4_c: 1825.0
put_chroma_mc4_rvv_i32: 414.7
put_chroma_mc8_c: 932.0
put_chroma_mc8_rvv_i32: 414.7
Signed-off-by: Rémi Denis-Courmont <remi at remlab.net>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=5bc3b7f51308b8027e5468ef60d8336a960193e2
---
libavcodec/riscv/Makefile | 2 +
libavcodec/riscv/rv40dsp_init.c | 51 ++++++
libavcodec/riscv/rv40dsp_rvv.S | 371 ++++++++++++++++++++++++++++++++++++++++
libavcodec/rv34dsp.h | 1 +
libavcodec/rv40dsp.c | 2 +
5 files changed, 427 insertions(+)
diff --git a/libavcodec/riscv/Makefile b/libavcodec/riscv/Makefile
index 69ccd0896d..11d47f9a57 100644
--- a/libavcodec/riscv/Makefile
+++ b/libavcodec/riscv/Makefile
@@ -48,6 +48,8 @@ RV-OBJS-$(CONFIG_PIXBLOCKDSP) += riscv/pixblockdsp_rvi.o
RVV-OBJS-$(CONFIG_PIXBLOCKDSP) += riscv/pixblockdsp_rvv.o
OBJS-$(CONFIG_RV34DSP) += riscv/rv34dsp_init.o
RVV-OBJS-$(CONFIG_RV34DSP) += riscv/rv34dsp_rvv.o
+OBJS-$(CONFIG_RV40_DECODER) += riscv/rv40dsp_init.o
+RVV-OBJS-$(CONFIG_RV40_DECODER) += riscv/rv40dsp_rvv.o
OBJS-$(CONFIG_SVQ1_ENCODER) += riscv/svqenc_init.o
RVV-OBJS-$(CONFIG_SVQ1_ENCODER) += riscv/svqenc_rvv.o
OBJS-$(CONFIG_TAK_DECODER) += riscv/takdsp_init.o
diff --git a/libavcodec/riscv/rv40dsp_init.c b/libavcodec/riscv/rv40dsp_init.c
new file mode 100644
index 0000000000..f5a5510b28
--- /dev/null
+++ b/libavcodec/riscv/rv40dsp_init.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2024 Institute of Software Chinese Academy of Sciences (ISCAS).
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/riscv/cpu.h"
+#include "libavcodec/rv34dsp.h"
+
+void ff_put_rv40_chroma_mc8_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_put_rv40_chroma_mc4_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+
+void ff_avg_rv40_chroma_mc8_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+void ff_avg_rv40_chroma_mc4_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+ int h, int x, int y);
+
+av_cold void ff_rv40dsp_init_riscv(RV34DSPContext *c)
+{
+#if HAVE_RVV
+ int flags = av_get_cpu_flags();
+
+ if ((flags & AV_CPU_FLAG_RVV_I32) && ff_get_rv_vlenb() >= 16 &&
+ (flags & AV_CPU_FLAG_RVB_ADDR)) {
+ c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_rvv;
+ c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_rvv;
+ c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_rvv;
+ c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_rvv;
+ }
+#endif
+}
diff --git a/libavcodec/riscv/rv40dsp_rvv.S b/libavcodec/riscv/rv40dsp_rvv.S
new file mode 100644
index 0000000000..e49345ef70
--- /dev/null
+++ b/libavcodec/riscv/rv40dsp_rvv.S
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2024 Institute of Software Chinese Academy of Sciences (ISCAS).
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/riscv/asm.S"
+
+.macro manual_avg dst src1 src2
+ vadd.vv \dst, \src1, \src2
+ vadd.vi \dst, \dst, 1
+ vsrl.vi \dst, \dst, 1
+.endm
+
+.macro do_chroma_mc type unroll
+ csrwi vxrm, 2
+ slli t2, a5, 3
+ mul t1, a5, a4
+ sh3add a5, a4, t2
+ slli a4, a4, 3
+ sub a5, t1, a5
+ sub a7, a4, t1
+ addi a6, a5, 64
+ sub t0, t2, t1
+ vsetvli t3, t6, e8, m1, ta, mu
+ beqz t1, 2f
+ blez a3, 8f
+ li t4, 0
+ li t2, 0
+ li t5, 1
+ addi a5, t3, 1
+ slli t3, a2, (1 + \unroll)
+1: # if (xy != 0)
+ add a4, a1, t4
+ vsetvli zero, a5, e8, m1, ta, ma
+ .ifc \unroll,1
+ addi t2, t2, 4
+ .else
+ addi t2, t2, 2
+ .endif
+ vle8.v v10, (a4)
+ add a4, a4, a2
+ vslide1down.vx v11, v10, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmulu.vx v8, v10, a6
+ vwmaccu.vx v8, a7, v11
+ vsetvli zero, a5, e8, m1, ta, ma
+ vle8.v v12, (a4)
+ vsetvli zero, t6, e8, m1, ta, ma
+ add a4, a4, a2
+ vwmaccu.vx v8, t0, v12
+ vsetvli zero, a5, e8, m1, ta, ma
+ vslide1down.vx v13, v12, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmulu.vx v10, v12, a6
+ vwmaccu.vx v8, t1, v13
+ vwmaccu.vx v10, a7, v13
+ vsetvli zero, a5, e8, m1, ta, ma
+ vle8.v v14, (a4)
+ vsetvli zero, t6, e8, m1, ta, ma
+ add a4, a4, a2
+ vwmaccu.vx v10, t0, v14
+ vsetvli zero, a5, e8, m1, ta, ma
+ vslide1down.vx v15, v14, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmulu.vx v12, v14, a6
+ vwmaccu.vx v10, t1, v15
+ vwmaccu.vx v12, a7, v15
+ vnclipu.wi v15, v8, 6
+ .ifc \type,avg
+ vle8.v v9, (a0)
+ manual_avg v15, v15, v9
+ .endif
+ vse8.v v15, (a0)
+ add a0, a0, a2
+ vnclipu.wi v8, v10, 6
+ .ifc \type,avg
+ vle8.v v9, (a0)
+ manual_avg v8, v8, v9
+ .endif
+ add t4, t4, t3
+ vse8.v v8, (a0)
+ add a0, a0, a2
+ .ifc \unroll,1
+ vsetvli zero, a5, e8, m1, ta, ma
+ vle8.v v14, (a4)
+ vsetvli zero, t6, e8, m1, ta, ma
+ add a4, a4, a2
+ vwmaccu.vx v12, t0, v14
+ vsetvli zero, a5, e8, m1, ta, ma
+ vslide1down.vx v15, v14, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmulu.vx v16, v14, a6
+ vwmaccu.vx v12, t1, v15
+ vwmaccu.vx v16, a7, v15
+ vsetvli zero, a5, e8, m1, ta, ma
+ vle8.v v14, (a4)
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmaccu.vx v16, t0, v14
+ vsetvli zero, a5, e8, m1, ta, ma
+ vslide1down.vx v14, v14, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmaccu.vx v16, t1, v14
+ vnclipu.wi v8, v12, 6
+ .ifc \type,avg
+ vle8.v v9, (a0)
+ manual_avg v8, v8, v9
+ .endif
+ vse8.v v8, (a0)
+ add a0, a0, a2
+ vnclipu.wi v8, v16, 6
+ .ifc \type,avg
+ vle8.v v9, (a0)
+ manual_avg v8, v8, v9
+ .endif
+ vse8.v v8, (a0)
+ add a0, a0, a2
+ .endif
+ blt t2, a3, 1b
+ j 8f
+2:
+ bnez a4, 4f
+ beqz t2, 4f
+ blez a3, 8f
+ li a4, 0
+ li t1, 0
+ slli a7, a2, (1 + \unroll)
+3: # if ((x8 - xy) == 0 && (y8 - xy) != 0)
+ add a5, a1, a4
+ vsetvli zero, zero, e8, m1, ta, ma
+ .ifc \unroll,1
+ addi t1, t1, 4
+ .else
+ addi t1, t1, 2
+ .endif
+ vle8.v v8, (a5)
+ add a5, a5, a2
+ add t2, a5, a2
+ vwmulu.vx v10, v8, a6
+ vle8.v v8, (a5)
+ vwmulu.vx v12, v8, a6
+ vle8.v v9, (t2)
+ add t2, t2, a2
+ add a5, t2, a2
+ vwmaccu.vx v10, t0, v8
+ add a4, a4, a7
+ vwmaccu.vx v12, t0, v9
+ vnclipu.wi v15, v10, 6
+ vwmulu.vx v10, v9, a6
+ vnclipu.wi v9, v12, 6
+ .ifc \type,avg
+ vle8.v v16, (a0)
+ manual_avg v15, v15, v16
+ .endif
+ vse8.v v15, (a0)
+ add a0, a0, a2
+ .ifc \type,avg
+ vle8.v v16, (a0)
+ manual_avg v9, v9, v16
+ .endif
+ vse8.v v9, (a0)
+ add a0, a0, a2
+ .ifc \unroll,1
+ vle8.v v8, (t2)
+ vle8.v v14, (a5)
+ vwmaccu.vx v10, t0, v8
+ vwmulu.vx v12, v8, a6
+ vnclipu.wi v8, v10, 6
+ vwmaccu.vx v12, t0, v14
+ .ifc \type,avg
+ vle8.v v16, (a0)
+ manual_avg v8, v8, v16
+ .endif
+ vse8.v v8, (a0)
+ add a0, a0, a2
+ vnclipu.wi v8, v12, 6
+ .ifc \type,avg
+ vle8.v v16, (a0)
+ manual_avg v8, v8, v16
+ .endif
+ vse8.v v8, (a0)
+ add a0, a0, a2
+ .endif
+ blt t1, a3, 3b
+ j 8f
+4:
+ beqz a4, 6f
+ bnez t2, 6f
+ blez a3, 8f
+ li a4, 0
+ li t2, 0
+ addi t0, t3, 1
+ slli t1, a2, (1 + \unroll)
+5: # if ((x8 - xy) != 0 && (y8 - xy) == 0)
+ add a5, a1, a4
+ vsetvli zero, t0, e8, m1, ta, ma
+ .ifc \unroll,1
+ addi t2, t2, 4
+ .else
+ addi t2, t2, 2
+ .endif
+ vle8.v v8, (a5)
+ add a5, a5, a2
+ vslide1down.vx v9, v8, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmulu.vx v10, v8, a6
+ vwmaccu.vx v10, a7, v9
+ vsetvli zero, t0, e8, m1, ta, ma
+ vle8.v v8, (a5)
+ add a5, a5, a2
+ vslide1down.vx v9, v8, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmulu.vx v12, v8, a6
+ vwmaccu.vx v12, a7, v9
+ vnclipu.wi v16, v10, 6
+ .ifc \type,avg
+ vle8.v v18, (a0)
+ manual_avg v16, v16, v18
+ .endif
+ vse8.v v16, (a0)
+ add a0, a0, a2
+ vnclipu.wi v10, v12, 6
+ .ifc \type,avg
+ vle8.v v18, (a0)
+ manual_avg v10, v10, v18
+ .endif
+ add a4, a4, t1
+ vse8.v v10, (a0)
+ add a0, a0, a2
+ .ifc \unroll,1
+ vsetvli zero, t0, e8, m1, ta, ma
+ vle8.v v8, (a5)
+ add a5, a5, a2
+ vslide1down.vx v9, v8, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmulu.vx v14, v8, a6
+ vwmaccu.vx v14, a7, v9
+ vsetvli zero, t0, e8, m1, ta, ma
+ vle8.v v8, (a5)
+ vslide1down.vx v9, v8, t5
+ vsetvli zero, t6, e8, m1, ta, ma
+ vwmulu.vx v12, v8, a6
+ vnclipu.wi v8, v14, 6
+ vwmaccu.vx v12, a7, v9
+ .ifc \type,avg
+ vle8.v v18, (a0)
+ manual_avg v8, v8, v18
+ .endif
+ vse8.v v8, (a0)
+ add a0, a0, a2
+ vnclipu.wi v8, v12, 6
+ .ifc \type,avg
+ vle8.v v18, (a0)
+ manual_avg v8, v8, v18
+ .endif
+ vse8.v v8, (a0)
+ add a0, a0, a2
+ .endif
+ blt t2, a3, 5b
+ j 8f
+6:
+ blez a3, 8f
+ li a4, 0
+ li t2, 0
+ slli a7, a2, (1 + \unroll)
+7: # the final else, none of the above conditions are met
+ add t0, a1, a4
+ vsetvli zero, zero, e8, m1, ta, ma
+ add a5, a0, a4
+ add a4, a4, a7
+ .ifc \unroll,1
+ addi t2, t2, 4
+ .else
+ addi t2, t2, 2
+ .endif
+ vle8.v v8, (t0)
+ add t0, t0, a2
+ add t1, t0, a2
+ vwmulu.vx v10, v8, a6
+ vle8.v v8, (t0)
+ add t0, t1, a2
+ vnclipu.wi v13, v10, 6
+ vwmulu.vx v10, v8, a6
+ .ifc \type,avg
+ vle8.v v18, (a5)
+ manual_avg v13, v13, v18
+ .endif
+ vse8.v v13, (a5)
+ add a5, a5, a2
+ vnclipu.wi v8, v10, 6
+ .ifc \type,avg
+ vle8.v v18, (a5)
+ manual_avg v8, v8, v18
+ .endif
+ vse8.v v8, (a5)
+ add a5, a5, a2
+ .ifc \unroll,1
+ vle8.v v9, (t1)
+ vle8.v v12, (t0)
+ vwmulu.vx v10, v9, a6
+ vnclipu.wi v8, v10, 6
+ vwmulu.vx v10, v12, a6
+ .ifc \type,avg
+ vle8.v v18, (a5)
+ manual_avg v8, v8, v18
+ .endif
+ vse8.v v8, (a5)
+ add a5, a5, a2
+ vnclipu.wi v8, v10, 6
+ .ifc \type,avg
+ vle8.v v18, (a5)
+ manual_avg v8, v8, v18
+ .endif
+ vse8.v v8, (a5)
+ .endif
+ blt t2, a3, 7b
+8:
+ ret
+.endm
+
+func ff_put_rv40_chroma_mc_rvv, zve32x
+11:
+ li a7, 3
+ blt a3, a7, 12f
+ do_chroma_mc put 1
+12:
+ do_chroma_mc put 0
+endfunc
+
+func ff_avg_rv40_chroma_mc_rvv, zve32x
+21:
+ li a7, 3
+ blt a3, a7, 22f
+ do_chroma_mc avg 1
+22:
+ do_chroma_mc avg 0
+endfunc
+
+func ff_put_rv40_chroma_mc8_rvv, zve32x
+ li t6, 8
+ j 11b
+endfunc
+
+func ff_put_rv40_chroma_mc4_rvv, zve32x
+ li t6, 4
+ j 11b
+endfunc
+
+func ff_avg_rv40_chroma_mc8_rvv, zve32x
+ li t6, 8
+ j 21b
+endfunc
+
+func ff_avg_rv40_chroma_mc4_rvv, zve32x
+ li t6, 4
+ j 21b
+endfunc
diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h
index b15424d4ae..d59b3c2732 100644
--- a/libavcodec/rv34dsp.h
+++ b/libavcodec/rv34dsp.h
@@ -83,6 +83,7 @@ void ff_rv34dsp_init_riscv(RV34DSPContext *c);
void ff_rv34dsp_init_x86(RV34DSPContext *c);
void ff_rv40dsp_init_aarch64(RV34DSPContext *c);
+void ff_rv40dsp_init_riscv(RV34DSPContext *c);
void ff_rv40dsp_init_x86(RV34DSPContext *c);
void ff_rv40dsp_init_arm(RV34DSPContext *c);
diff --git a/libavcodec/rv40dsp.c b/libavcodec/rv40dsp.c
index f0208b16ea..970faec5de 100644
--- a/libavcodec/rv40dsp.c
+++ b/libavcodec/rv40dsp.c
@@ -709,6 +709,8 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c)
ff_rv40dsp_init_aarch64(c);
#elif ARCH_ARM
ff_rv40dsp_init_arm(c);
+#elif ARCH_RISCV
+ ff_rv40dsp_init_riscv(c);
#elif ARCH_X86
ff_rv40dsp_init_x86(c);
#endif
More information about the ffmpeg-cvslog
mailing list