[FFmpeg-devel] [PATCH 2/2] lavc/vc1dsp: R-V V vc1_inv_trans_8x4
Rémi Denis-Courmont <remi@remlab.net>
Mon Jun 3 22:06:22 EEST 2024
T-Head C908:
vc1dsp.vc1_inv_trans_8x4_c: 10.5
vc1dsp.vc1_inv_trans_8x4_rvv_i32: 3.5
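The 8x4 transform first runs the existing 8-point helper across the four rows
(loaded transposed with vlseg8e16.v), then the new ff_vc1_inv_trans_4_rvv helper
runs the 4-point pass down the eight columns, one column per vector lane. For
reference, a minimal scalar sketch of that 4-point column pass, modeled on the
C reference rather than copied from it (the function name is made up purely for
illustration):

#include <stddef.h>
#include <stdint.h>
#include "libavutil/common.h"   /* av_clip_uint8() */

/* Illustrative only: the VC-1 4-point column butterfly (constants 17, 22, 10)
 * applied to one column of the row-transformed block, with the +64 rounding
 * and >> 7 scaling of the vertical pass, then added to the destination with
 * unsigned clipping.  The RVV version processes all eight columns at once. */
static void inv_trans_4_col_sketch(uint8_t *dest, ptrdiff_t stride,
                                   const int16_t col[4])
{
    int t1 = 17 * (col[0] + col[2]) + 64;
    int t2 = 17 * (col[0] - col[2]) + 64;
    int t3 = 22 * col[1] + 10 * col[3];
    int t4 = 22 * col[3] - 10 * col[1];

    dest[0 * stride] = av_clip_uint8(dest[0 * stride] + ((t1 + t3) >> 7));
    dest[1 * stride] = av_clip_uint8(dest[1 * stride] + ((t2 - t4) >> 7));
    dest[2 * stride] = av_clip_uint8(dest[2 * stride] + ((t2 + t4) >> 7));
    dest[3 * stride] = av_clip_uint8(dest[3 * stride] + ((t1 - t3) >> 7));
}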
---
libavcodec/riscv/vc1dsp_init.c | 2 +
libavcodec/riscv/vc1dsp_rvv.S | 74 ++++++++++++++++++++++++++++++++++
2 files changed, 76 insertions(+)
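One non-obvious detail in ff_vc1_inv_trans_8x4_rvv below: vnclip.wi saturates to
the signed 8-bit range, so the vertical-pass rounding bias is loaded as
64 - (128 << 7). After the >> 7 the results sit at (residual - 128); a widening
add of the unsigned destination pixels, a signed saturating narrow and a final
vxor.vx with 128 then amount to the usual clip of pixel + residual to [0, 255].
A scalar sketch of that equivalence (the helper and its name are mine, for
illustration, not code from the patch):

#include <stdint.h>

/* Illustrative only: why biasing by -(128 << 7), saturating to [-128, 127]
 * and XOR-ing with 0x80 equals the usual unsigned clip. */
static uint8_t clip_add_sketch(uint8_t pixel, int acc)
{
    /* acc is the unshifted column-pass accumulator; fold in the rounding
     * (+64) and the pre-shifted -128 offset, as "li t1, 64 - (128 << 7)". */
    int biased = (acc + 64 - (128 << 7)) >> 7;   /* == residual - 128 */
    int sum    = biased + pixel;                 /* pixel + residual - 128 */

    /* Signed saturation to [-128, 127], as vnclip.wi does... */
    if (sum < -128) sum = -128;
    if (sum >  127) sum =  127;
    /* ...then XOR with 0x80 undoes the -128 offset, giving the same result
     * as av_clip_uint8(pixel + residual). */
    return (uint8_t)sum ^ 0x80;
}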
diff --git a/libavcodec/riscv/vc1dsp_init.c b/libavcodec/riscv/vc1dsp_init.c
index b8a1015ce5..e63870ad44 100644
--- a/libavcodec/riscv/vc1dsp_init.c
+++ b/libavcodec/riscv/vc1dsp_init.c
@@ -29,6 +29,7 @@ void ff_vc1_inv_trans_8x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block
void ff_vc1_inv_trans_8x8_rvv(int16_t block[64]);
void ff_vc1_inv_trans_4x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_8x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_8x4_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_vc1_inv_trans_4x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
void ff_put_pixels16x16_rvi(uint8_t *dst, const uint8_t *src, ptrdiff_t line_size, int rnd);
void ff_put_pixels8x8_rvi(uint8_t *dst, const uint8_t *src, ptrdiff_t line_size, int rnd);
@@ -55,6 +56,7 @@ av_cold void ff_vc1dsp_init_riscv(VC1DSPContext *dsp)
     if (flags & AV_CPU_FLAG_RVV_I32) {
         if (ff_rv_vlen_least(128)) {
             dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_rvv;
+            dsp->vc1_inv_trans_8x4 = ff_vc1_inv_trans_8x4_rvv;
             dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_rvv;
             dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_rvv;
             dsp->avg_vc1_mspel_pixels_tab[0][0] = ff_avg_pixels16x16_rvv;
diff --git a/libavcodec/riscv/vc1dsp_rvv.S b/libavcodec/riscv/vc1dsp_rvv.S
index 0af4d26a11..7ac1062dcd 100644
--- a/libavcodec/riscv/vc1dsp_rvv.S
+++ b/libavcodec/riscv/vc1dsp_rvv.S
@@ -180,6 +180,29 @@ func ff_vc1_inv_trans_8_rvv, zve32x
         jr       t0
 endfunc
 
+        .variant_cc ff_vc1_inv_trans_4_rvv
+func ff_vc1_inv_trans_4_rvv, zve32x
+        li       t3, 17
+        vmul.vx  v8, v0, t3
+        li       t4, 22
+        vmul.vx  v10, v2, t3
+        li       t2, 10
+        vadd.vx  v8, v8, t1   # +4 or +64
+        vmul.vx  v14, v1, t4
+        vadd.vv  v24, v8, v10 # t1
+        vsub.vv  v25, v8, v10 # t2
+        vmul.vx  v16, v3, t2
+        vmul.vx  v18, v3, t4
+        vmul.vx  v20, v1, t2
+        vadd.vv  v26, v14, v16 # t3
+        vsub.vv  v27, v18, v20 # t4
+        vadd.vv  v0, v24, v26
+        vsub.vv  v1, v25, v27
+        vadd.vv  v2, v25, v27
+        vsub.vv  v3, v24, v26
+        jr       t0
+endfunc
+
 func ff_vc1_inv_trans_8x8_rvv, zve32x
         vsetivli zero, 8, e16, m1, ta, ma
         addi     a1, a0, 1 * 8 * 2
@@ -231,6 +254,57 @@ func ff_vc1_inv_trans_8x8_rvv, zve32x
         ret
 endfunc
 
+func ff_vc1_inv_trans_8x4_rvv, zve32x
+        vsetivli    zero, 4, e16, mf2, ta, ma
+        vlseg8e16.v v0, (a2)            # transposed load: v0..v7 = the 8 columns
+        li          t1, 4               # rounding bias for the horizontal pass
+        jal         t0, ff_vc1_inv_trans_8_rvv  # horizontal pass over the 4 rows
+        vsseg8e16.v v0, (a2)            # store back in row-major order
+        addi        a3, a2, 1 * 8 * 2
+        vsetivli    zero, 8, e16, m1, ta, ma
+        vle16.v     v0, (a2)
+        addi        a4, a2, 2 * 8 * 2
+        vle16.v     v1, (a3)
+        addi        a5, a2, 3 * 8 * 2
+        vle16.v     v2, (a4)
+        li          t1, 64 - (128 << 7) # bias for signed vnclip.wi below
+        vle16.v     v3, (a5)
+        .irp n,0,1,2,3
+        # shift 4 vectors of 8 elems after transpose instead of 8 of 4
+        vsra.vi     v\n, v\n, 3
+        .endr
+        jal         t0, ff_vc1_inv_trans_4_rvv  # vertical pass down the 8 columns
+        add         a3, a1, a0
+        vle8.v      v8, (a0)            # load the four destination rows
+        add         a4, a1, a3
+        vle8.v      v9, (a3)
+        add         a5, a1, a4
+        vle8.v      v10, (a4)
+        li          t1, 128
+        vle8.v      v11, (a5)
+        .irp n,0,1,2,3
+        vsra.vi     v\n, v\n, 7
+        .endr
+        vsetvli     zero, zero, e8, mf2, ta, ma
+        vwaddu.wv   v0, v0, v8          # add pixels to residual (offset by -128)
+        vwaddu.wv   v1, v1, v9
+        vwaddu.wv   v2, v2, v10
+        vwaddu.wv   v3, v3, v11
+        vnclip.wi   v8, v0, 0           # narrow with signed saturation
+        vnclip.wi   v9, v1, 0
+        vnclip.wi   v10, v2, 0
+        vnclip.wi   v11, v3, 0
+        vxor.vx     v8, v8, t1          # undo the -128 offset
+        vxor.vx     v9, v9, t1
+        vse8.v      v8, (a0)
+        vxor.vx     v10, v10, t1
+        vse8.v      v9, (a3)
+        vxor.vx     v11, v11, t1
+        vse8.v      v10, (a4)
+        vse8.v      v11, (a5)
+        ret
+endfunc
+
 .macro mspel_op op pos n1 n2
         add         t1, \pos, a2
         v\op\()e8.v v\n1, (\pos)
--
2.45.1