[FFmpeg-devel] [PATCH v2 3/4] lavc/vp8dsp: R-V V loop_filter_inner
uk7b at foxmail.com
Sun Jul 14 19:28:23 EEST 2024
From: sunyuechi <sunyuechi at iscas.ac.cn>
                                       C908    X60
vp8_loop_filter8uv_inner_h_c       :   11.0    9.5
vp8_loop_filter8uv_inner_h_rvv_i32 :   10.5    8.7
vp8_loop_filter8uv_inner_v_c       :   11.2   11.0
vp8_loop_filter8uv_inner_v_rvv_i32 :    7.7    6.2
vp8_loop_filter16y_inner_h_c       :   11.2    9.0
vp8_loop_filter16y_inner_h_rvv_i32 :    8.0    6.2
vp8_loop_filter16y_inner_v_c       :   11.5   10.5
vp8_loop_filter16y_inner_v_rvv_i32 :    5.2    3.7
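
For review context, here is a rough scalar sketch of the per-edge inner
filter that these functions vectorize, following the logic of the C
reference in libavcodec/vp8dsp.c (the function name and clip helpers
below are illustrative, not the exact FFmpeg ones):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static int clip_int8(int v)  { return v < -128 ? -128 : v > 127 ? 127 : v; }
static int clip_uint8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

/* Filter one pixel position across an edge; stride steps over the edge,
 * so p[-1 * stride] is p0 and p[0] is q0. */
static void inner_filter_edge(uint8_t *p, ptrdiff_t stride,
                              int flim_E, int flim_I, int hev_thresh)
{
    int p3 = p[-4 * stride], p2 = p[-3 * stride];
    int p1 = p[-2 * stride], p0 = p[-1 * stride];
    int q0 = p[ 0 * stride], q1 = p[ 1 * stride];
    int q2 = p[ 2 * stride], q3 = p[ 3 * stride];
    int a, f1, f2, hev;

    /* vp8_normal_limit: the simple-filter edge test against E plus six
     * inner-difference tests against I */
    if (2 * abs(p0 - q0) + (abs(p1 - q1) >> 1) > flim_E ||
        abs(p3 - p2) > flim_I || abs(p2 - p1) > flim_I ||
        abs(p1 - p0) > flim_I || abs(q3 - q2) > flim_I ||
        abs(q2 - q1) > flim_I || abs(q1 - q0) > flim_I)
        return;

    /* high edge variance: use the p1-q1 tap but leave p1/q1 unchanged */
    hev = abs(p1 - p0) > hev_thresh || abs(q1 - q0) > hev_thresh;

    a  = clip_int8((hev ? clip_int8(p1 - q1) : 0) + 3 * (q0 - p0));
    f1 = clip_int8(a + 4) >> 3;
    f2 = clip_int8(a + 3) >> 3;

    p[-1 * stride] = clip_uint8(p0 + f2);
    p[ 0 * stride] = clip_uint8(q0 - f1);

    if (!hev) {                 /* also nudge the outer pair p1/q1 */
        a = (f1 + 1) >> 1;
        p[-2 * stride] = clip_uint8(p1 + a);
        p[ 1 * stride] = clip_uint8(q1 - a);
    }
}

The 16y variants apply this at 16 positions along a luma edge; the 8uv
variants apply it at 8 positions on each chroma plane, which is why the
RVV code below invokes the filter macro twice (once for U, once for V).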
---
libavcodec/riscv/vp8dsp_init.c | 6 ++-
libavcodec/riscv/vp8dsp_rvv.S | 90 ++++++++++++++++++++++++++++++++++
2 files changed, 95 insertions(+), 1 deletion(-)
diff --git a/libavcodec/riscv/vp8dsp_init.c b/libavcodec/riscv/vp8dsp_init.c
index 8c5b2c8b04..8cb21b8ceb 100644
--- a/libavcodec/riscv/vp8dsp_init.c
+++ b/libavcodec/riscv/vp8dsp_init.c
@@ -154,7 +154,11 @@ av_cold void ff_vp8dsp_init_riscv(VP8DSPContext *c)
#define init_loop_filter(vlen) \
c->vp8_v_loop_filter_simple = ff_vp8_v_loop_filter16_simple_rvv##vlen; \
- c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_rvv##vlen;
+ c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter16_simple_rvv##vlen; \
+ c->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_rvv##vlen; \
+ c->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_rvv##vlen; \
+ c->vp8_v_loop_filter8uv_inner = ff_vp8_v_loop_filter8uv_inner_rvv##vlen; \
+ c->vp8_h_loop_filter8uv_inner = ff_vp8_h_loop_filter8uv_inner_rvv##vlen;
int flags = av_get_cpu_flags();
diff --git a/libavcodec/riscv/vp8dsp_rvv.S b/libavcodec/riscv/vp8dsp_rvv.S
index 3cec4dd135..036872a29e 100644
--- a/libavcodec/riscv/vp8dsp_rvv.S
+++ b/libavcodec/riscv/vp8dsp_rvv.S
@@ -275,6 +275,13 @@ func ff_vp78_idct_dc_add4uv_rvv, zve64x
ret
endfunc
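+// filter_abs: \dst = FFABS(\diff) on the current lanes, then AND the
+// per-lane test (\dst <= \fI) into \mask; \tmp is clobbered. This
+// implements the |x - y| <= flim_I checks of vp8_normal_limit.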
+.macro filter_abs dst, mask, tmp, diff, fI
+ vneg.v \tmp, \diff
+ vmax.vv \dst, \tmp, \diff
+ vmsleu.vx \tmp, \dst, \fI
+ vmand.mm \mask, \mask, \tmp
+.endm
+
.macro filter_fmin len, vlen, a, f1, p0f2, q0f1, p0, q0
vsetvlstatic16 \len, \vlen
vsext.vf2 \q0f1, \a
@@ -299,13 +306,36 @@ endfunc
vle8.v v4, (t3) // p0
vle8.v v5, (\dst) // q0
vle8.v v6, (t4) // q1
+ .if \normal
+ sub t1, t2, \stride // -3
+ sub t0, t1, \stride // -4
+ add t5, t4, \stride // 2
+ add t6, t5, \stride // 3
+ vle8.v v1, (t0) // p3
+ vle8.v v2, (t1) // p2
+ vle8.v v7, (t5) // q2
+ vle8.v v8, (t6) // q3
+ .endif
.else
addi t2, \dst, -2
addi t3, \dst, -1
+ .if \normal
+ addi t1, \dst, -4
+ vlsseg8e8.v v1, (t1), \stride
+ .else
vlsseg4e8.v v3, (t2), \stride
+ .endif
.endif
vwsubu.vv v10, v3, v6 // p1-q1
vwsubu.vv v12, v5, v4 // q0-p0
+.if \normal
+ vwsubu.vv v30, v1, v2 // p3-p2
+ vwsubu.vv v28, v2, v3 // p2-p1
+ vwsubu.vv v26, v3, v4 // p1-p0
+ vwsubu.vv v24, v8, v7 // q3-q2
+ vwsubu.vv v22, v7, v6 // q2-q1
+ vwsubu.vv v8, v6, v5 // q1-q0
+.endif
vnclip.wi v16, v10, 0 // clip_int8(p1 - q1)
vsetvlstatic16 \len, \vlen
@@ -319,6 +349,24 @@ endfunc
vmacc.vx v22, a6, v24
vmsleu.vx v0, v22, \fE
+.if \normal
+ vneg.v v22, v26
+ vmax.vv v26, v22, v26
+ vmsleu.vx v1, v26, \fI
+ filter_abs v22, v1, v10, v28, \fI
+ filter_abs v22, v1, v10, v30, \fI
+ filter_abs v22, v1, v10, v24, \fI
+ filter_abs v22, v1, v10, v22, \fI
+ filter_abs v20, v1, v10, v8, \fI
+ vzext.vf2 v8, v3 // p1
+ vmand.mm v1, v0, v1 // v1 = vp8_normal_limit (simple_limit && inner diffs)
+ vmsgtu.vx v26, v26, \thresh // hev: FFABS(p1 - p0) > thresh
+ vmsgtu.vx v3, v20, \thresh // hev: FFABS(q1 - q0) > thresh
+ vzext.vf2 v14, v6 // q1
+ vmor.mm v3, v3, v26 // FFABS(p1 - p0) > thresh || FFABS(q1 - q0) > thresh
+ vmand.mm v0, v1, v3 // v0 = normal && hev
+ vmnot.m v3, v3 // v3 = !hev
+.endif
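+ // masks at this point: v1 = vp8_normal_limit, v0 = v1 && hev, v3 = !hev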
li a7, 3
li a6, 124
li t6, 123
@@ -339,6 +387,33 @@ endfunc
vssseg2e8.v v30, (t3), \stride, v0.t
.endif
+.if \normal
+ vmand.mm v0, v1, v3 // vp8_normal_limit && !hev
+
+ vnclip.wi v22, v22, 0 // clip_int8(a);
+ filter_fmin \len, \vlen, v22, v12, v26, v10, v24, v20
+ vadd.vi v12, v12, 1
+ vsra.vi v12, v12, 1 // (f1 + 1) >> 1;
+ vadd.vv v8, v8, v12 // p1 + a
+ vsub.vv v14, v14, v12 // q1 - a
+
+ vmax.vx v8, v8, zero
+ vmax.vx v14, v14, zero
+ vsetvlstatic8 \len, \vlen
+ vnclipu.wi v3, v8, 0 // -2
+ vnclipu.wi v4, v26, 0 // -1
+ vnclipu.wi v5, v10, 0 // 0
+ vnclipu.wi v6, v14, 0 // 1
+
+ .ifc \type,v
+ vse8.v v3, (t2), v0.t // -2
+ vse8.v v4, (t3), v0.t // -1
+ vse8.v v5, (\dst), v0.t // 0
+ vse8.v v6, (t4), v0.t // 1
+ .else
+ vssseg4e8.v v3, (t2), \stride, v0.t
+ .endif
+.endif
.endm
.irp type,v,h
@@ -349,6 +424,21 @@ func ff_vp8_\type\()_loop_filter16_simple_rvv\vlen, zve32x
filter 16, \vlen, \type, 0, 0, a0, a1, a2, a3, a4
ret
endfunc
+
+func ff_vp8_\type\()_loop_filter16_inner_rvv\vlen, zve32x
+ csrwi vxrm, 0
+ vsetvlstatic8 16, \vlen
+ filter 16, \vlen, \type, 1, 1, a0, a1, a2, a3, a4
+ ret
+endfunc
+
+func ff_vp8_\type\()_loop_filter8uv_inner_rvv\vlen, zve32x
+ csrwi vxrm, 0
+ vsetvlstatic8 8, \vlen
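+ // a0 = U plane, a1 = V plane; run the 8-pixel inner filter on each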
+ filter 8, \vlen, \type, 1, 1, a0, a2, a3, a4, a5
+ filter 8, \vlen, \type, 1, 1, a1, a2, a3, a4, a5
+ ret
+endfunc
.endr
.endr
--
2.45.2