[FFmpeg-devel] [PATCH v4 6/9] avcodec/av1dec: add display frame for film grain usage
James Almer
jamrial at gmail.com
Fri Jul 9 15:52:52 EEST 2021
On 7/9/2021 12:32 AM, Fei Wang wrote:
> Make both the decoded frame (film grain not applied) and the display
> frame (film grain applied) available when decoding clips that use film
> grain.
Why do you need to do this for Vaapi? Nvdec doesn't require it; it just
makes sure to reference one of the eight extra surfaces it has already
reserved in its DPB when starting a frame with apply_grain == 1.
You should be able to achieve the same for Vaapi. It's not ok to have
hwaccel-specific code in the base decoder, just as it isn't ok to have
arch-specific code, which is why we don't allow it.
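For illustration only (a rough, untested sketch; the callback name and
the exact surface counts are made up here, this is not the actual nvdec
code), the extra surfaces can be reserved entirely inside the hwaccel's
frame_params() callback, so av1dec.c never has to know about them:

/* Hypothetical hwaccel frame_params() sketch: pad the surface pool with
 * spare surfaces the hwaccel can use as film-grain outputs whenever
 * frame_header->film_grain.apply_grain == 1. Numbers are illustrative;
 * format / sw_format selection is omitted for brevity. */
#include "libavutil/hwcontext.h"
#include "avcodec.h"

static int example_av1_frame_params(AVCodecContext *avctx,
                                    AVBufferRef *hw_frames_ctx)
{
    AVHWFramesContext *frames_ctx = (AVHWFramesContext *)hw_frames_ctx->data;

    frames_ctx->width  = avctx->coded_width;
    frames_ctx->height = avctx->coded_height;

    /* 8 DPB slots for AV1, plus 8 spare surfaces for film-grain output,
     * plus a small margin for frames in flight. */
    frames_ctx->initial_pool_size = 8 + 8 + 2;

    return 0;
}

The hwaccel's start_frame() can then pick one of the spare surfaces as
the film-grain output whenever apply_grain is set and return that one
for display, while the grain-free reconstruction stays in the DPB for
reference.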
>
> Signed-off-by: Fei Wang <fei.w.wang at intel.com>
> ---
> libavcodec/av1dec.c | 43 ++++++++++++++++++++++++++++++++++++++++++-
> libavcodec/av1dec.h | 3 +++
> 2 files changed, 45 insertions(+), 1 deletion(-)
>
> diff --git a/libavcodec/av1dec.c b/libavcodec/av1dec.c
> index d808a9d4c6..6bba2cb7a2 100644
> --- a/libavcodec/av1dec.c
> +++ b/libavcodec/av1dec.c
> @@ -570,6 +570,8 @@ static int get_pixel_format(AVCodecContext *avctx)
> static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
> {
> ff_thread_release_buffer(avctx, &f->tf);
> + if (f->tf_display.f->buf[0])
> + ff_thread_release_buffer(avctx, &f->tf_display);
> av_buffer_unref(&f->hwaccel_priv_buf);
> f->hwaccel_picture_private = NULL;
> av_buffer_unref(&f->header_ref);
> @@ -589,6 +591,12 @@ static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *s
> if (ret < 0)
> return ret;
>
> + if (src->tf_display.f->buf[0]) {
> + ret = ff_thread_ref_frame(&dst->tf_display, &src->tf_display);
> + if (ret < 0)
> + return ret;
> + }
> +
> dst->header_ref = av_buffer_ref(src->header_ref);
> if (!dst->header_ref)
> goto fail;
> @@ -635,9 +643,11 @@ static av_cold int av1_decode_free(AVCodecContext *avctx)
> for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
> av1_frame_unref(avctx, &s->ref[i]);
> av_frame_free(&s->ref[i].tf.f);
> + av_frame_free(&s->ref[i].tf_display.f);
> }
> av1_frame_unref(avctx, &s->cur_frame);
> av_frame_free(&s->cur_frame.tf.f);
> + av_frame_free(&s->cur_frame.tf_display.f);
>
> av_buffer_unref(&s->seq_ref);
> av_buffer_unref(&s->header_ref);
> @@ -739,6 +749,13 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
> "Failed to allocate reference frame buffer %d.\n", i);
> return AVERROR(ENOMEM);
> }
> +
> + s->ref[i].tf_display.f = av_frame_alloc();
> + if (!s->ref[i].tf_display.f) {
> + av_log(avctx, AV_LOG_ERROR,
> + "Failed to allocate display frame buffer %d.\n", i);
> + return AVERROR(ENOMEM);
> + }
> }
>
> s->cur_frame.tf.f = av_frame_alloc();
> @@ -748,6 +765,13 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
> return AVERROR(ENOMEM);
> }
>
> + s->cur_frame.tf_display.f = av_frame_alloc();
> + if (!s->cur_frame.tf_display.f) {
> + av_log(avctx, AV_LOG_ERROR,
> + "Failed to allocate current display frame buffer.\n");
> + return AVERROR(ENOMEM);
> + }
> +
> ret = ff_cbs_init(&s->cbc, AV_CODEC_ID_AV1, avctx);
> if (ret < 0)
> return ret;
> @@ -820,6 +844,16 @@ static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
> break;
> }
>
> + if (header->film_grain.apply_grain &&
> + (avctx->pix_fmt == AV_PIX_FMT_VAAPI) &&
Case in point.
> + !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN)) {
> + if ((ret = ff_thread_get_buffer(avctx, &f->tf_display, AV_GET_BUFFER_FLAG_REF)) < 0)
> + goto fail;
> +
> + f->tf_display.f->key_frame = frame->key_frame;
> + f->tf_display.f->pict_type = frame->pict_type;
> + }
> +
> if (avctx->hwaccel) {
> const AVHWAccel *hwaccel = avctx->hwaccel;
> if (hwaccel->frame_priv_data_size) {
> @@ -903,9 +937,16 @@ static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
> const AVPacket *pkt, int *got_frame)
> {
> AV1DecContext *s = avctx->priv_data;
> - const AVFrame *srcframe = s->cur_frame.tf.f;
> + const AVFrame *srcframe;
> int ret;
>
> + /* Use tf_display as output when it is available. */
> + if (s->cur_frame.tf_display.f->buf[0]) {
> + srcframe = s->cur_frame.tf_display.f;
> + } else {
> + srcframe = s->cur_frame.tf.f;
> + }
> +
> // TODO: all layers
> if (s->operating_point_idc &&
> av_log2(s->operating_point_idc >> 8) > s->cur_frame.spatial_id)
> diff --git a/libavcodec/av1dec.h b/libavcodec/av1dec.h
> index 4e140588b9..5af3dfc867 100644
> --- a/libavcodec/av1dec.h
> +++ b/libavcodec/av1dec.h
> @@ -33,6 +33,9 @@
> typedef struct AV1Frame {
> ThreadFrame tf;
>
> + /** tf_display is currently only used by VA-API to apply film grain */
> + ThreadFrame tf_display;
> +
> AVBufferRef *hwaccel_priv_buf;
> void *hwaccel_picture_private;
>
>