[FFmpeg-cvslog] libavcodec/qsvdec: Add more pixel format support to qsvdec

Wenbin Chen git at videolan.org
Wed Apr 6 14:57:18 EEST 2022


ffmpeg | branch: master | Wenbin Chen <wenbin.chen-at-intel.com at ffmpeg.org> | Wed Apr  6 16:48:03 2022 +0800| [e0ae810da3ed0b4489d0dc35c40aa239ba213ec6] | committer: Haihao Xiang

libavcodec/qsvdec: Add more pixel format support to qsvdec

The QSV decoder only supports outputting NV12 and P010 directly to
system memory. For other formats, frames have to be downloaded from the
QSV format to system memory. Add the other supported formats to qsvdec.

Signed-off-by: Wenbin Chen <wenbin.chen at intel.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=e0ae810da3ed0b4489d0dc35c40aa239ba213ec6
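
For context (not part of the commit): when qsvdec outputs to system memory,
the caller's get_format callback selects one of the non-QSV entries the
decoder offers, and with this change that entry can also be YUYV422 or Y210.
Below is a minimal sketch of such a callback, assuming standard libavcodec
usage; the function name and the h264_qsv decoder mentioned in the usage note
are illustrative, not part of the patch.

#include <libavcodec/avcodec.h>

/* Pick the system-memory entry offered by the decoder instead of the
 * opaque AV_PIX_FMT_QSV video-memory format. With this patch that entry
 * can also be AV_PIX_FMT_YUYV422 or AV_PIX_FMT_Y210, depending on the
 * bitstream. */
static enum AVPixelFormat pick_system_memory(AVCodecContext *avctx,
                                             const enum AVPixelFormat *fmts)
{
    for (const enum AVPixelFormat *p = fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p != AV_PIX_FMT_QSV)
            return *p;
    }
    return AV_PIX_FMT_NONE;
}

/* Usage (error handling omitted):
 *     avctx->get_format = pick_system_memory;
 *     avcodec_open2(avctx, avcodec_find_decoder_by_name("h264_qsv"), NULL);
 */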
---

 libavcodec/qsv.c          | 36 ++++++++++++++++++++++++++++++++++++
 libavcodec/qsv_internal.h |  3 +++
 libavcodec/qsvdec.c       | 23 +++++++++++++++++------
 3 files changed, 56 insertions(+), 6 deletions(-)

diff --git a/libavcodec/qsv.c b/libavcodec/qsv.c
index 67d0e3934a..b86c20b153 100644
--- a/libavcodec/qsv.c
+++ b/libavcodec/qsv.c
@@ -244,6 +244,42 @@ int ff_qsv_map_pixfmt(enum AVPixelFormat format, uint32_t *fourcc)
     }
 }
 
+int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
+{
+    switch (frame->format) {
+    case AV_PIX_FMT_NV12:
+    case AV_PIX_FMT_P010:
+        surface->Data.Y  = frame->data[0];
+        surface->Data.UV = frame->data[1];
+        /* The SDK checks Data.V when using system memory for VP9 encoding */
+        surface->Data.V  = surface->Data.UV + 1;
+        break;
+    case AV_PIX_FMT_X2RGB10LE:
+    case AV_PIX_FMT_BGRA:
+        surface->Data.B = frame->data[0];
+        surface->Data.G = frame->data[0] + 1;
+        surface->Data.R = frame->data[0] + 2;
+        surface->Data.A = frame->data[0] + 3;
+        break;
+    case AV_PIX_FMT_YUYV422:
+        surface->Data.Y = frame->data[0];
+        surface->Data.U = frame->data[0] + 1;
+        surface->Data.V = frame->data[0] + 3;
+        break;
+
+    case AV_PIX_FMT_Y210:
+        surface->Data.Y16 = (mfxU16 *)frame->data[0];
+        surface->Data.U16 = (mfxU16 *)frame->data[0] + 1;
+        surface->Data.V16 = (mfxU16 *)frame->data[0] + 3;
+        break;
+    default:
+        return AVERROR(ENOSYS);
+    }
+    surface->Data.PitchLow  = frame->linesize[0];
+
+    return 0;
+}
+
 int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
 {
     int i;
diff --git a/libavcodec/qsv_internal.h b/libavcodec/qsv_internal.h
index 58186ea7ca..e2aecdcbd6 100644
--- a/libavcodec/qsv_internal.h
+++ b/libavcodec/qsv_internal.h
@@ -147,4 +147,7 @@ int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame);
 void ff_qsv_frame_add_ext_param(AVCodecContext *avctx, QSVFrame *frame,
                                 mfxExtBuffer *param);
 
+int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface);
+
+
 #endif /* AVCODEC_QSV_INTERNAL_H */
diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index dcdbb68fac..5fc5bed4c8 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -131,21 +131,28 @@ static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
         frame->linesize[0] = FFALIGN(avctx->width, 128);
         break;
     case AV_PIX_FMT_P010:
+    case AV_PIX_FMT_YUYV422:
         frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
         break;
+    case AV_PIX_FMT_Y210:
+        frame->linesize[0] = 4 * FFALIGN(avctx->width, 128);
+        break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
         return AVERROR(EINVAL);
     }
 
-    frame->linesize[1] = frame->linesize[0];
     frame->buf[0]      = av_buffer_pool_get(pool);
     if (!frame->buf[0])
         return AVERROR(ENOMEM);
 
     frame->data[0] = frame->buf[0]->data;
-    frame->data[1] = frame->data[0] +
-                            frame->linesize[0] * FFALIGN(avctx->height, 64);
+    if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
+        avctx->pix_fmt == AV_PIX_FMT_P010) {
+        frame->linesize[1] = frame->linesize[0];
+        frame->data[1] = frame->data[0] +
+            frame->linesize[0] * FFALIGN(avctx->height, 64);
+    }
 
     ret = ff_attach_decode_data(frame);
     if (ret < 0)
@@ -425,9 +432,11 @@ static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
     if (frame->frame->format == AV_PIX_FMT_QSV) {
         frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
     } else {
-        frame->surface.Data.PitchLow = frame->frame->linesize[0];
-        frame->surface.Data.Y        = frame->frame->data[0];
-        frame->surface.Data.UV       = frame->frame->data[1];
+        ret = ff_qsv_map_frame_to_surface(frame->frame, &frame->surface);
+        if (ret < 0) {
+            av_log(avctx, AV_LOG_ERROR, "map frame to surface failed.\n");
+            return ret;
+        }
     }
 
     frame->surface.Info = q->frame_info;
@@ -1010,6 +1019,8 @@ const FFCodec ff_##x##_qsv_decoder = { \
     .p.priv_class   = &x##_qsv_class, \
     .p.pix_fmts     = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
                                                     AV_PIX_FMT_P010, \
+                                                    AV_PIX_FMT_YUYV422, \
+                                                    AV_PIX_FMT_Y210, \
                                                     AV_PIX_FMT_QSV, \
                                                     AV_PIX_FMT_NONE }, \
     .hw_configs     = qsv_hw_configs, \

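A note on the packed layouts handled above (not part of the commit): YUYV422
stores each pixel pair as Y0 U0 Y1 V0, so U sits at byte offset 1 and V at
byte offset 3 within a row, while Y210 uses the same ordering with 16-bit
samples, so the same offsets are counted in mfxU16 units. The row pitch chosen
in qsv_get_continuous_buffer() follows from the bytes per pixel; a standalone
sketch of that rule (the helper name is illustrative, not from the patch):

#include <libavutil/macros.h>
#include <libavutil/pixfmt.h>

/* Illustrative only: the per-row pitch used for system-memory output,
 * with the width aligned to 128 as in the patch. */
static int qsv_sw_row_pitch(enum AVPixelFormat fmt, int width)
{
    int w = FFALIGN(width, 128);

    switch (fmt) {
    case AV_PIX_FMT_NV12:      /* 1 byte per luma sample */
        return w;
    case AV_PIX_FMT_P010:      /* 2 bytes per luma sample */
    case AV_PIX_FMT_YUYV422:   /* packed 4:2:2, 2 bytes per pixel */
        return 2 * w;
    case AV_PIX_FMT_Y210:      /* packed 4:2:2 in 16-bit words, 4 bytes per pixel */
        return 4 * w;
    default:
        return -1;             /* not handled for system-memory output */
    }
}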