[FFmpeg-devel] [PATCH 3/3] avutil/hwcontext_qsv: fix D3D11VA<->qsv hwmap errors
Xiang, Haihao
haihao.xiang at intel.com
Mon Apr 25 12:37:30 EEST 2022
On Fri, 2022-04-01 at 17:24 +0800, Tong Wu wrote:
> For hwmap between qsv and d3d11va, the mfxHDLPair information should be
> put into texture_infos when deriving from a qsv context. Moreover, when
> uploading from rawvideo, the textures are created differently, so
> bind-flag checks are needed to make sure the right textures are derived
> during the process. After this fix, d3d_dec->qsv_vpp->qsv_enc,
> d3d_dec->qsv_vpp->qsv_download->yuv,
> yuv->d3d_upload->qsv_vpp->qsv_download->yuv and
> qsv_dec->qsv_vpp->d3d_download->yuv all work properly.
>
> For d3d_dec->qsv_vpp->qsv_enc, one sample command line:
> ffmpeg.exe -hwaccel qsv -c:v h264_qsv -i input.264 -vf "hwmap=derive_device=d3d11va,format=d3d11,hwmap=derive_device=qsv,format=qsv" -c:v h264_qsv -y ./output.264
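For the other pipelines listed in the commit message, roughly analogous
command lines would look as follows. These are untested sketches for
illustration only; the raw-video parameters (nv12, 1920x1080) and the
scale_qsv stage standing in for the qsv_vpp step are assumptions:

d3d_dec->qsv_vpp->qsv_download->yuv:
ffmpeg.exe -hwaccel d3d11va -hwaccel_output_format d3d11 -i input.264 -vf "hwmap=derive_device=qsv,format=qsv,scale_qsv=w=1280:h=720,hwdownload,format=nv12" -f rawvideo -y output.yuv

yuv->d3d_upload->qsv_vpp->qsv_download->yuv:
ffmpeg.exe -init_hw_device d3d11va=d3d -filter_hw_device d3d -f rawvideo -pix_fmt nv12 -s 1920x1080 -i input.yuv -vf "hwupload,hwmap=derive_device=qsv,format=qsv,scale_qsv=w=1280:h=720,hwdownload,format=nv12" -f rawvideo -y output.yuv

qsv_dec->qsv_vpp->d3d_download->yuv:
ffmpeg.exe -hwaccel qsv -c:v h264_qsv -i input.264 -vf "scale_qsv=w=1280:h=720,hwmap=derive_device=d3d11va,format=d3d11,hwdownload,format=nv12" -f rawvideo -y output.yuv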
The default child_device_type is dxva2 when FFmpeg is configured with
--enable-libmfx, so I don't think it makes sense to derive a d3d11va device
from a QSV device that is backed by a dxva2 child device.
But even when the qsv device is initialized with a d3d11va child device, the
command below still doesn't work:
$ ffmpeg.exe -y -hwaccel qsv -init_hw_device qsv=qsv:hw,child_device=0,child_device_type=d3d11va -c:v h264_qsv -i input.h264 -vf "hwmap=derive_device=d3d11va,format=d3d11,hwdownload,format=nv12" -f null -
You may try https://patchwork.ffmpeg.org/project/ffmpeg/list/?series=5304
Thanks
Haihao
>
> Signed-off-by: Tong Wu <tong1.wu at intel.com>
> ---
> libavutil/hwcontext_qsv.c | 48 ++++++++++++++++++++++++++++++++-------
> 1 file changed, 40 insertions(+), 8 deletions(-)
>
> diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
> index 95f8071abe..e6a7ac3ef0 100644
> --- a/libavutil/hwcontext_qsv.c
> +++ b/libavutil/hwcontext_qsv.c
> @@ -806,12 +806,23 @@ static int qsv_frames_derive_from(AVHWFramesContext *dst_ctx,
>  #if CONFIG_D3D11VA
>      case AV_HWDEVICE_TYPE_D3D11VA:
>      {
> +        dst_ctx->initial_pool_size = src_ctx->initial_pool_size;
>          AVD3D11VAFramesContext *dst_hwctx = dst_ctx->hwctx;
> -        mfxHDLPair *pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
> -        dst_hwctx->texture = (ID3D11Texture2D*)pair->first;
> +        dst_hwctx->texture_infos = av_calloc(src_hwctx->nb_surfaces,
> +                                             sizeof(*dst_hwctx->texture_infos));
>          if (src_hwctx->frame_type & MFX_MEMTYPE_SHARED_RESOURCE)
>              dst_hwctx->MiscFlags = D3D11_RESOURCE_MISC_SHARED;
>          dst_hwctx->BindFlags = qsv_get_d3d11va_bind_flags(src_hwctx->frame_type);
> +        for (i = 0; i < src_hwctx->nb_surfaces; i++) {
> +            mfxHDLPair* pair = (mfxHDLPair*)src_hwctx->surfaces[i].Data.MemId;
> +            dst_hwctx->texture_infos[i].texture = (ID3D11Texture2D*)pair->first;
> +            if (dst_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
> +                dst_hwctx->texture_infos[i].index = 0;
> +            }
> +            else {
> +                dst_hwctx->texture_infos[i].index = (intptr_t)pair->second;
> +            }
> +        }
>      }
>      break;
>  #endif
> @@ -900,9 +911,16 @@ static int qsv_map_from(AVHWFramesContext *ctx,
>          dst->height = src->height;
> 
>          if (child_frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
> +#if CONFIG_D3D11VA
> +            AVD3D11VAFramesContext* child_frames_hwctx = child_frames_ctx->hwctx;
>              mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
>              dst->data[0] = pair->first;
> -            dst->data[1] = pair->second;
> +            if (child_frames_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
> +                dst->data[1] = 0;
> +            } else {
> +                dst->data[1] = pair->second;
> +            }
> +#endif
>          } else {
>              dst->data[3] = child_data;
>          }
> @@ -930,9 +948,16 @@ static int qsv_map_from(AVHWFramesContext *ctx,
>      dummy->height = src->height;
> 
>      if (child_frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
> +#if CONFIG_D3D11VA
> +        AVD3D11VAFramesContext* child_frames_hwctx = child_frames_ctx->hwctx;
>          mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;
>          dummy->data[0] = pair->first;
> -        dummy->data[1] = pair->second;
> +        if (child_frames_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET) {
> +            dummy->data[1] = 0;
> +        } else {
> +            dummy->data[1] = pair->second;
> +        }
> +#endif
>      } else {
>          dummy->data[3] = child_data;
>      }
> @@ -1287,6 +1312,10 @@ static int qsv_frames_derive_to(AVHWFramesContext *dst_ctx,
>          return AVERROR(ENOSYS);
>      }
> 
> +    s->child_frames_ref = av_buffer_ref(dst_ctx->internal->source_frames);
> +    if (!s->child_frames_ref) {
> +        return AVERROR(ENOMEM);
> +    }
>      dst_hwctx->surfaces = s->surfaces_internal;
> 
>      return 0;
> @@ -1314,10 +1343,13 @@ static int qsv_map_to(AVHWFramesContext *dst_ctx,
>          case AV_PIX_FMT_D3D11:
>          {
>              mfxHDLPair *pair = (mfxHDLPair*)hwctx->surfaces[i].Data.MemId;
> -            if (pair->first == src->data[0]
> -                && pair->second == src->data[1]) {
> -                index = i;
> -                break;
> +            if (pair->first == src->data[0]) {
> +                if (hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET
> +                    && pair->second == src->data[1]
> +                    || hwctx->frame_type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) {
> +                    index = i;
> +                    break;
> +                }
>              }
>          }
>  #endif
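
For readers following the diff, the core of the first hunk can be condensed
into the following standalone sketch. It is an illustration only, not part of
the patch; fill_texture_info is a hypothetical helper name and error handling
is omitted:

#include <stdint.h>
#include <d3d11.h>
#include <mfxvideo.h>
#include "libavutil/hwcontext_d3d11va.h"

/* Translate one QSV surface into a texture_infos entry when deriving
 * D3D11VA frames from QSV frames. For decoder-style array textures the
 * mfxHDLPair carries the texture in 'first' and the array slice in
 * 'second'; textures created with D3D11_BIND_RENDER_TARGET (e.g. by
 * hwupload from rawvideo) are one texture per frame, so the slice index
 * is 0. */
static void fill_texture_info(AVD3D11VAFramesContext *dst_hwctx,
                              mfxFrameSurface1 *surf, int i)
{
    mfxHDLPair *pair = (mfxHDLPair*)surf->Data.MemId;

    dst_hwctx->texture_infos[i].texture = (ID3D11Texture2D*)pair->first;
    dst_hwctx->texture_infos[i].index   =
        (dst_hwctx->BindFlags & D3D11_BIND_RENDER_TARGET)
            ? 0 : (intptr_t)pair->second;
}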