[FFmpeg-devel] [PATCH v9 1/9] libavutil: add hwcontext_d3d12va and AV_PIX_FMT_D3D12
Wu, Tong1
tong1.wu at intel.com
Fri Nov 10 07:45:35 EET 2023
>Nov 8, 2023, 02:06 by tong1.wu-at-intel.com at ffmpeg.org:
>
>> From: Wu Jianhua <toqsxw at outlook.com>
>>
>> Signed-off-by: Wu Jianhua <toqsxw at outlook.com>
>> Signed-off-by: Tong Wu <tong1.wu at intel.com>
>> ---
>> configure | 5 +
>> doc/APIchanges | 7 +
>> libavutil/Makefile | 3 +
>> libavutil/hwcontext.c | 4 +
>> libavutil/hwcontext.h | 1 +
>> libavutil/hwcontext_d3d12va.c | 695 +++++++++++++++++++++++++
>> libavutil/hwcontext_d3d12va.h | 142 +++++
>> libavutil/hwcontext_d3d12va_internal.h | 59 +++
>> libavutil/hwcontext_internal.h | 1 +
>> libavutil/pixdesc.c | 4 +
>> libavutil/pixfmt.h | 7 +
>> libavutil/tests/hwdevice.c | 2 +
>> libavutil/version.h | 2 +-
>> 13 files changed, 931 insertions(+), 1 deletion(-)
>> create mode 100644 libavutil/hwcontext_d3d12va.c
>> create mode 100644 libavutil/hwcontext_d3d12va.h
>> create mode 100644 libavutil/hwcontext_d3d12va_internal.h
>>
>> diff --git a/configure b/configure
>> index 8ab658f730..69d7a48280 100755
>> --- a/configure
>> +++ b/configure
>> @@ -334,6 +334,7 @@ External library support:
>> --disable-cuda-llvm disable CUDA compilation using clang [autodetect]
>> --disable-cuvid disable Nvidia CUVID support [autodetect]
>> --disable-d3d11va disable Microsoft Direct3D 11 video acceleration code [autodetect]
>> + --disable-d3d12va disable Microsoft Direct3D 12 video acceleration code [autodetect]
>> --disable-dxva2 disable Microsoft DirectX 9 video acceleration code [autodetect]
>> --disable-ffnvcodec disable dynamically linked Nvidia code [autodetect]
>> --enable-libdrm enable DRM code (Linux) [no]
>> @@ -1922,6 +1923,7 @@ HWACCEL_AUTODETECT_LIBRARY_LIST="
>> cuda_llvm
>> cuvid
>> d3d11va
>> + d3d12va
>> dxva2
>> ffnvcodec
>> nvdec
>> @@ -3041,6 +3043,7 @@ crystalhd_deps="libcrystalhd_libcrystalhd_if_h"
>> cuda_deps="ffnvcodec"
>> cuvid_deps="ffnvcodec"
>> d3d11va_deps="dxva_h ID3D11VideoDecoder ID3D11VideoContext"
>> +d3d12va_deps="dxva_h ID3D12Device ID3D12VideoDecoder"
>> dxva2_deps="dxva2api_h DXVA2_ConfigPictureDecode ole32 user32"
>> ffnvcodec_deps_any="libdl LoadLibrary"
>> mediacodec_deps="android"
>> @@ -6563,6 +6566,8 @@ check_type "windows.h dxgi1_2.h" "IDXGIOutput1"
>> check_type "windows.h dxgi1_5.h" "IDXGIOutput5"
>> check_type "windows.h d3d11.h" "ID3D11VideoDecoder"
>> check_type "windows.h d3d11.h" "ID3D11VideoContext"
>> +check_type "windows.h d3d12.h" "ID3D12Device"
>> +check_type "windows.h d3d12video.h" "ID3D12VideoDecoder"
>> check_type "windows.h" "DPI_AWARENESS_CONTEXT" -
>D_WIN32_WINNT=0x0A00
>> check_type "d3d9.h dxva2api.h" DXVA2_ConfigPictureDecode -
>D_WIN32_WINNT=0x0602
>> check_func_headers mfapi.h MFCreateAlignedMemoryBuffer -lmfplat
>> diff --git a/doc/APIchanges b/doc/APIchanges
>> index d4511ce2dd..2e71943b34 100644
>> --- a/doc/APIchanges
>> +++ b/doc/APIchanges
>> @@ -2,6 +2,13 @@ The last version increases of all libraries were on 2023-02-09
>>
>> API changes, most recent first:
>>
>> +2023-11-07 - xxxxxxxxxx - lavu 58.32.100 - pixfmt.h hwcontext.h hwcontext_d3d12va.h
>> + Add AV_HWDEVICE_TYPE_D3D12VA and AV_PIX_FMT_D3D12.
>> + Add AVD3D12VADeviceContext, AVD3D12VASyncContext, AVD3D12VAFrame and
>> + AVD3D12VAFramesContext.
>> + Add av_d3d12va_map_sw_to_hw_format, av_d3d12va_sync_context_alloc,
>> + av_d3d12va_sync_context_free.
>> +
>> 2023-10-31 - xxxxxxxxxx - lavu 58.31.100 - pixdesc.h
>> Add AV_PIX_FMT_FLAG_XYZ.
>>
>> diff --git a/libavutil/Makefile b/libavutil/Makefile
>> index 4711f8cde8..6a8566f1d9 100644
>> --- a/libavutil/Makefile
>> +++ b/libavutil/Makefile
>> @@ -42,6 +42,7 @@ HEADERS = adler32.h \
>> hwcontext.h \
>> hwcontext_cuda.h \
>> hwcontext_d3d11va.h \
>> + hwcontext_d3d12va.h \
>> hwcontext_drm.h \
>> hwcontext_dxva2.h \
>> hwcontext_qsv.h \
>> @@ -190,6 +191,7 @@ OBJS = adler32.o \
>>
>> OBJS-$(CONFIG_CUDA) += hwcontext_cuda.o
>> OBJS-$(CONFIG_D3D11VA) += hwcontext_d3d11va.o
>> +OBJS-$(CONFIG_D3D12VA) += hwcontext_d3d12va.o
>> OBJS-$(CONFIG_DXVA2) += hwcontext_dxva2.o
>> OBJS-$(CONFIG_LIBDRM) += hwcontext_drm.o
>> OBJS-$(CONFIG_MACOS_KPERF) += macos_kperf.o
>> @@ -213,6 +215,7 @@ SKIPHEADERS-$(HAVE_CUDA_H) += hwcontext_cuda.h
>> SKIPHEADERS-$(CONFIG_CUDA) += hwcontext_cuda_internal.h \
>> cuda_check.h
>> SKIPHEADERS-$(CONFIG_D3D11VA) += hwcontext_d3d11va.h
>> +SKIPHEADERS-$(CONFIG_D3D12VA) += hwcontext_d3d12va.h
>> SKIPHEADERS-$(CONFIG_DXVA2) += hwcontext_dxva2.h
>> SKIPHEADERS-$(CONFIG_QSV) += hwcontext_qsv.h
>> SKIPHEADERS-$(CONFIG_OPENCL) += hwcontext_opencl.h
>> diff --git a/libavutil/hwcontext.c b/libavutil/hwcontext.c
>> index 3650d4653a..e23bad230f 100644
>> --- a/libavutil/hwcontext.c
>> +++ b/libavutil/hwcontext.c
>> @@ -36,6 +36,9 @@ static const HWContextType * const hw_table[] = {
>> #if CONFIG_D3D11VA
>> &ff_hwcontext_type_d3d11va,
>> #endif
>> +#if CONFIG_D3D12VA
>> + &ff_hwcontext_type_d3d12va,
>> +#endif
>> #if CONFIG_LIBDRM
>> &ff_hwcontext_type_drm,
>> #endif
>> @@ -71,6 +74,7 @@ static const char *const hw_type_names[] = {
>> [AV_HWDEVICE_TYPE_DRM] = "drm",
>> [AV_HWDEVICE_TYPE_DXVA2] = "dxva2",
>> [AV_HWDEVICE_TYPE_D3D11VA] = "d3d11va",
>> + [AV_HWDEVICE_TYPE_D3D12VA] = "d3d12va",
>> [AV_HWDEVICE_TYPE_OPENCL] = "opencl",
>> [AV_HWDEVICE_TYPE_QSV] = "qsv",
>> [AV_HWDEVICE_TYPE_VAAPI] = "vaapi",
>> diff --git a/libavutil/hwcontext.h b/libavutil/hwcontext.h
>> index 7ff08c8608..2b33721a97 100644
>> --- a/libavutil/hwcontext.h
>> +++ b/libavutil/hwcontext.h
>> @@ -37,6 +37,7 @@ enum AVHWDeviceType {
>> AV_HWDEVICE_TYPE_OPENCL,
>> AV_HWDEVICE_TYPE_MEDIACODEC,
>> AV_HWDEVICE_TYPE_VULKAN,
>> + AV_HWDEVICE_TYPE_D3D12VA,
>> };
>>
>> typedef struct AVHWDeviceInternal AVHWDeviceInternal;
>> diff --git a/libavutil/hwcontext_d3d12va.c b/libavutil/hwcontext_d3d12va.c
>> new file mode 100644
>> index 0000000000..0f1eecfc0c
>> --- /dev/null
>> +++ b/libavutil/hwcontext_d3d12va.c
>> @@ -0,0 +1,695 @@
>> +/*
>> + * Direct3D 12 HW acceleration.
>> + *
>> + * copyright (c) 2022-2023 Wu Jianhua <toqsxw at outlook.com>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
>USA
>> + */
>> +
>> +#include "config.h"
>> +#include "common.h"
>> +#include "hwcontext.h"
>> +#include "hwcontext_internal.h"
>> +#include "hwcontext_d3d12va_internal.h"
>> +#include "hwcontext_d3d12va.h"
>> +#include "imgutils.h"
>> +#include "pixdesc.h"
>> +#include "pixfmt.h"
>> +#include "thread.h"
>> +#include "compat/w32dlfcn.h"
>> +#include <dxgi1_3.h>
>> +
>> +typedef HRESULT(WINAPI *PFN_CREATE_DXGI_FACTORY2)(UINT Flags, REFIID riid, void **ppFactory);
>> +
>> +typedef struct D3D12VAFramesContext {
>> + ID3D12Resource *staging_buffer;
>> + ID3D12CommandQueue *command_queue;
>> + ID3D12CommandAllocator *command_allocator;
>> + ID3D12GraphicsCommandList *command_list;
>> + AVD3D12VASyncContext *sync_ctx;
>> + int nb_surfaces;
>> + int nb_surfaces_used;
>> + DXGI_FORMAT format;
>>
>
>This value should be public, like it is with Vulkan.
>Otherwise, users will not be able to specify the exact
>format the AVPixelFormat maps to.
>
OK, I will move it to the public frames context struct then.
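Roughly like this (just a sketch, field placement and documentation still to be finalized):

    typedef struct AVD3D12VAFramesContext {
        /**
         * DXGI_FORMAT corresponding to the sw_format of this frames context.
         * If left as DXGI_FORMAT_UNKNOWN it will be derived from sw_format
         * during init; otherwise the user-supplied value is used as-is.
         */
        DXGI_FORMAT format;

        /**
         * This field is not able to be user-allocated at the present.
         */
        AVD3D12VAFrame *texture_infos;
    } AVD3D12VAFramesContext;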
>
>> + UINT luma_component_size;
>> +} D3D12VAFramesContext;
>> +
>> +typedef struct D3D12VADevicePriv {
>> + HANDLE d3d12lib;
>> + HANDLE dxgilib;
>> + PFN_CREATE_DXGI_FACTORY2 create_dxgi_factory2;
>> + PFN_D3D12_CREATE_DEVICE create_device;
>> + PFN_D3D12_GET_DEBUG_INTERFACE get_debug_interface;
>> +} D3D12VADevicePriv;
>> +
>> +static const struct {
>> + DXGI_FORMAT d3d_format;
>> + enum AVPixelFormat pix_fmt;
>> +} supported_formats[] = {
>> + { DXGI_FORMAT_NV12, AV_PIX_FMT_NV12 },
>> + { DXGI_FORMAT_P010, AV_PIX_FMT_P010 },
>> +};
>> +
>> +DXGI_FORMAT av_d3d12va_map_sw_to_hw_format(enum AVPixelFormat pix_fmt)
>> +{
>> + switch (pix_fmt) {
>> + case AV_PIX_FMT_NV12:return DXGI_FORMAT_NV12;
>> + case AV_PIX_FMT_P010:return DXGI_FORMAT_P010;
>> + default: return DXGI_FORMAT_UNKNOWN;
>> + }
>> +}
>> +
>> +static int d3d12va_fence_completion(AVD3D12VASyncContext *sync_ctx)
>> +{
>> + uint64_t completion = ID3D12Fence_GetCompletedValue(sync_ctx->fence);
>> + if (completion < sync_ctx->fence_value) {
>> + if (FAILED(ID3D12Fence_SetEventOnCompletion(sync_ctx->fence,
>sync_ctx->fence_value, sync_ctx->event)))
>> + return AVERROR(EINVAL);
>> +
>> + WaitForSingleObjectEx(sync_ctx->event, INFINITE, FALSE);
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +static inline int d3d12va_wait_queue_idle(AVD3D12VASyncContext *sync_ctx, ID3D12CommandQueue *command_queue)
>> +{
>> + DX_CHECK(ID3D12CommandQueue_Signal(command_queue, sync_ctx->fence, ++sync_ctx->fence_value));
>> + return d3d12va_fence_completion(sync_ctx);
>> +
>> +fail:
>> + return AVERROR(EINVAL);
>> +}
>> +
>> +int av_d3d12va_sync_context_alloc(AVD3D12VADeviceContext *ctx,
>AVD3D12VASyncContext **psync_ctx)
>> +{
>> + AVD3D12VASyncContext *sync_ctx;
>> +
>> + sync_ctx = av_mallocz(sizeof(AVD3D12VASyncContext));
>> + if (!sync_ctx)
>> + return AVERROR(ENOMEM);
>> +
>> + DX_CHECK(ID3D12Device_CreateFence(ctx->device, sync_ctx->fence_value, D3D12_FENCE_FLAG_NONE,
>> + &IID_ID3D12Fence, (void **)&sync_ctx->fence));
>> +
>> + sync_ctx->event = CreateEvent(NULL, FALSE, FALSE, NULL);
>> + if (!sync_ctx->event)
>> + goto fail;
>> +
>> + *psync_ctx = sync_ctx;
>> +
>> + return 0;
>> +
>> +fail:
>> + D3D12_OBJECT_RELEASE(sync_ctx->fence);
>> + av_freep(&sync_ctx);
>> + return AVERROR(EINVAL);
>> +}
>> +
>> +void av_d3d12va_sync_context_free(AVD3D12VASyncContext **psync_ctx)
>> +{
>> + AVD3D12VASyncContext *sync_ctx;
>> +
>> + if (!psync_ctx || !*psync_ctx)
>> + return;
>> +
>> + sync_ctx = *psync_ctx;
>> +
>> + d3d12va_fence_completion(sync_ctx);
>> +
>> + D3D12_OBJECT_RELEASE(sync_ctx->fence);
>> +
>> + if (sync_ctx->event)
>> + CloseHandle(sync_ctx->event);
>> +
>> + av_freep(psync_ctx);
>> +}
>> +
>> +static inline int create_resource(ID3D12Device *device, const D3D12_RESOURCE_DESC *desc, D3D12_RESOURCE_STATES states, ID3D12Resource **ppResource, int is_read_back)
>> +{
>> + D3D12_HEAP_PROPERTIES props = {
>> + .Type = is_read_back ? D3D12_HEAP_TYPE_READBACK :
>D3D12_HEAP_TYPE_DEFAULT,
>> + .CPUPageProperty = D3D12_CPU_PAGE_PROPERTY_UNKNOWN,
>> + .MemoryPoolPreference = D3D12_MEMORY_POOL_UNKNOWN,
>> + .CreationNodeMask = 0,
>> + .VisibleNodeMask = 0,
>> + };
>> +
>> + if (FAILED(ID3D12Device_CreateCommittedResource(device, &props,
>D3D12_HEAP_FLAG_NONE, desc,
>> + states, NULL, &IID_ID3D12Resource, (void **)ppResource)))
>> + return AVERROR(EINVAL);
>> +
>> + return 0;
>> +}
>> +
>> +static int d3d12va_create_staging_buffer_resource(AVHWFramesContext *ctx)
>> +{
>> + AVD3D12VADeviceContext *device_hwctx = ctx->device_ctx->hwctx;
>> + D3D12VAFramesContext *s = ctx->internal->priv;
>> +
>> + D3D12_RESOURCE_DESC desc = {
>> + .Dimension = D3D12_RESOURCE_DIMENSION_BUFFER,
>> + .Alignment = 0,
>> + .Width = 0,
>> + .Height = 1,
>> + .DepthOrArraySize = 1,
>> + .MipLevels = 1,
>> + .Format = DXGI_FORMAT_UNKNOWN,
>> + .SampleDesc = { .Count = 1, .Quality = 0 },
>> + .Layout = D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
>> + .Flags = D3D12_RESOURCE_FLAG_NONE,
>> + };
>> +
>> + s->luma_component_size = FFALIGN(ctx->width * (s->format == DXGI_FORMAT_P010 ? 2 : 1), D3D12_TEXTURE_DATA_PITCH_ALIGNMENT) * ctx->height;
>> + desc.Width = s->luma_component_size + (s->luma_component_size >> 1);
>> +
>> + return create_resource(device_hwctx->device, &desc,
>D3D12_RESOURCE_STATE_COPY_DEST, &s->staging_buffer, 1);
>> +}
>> +
>> +static int d3d12va_create_helper_objects(AVHWFramesContext *ctx)
>> +{
>> + AVD3D12VADeviceContext *device_hwctx = ctx->device_ctx->hwctx;
>> + D3D12VAFramesContext *s = ctx->internal->priv;
>> +
>> + D3D12_COMMAND_QUEUE_DESC queue_desc = {
>> + .Type = D3D12_COMMAND_LIST_TYPE_COPY,
>> + .Priority = 0,
>> + .NodeMask = 0,
>> + };
>> +
>> + int ret = d3d12va_create_staging_buffer_resource(ctx);
>> + if (ret < 0)
>> + return ret;
>> +
>> + ret = av_d3d12va_sync_context_alloc(device_hwctx, &s->sync_ctx);
>> + if (ret < 0)
>> + return ret;
>> +
>> + DX_CHECK(ID3D12Device_CreateCommandQueue(device_hwctx->device,
>&queue_desc,
>> + &IID_ID3D12CommandQueue, (void **)&s->command_queue));
>> +
>> + DX_CHECK(ID3D12Device_CreateCommandAllocator(device_hwctx->device, queue_desc.Type,
>> + &IID_ID3D12CommandAllocator, (void **)&s->command_allocator));
>> +
>> + DX_CHECK(ID3D12Device_CreateCommandList(device_hwctx->device, 0,
>queue_desc.Type,
>> + s->command_allocator, NULL, &IID_ID3D12GraphicsCommandList,
>(void **)&s->command_list));
>> +
>> + DX_CHECK(ID3D12GraphicsCommandList_Close(s->command_list));
>> +
>> + ID3D12CommandQueue_ExecuteCommandLists(s->command_queue, 1,
>(ID3D12CommandList **)&s->command_list);
>> +
>> + return d3d12va_wait_queue_idle(s->sync_ctx, s->command_queue);
>> +
>> +fail:
>> + return AVERROR(EINVAL);
>> +}
>> +
>> +static void d3d12va_frames_uninit(AVHWFramesContext *ctx)
>> +{
>> + AVD3D12VAFramesContext *frames_hwctx = ctx->hwctx;
>> + D3D12VAFramesContext *s = ctx->internal->priv;
>> +
>> + av_d3d12va_sync_context_free(&s->sync_ctx);
>> +
>> + D3D12_OBJECT_RELEASE(s->staging_buffer);
>> + D3D12_OBJECT_RELEASE(s->command_allocator);
>> + D3D12_OBJECT_RELEASE(s->command_list);
>> + D3D12_OBJECT_RELEASE(s->command_queue);
>> +
>> + av_freep(&frames_hwctx->texture_infos);
>> +}
>> +
>> +static int d3d12va_frames_get_constraints(AVHWDeviceContext *ctx, const void *hwconfig, AVHWFramesConstraints *constraints)
>> +{
>> + HRESULT hr;
>> + int nb_sw_formats = 0;
>> + AVD3D12VADeviceContext *device_hwctx = ctx->hwctx;
>> +
>> + constraints->valid_sw_formats =
>av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1,
>> + sizeof(*constraints->valid_sw_formats));
>> + if (!constraints->valid_sw_formats)
>> + return AVERROR(ENOMEM);
>> +
>> + for (int i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
>> + D3D12_FEATURE_DATA_FORMAT_SUPPORT format_support =
>{ supported_formats[i].d3d_format };
>> + hr = ID3D12Device_CheckFeatureSupport(device_hwctx->device,
>D3D12_FEATURE_FORMAT_SUPPORT, &format_support,
>sizeof(format_support));
>> + if (SUCCEEDED(hr) && (format_support.Support1 &
>D3D12_FORMAT_SUPPORT1_TEXTURE2D))
>> + constraints->valid_sw_formats[nb_sw_formats++] =
>supported_formats[i].pix_fmt;
>> + }
>> + constraints->valid_sw_formats[nb_sw_formats] = AV_PIX_FMT_NONE;
>> +
>> + constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
>> + if (!constraints->valid_hw_formats)
>> + return AVERROR(ENOMEM);
>> +
>> + constraints->valid_hw_formats[0] = AV_PIX_FMT_D3D12;
>> + constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
>> +
>> + return 0;
>> +}
>> +
>> +static void free_texture(void *opaque, uint8_t *data)
>> +{
>> + AVD3D12VAFrame *frame = (AVD3D12VAFrame *)data;
>> +
>> + if (frame->sync_ctx)
>> + av_d3d12va_sync_context_free(&frame->sync_ctx);
>> +
>> + D3D12_OBJECT_RELEASE(frame->texture);
>> + av_freep(&data);
>> +}
>> +
>> +static AVBufferRef *d3d12va_pool_alloc(void *opaque, size_t size)
>> +{
>> + AVHWFramesContext *ctx = (AVHWFramesContext *)opaque;
>> + D3D12VAFramesContext *s = ctx->internal->priv;
>> + AVD3D12VAFramesContext *hwctx = ctx->hwctx;
>> + AVD3D12VADeviceContext *device_hwctx = ctx->device_ctx->hwctx;
>> +
>> + int ret;
>> + AVBufferRef *buf;
>> + AVD3D12VAFrame *frame;
>> +
>> + D3D12_RESOURCE_DESC desc = {
>> + .Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D,
>> + .Alignment = 0,
>> + .Width = ctx->width,
>> + .Height = ctx->height,
>> + .DepthOrArraySize = 1,
>> + .MipLevels = 1,
>> + .Format = s->format,
>> + .SampleDesc = {.Count = 1, .Quality = 0 },
>> + .Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN,
>> + .Flags = D3D12_RESOURCE_FLAG_NONE,
>> + };
>> +
>> + if (s->nb_surfaces_used >= ctx->initial_pool_size) {
>> + av_log(ctx, AV_LOG_ERROR, "Static surface pool size exceeded.\n");
>> + return NULL;
>> + }
>> +
>> + frame = av_mallocz(sizeof(AVD3D12VAFrame));
>> + if (!frame)
>> + return NULL;
>> +
>> + ret = create_resource(device_hwctx->device, &desc,
>D3D12_RESOURCE_STATE_COMMON, &frame->texture, 0);
>> + if (ret < 0)
>> + goto fail;
>> +
>> + ret = av_d3d12va_sync_context_alloc(device_hwctx, &frame->sync_ctx);
>> + if (ret < 0)
>> + goto fail;
>> +
>> + frame->index = s->nb_surfaces_used;
>> + hwctx->texture_infos[s->nb_surfaces_used].texture = frame->texture;
>> + hwctx->texture_infos[s->nb_surfaces_used].index = frame->index;
>> + hwctx->texture_infos[s->nb_surfaces_used].sync_ctx = frame->sync_ctx;
>> + s->nb_surfaces_used++;
>> +
>> + buf = av_buffer_create((uint8_t *)frame, sizeof(frame), free_texture,
>NULL, 0);
>> + if (!buf)
>> + goto fail;
>> +
>> + return buf;
>> +
>> +fail:
>> + free_texture(NULL, (uint8_t *)frame);
>> + return NULL;
>> +}
>> +
>> +static int d3d12va_frames_init(AVHWFramesContext *ctx)
>> +{
>> + AVD3D12VAFramesContext *hwctx = ctx->hwctx;
>> + AVD3D12VADeviceContext *device_hwctx = ctx->device_ctx->hwctx;
>> + D3D12VAFramesContext *s = ctx->internal->priv;
>> +
>> + int i;
>> +
>> + for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
>> + if (ctx->sw_format == supported_formats[i].pix_fmt) {
>> + s->format = supported_formats[i].d3d_format;
>> + break;
>> + }
>> + }
>> + if (i == FF_ARRAY_ELEMS(supported_formats)) {
>> + av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n",
>> + av_get_pix_fmt_name(ctx->sw_format));
>> + return AVERROR(EINVAL);
>> + }
>> +
>> + hwctx->texture_infos = av_realloc_f(NULL, ctx->initial_pool_size,
>sizeof(*hwctx->texture_infos));
>> + if (!hwctx->texture_infos)
>> + return AVERROR(ENOMEM);
>> +
>> + memset(hwctx->texture_infos, 0, ctx->initial_pool_size * sizeof(*hwctx->texture_infos));
>> + s->nb_surfaces = ctx->initial_pool_size;
>> +
>> + ctx->internal->pool_internal =
>av_buffer_pool_init2(sizeof(AVD3D12VAFrame),
>> + ctx, d3d12va_pool_alloc, NULL);
>> +
>> + if (!ctx->internal->pool_internal)
>> + return AVERROR(ENOMEM);
>> +
>> + return 0;
>> +}
>> +
>> +static int d3d12va_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
>> +{
>> + int ret;
>> +
>> + frame->buf[0] = av_buffer_pool_get(ctx->pool);
>> + if (!frame->buf[0])
>> + return AVERROR(ENOMEM);
>> +
>> + ret = av_image_fill_arrays(frame->data, frame->linesize, NULL,
>> + ctx->sw_format, ctx->width, ctx->height,
>> + D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
>> + if (ret < 0)
>> + return ret;
>> +
>> + frame->data[0] = frame->buf[0]->data;
>> + frame->format = AV_PIX_FMT_D3D12;
>> + frame->width = ctx->width;
>> + frame->height = ctx->height;
>> +
>> + return 0;
>> +}
>> +
>> +static int d3d12va_transfer_get_formats(AVHWFramesContext *ctx,
>> + enum AVHWFrameTransferDirection dir,
>> + enum AVPixelFormat **formats)
>> +{
>> + D3D12VAFramesContext *s = ctx->internal->priv;
>> + enum AVPixelFormat *fmts;
>> +
>> + fmts = av_malloc_array(2, sizeof(*fmts));
>> + if (!fmts)
>> + return AVERROR(ENOMEM);
>> +
>> + fmts[0] = ctx->sw_format;
>> + fmts[1] = AV_PIX_FMT_NONE;
>> +
>> + *formats = fmts;
>> +
>> + return 0;
>> +}
>> +
>> +static int d3d12va_transfer_data(AVHWFramesContext *ctx, AVFrame *dst,
>> + const AVFrame *src)
>> +{
>> + AVD3D12VADeviceContext *hwctx = ctx->device_ctx->hwctx;
>> + AVD3D12VAFramesContext *frames_hwctx = ctx->hwctx;
>> + D3D12VAFramesContext *s = ctx->internal->priv;
>> +
>> + int ret;
>> + int download = src->format == AV_PIX_FMT_D3D12;
>> + const AVFrame *frame = download ? src : dst;
>> + const AVFrame *other = download ? dst : src;
>> +
>> + AVD3D12VAFrame *f = (AVD3D12VAFrame *)frame->data[0];
>> + ID3D12Resource *texture = (ID3D12Resource *) f->texture;
>> + int index = (intptr_t) f->index;
>> + AVD3D12VASyncContext *sync_ctx = (AVD3D12VASyncContext *)f->sync_ctx;
>> +
>> + uint8_t *mapped_data;
>> + uint8_t *data[4];
>> + int linesizes[4];
>> +
>> + D3D12_TEXTURE_COPY_LOCATION staging_y_location;
>> + D3D12_TEXTURE_COPY_LOCATION staging_uv_location;
>> +
>> + D3D12_TEXTURE_COPY_LOCATION texture_y_location = {
>> + .pResource = texture,
>> + .Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX,
>> + .SubresourceIndex = 0,
>> + };
>> +
>> + D3D12_TEXTURE_COPY_LOCATION texture_uv_location = {
>> + .pResource = texture,
>> + .Type = D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX,
>> + .SubresourceIndex = 1,
>> + };
>> +
>> + D3D12_RESOURCE_BARRIER barrier = {
>> + .Type = D3D12_RESOURCE_BARRIER_TYPE_TRANSITION,
>> + .Flags = D3D12_RESOURCE_BARRIER_FLAG_NONE,
>> + .Transition = {
>> + .pResource = texture,
>> + .StateBefore = D3D12_RESOURCE_STATE_COMMON,
>> + .StateAfter = D3D12_RESOURCE_STATE_COPY_SOURCE,
>> + .Subresource = D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES,
>> + }
>> + };
>> +
>> + s->format = av_d3d12va_map_sw_to_hw_format(ctx->sw_format);
>> +
>> + if (frame->hw_frames_ctx->data != (uint8_t *)ctx || other->format != ctx->sw_format)
>> + return AVERROR(EINVAL);
>> +
>> + if (!s->command_queue) {
>> + ret = d3d12va_create_helper_objects(ctx);
>> + if (ret < 0)
>> + return ret;
>> + }
>>
>
>This doesn't look threadsafe.
>You should either create a staging buffer every time the function
>is called, or have a pool.
OK, I'll redesign this part, probably by creating the staging buffer every time the transfer is called.
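Something along these lines (untested sketch): drop the cached s->staging_buffer and create a transient readback buffer inside the transfer call, for example via a small helper, so concurrent transfers no longer race on shared state:

    static int d3d12va_create_staging_buffer(AVHWFramesContext *ctx, UINT64 size,
                                             ID3D12Resource **staging_buffer)
    {
        AVD3D12VADeviceContext *device_hwctx = ctx->device_ctx->hwctx;

        D3D12_RESOURCE_DESC desc = {
            .Dimension        = D3D12_RESOURCE_DIMENSION_BUFFER,
            .Alignment        = 0,
            .Width            = size,
            .Height           = 1,
            .DepthOrArraySize = 1,
            .MipLevels        = 1,
            .Format           = DXGI_FORMAT_UNKNOWN,
            .SampleDesc       = { .Count = 1, .Quality = 0 },
            .Layout           = D3D12_TEXTURE_LAYOUT_ROW_MAJOR,
            .Flags            = D3D12_RESOURCE_FLAG_NONE,
        };

        /* readback heap; the caller releases *staging_buffer with
         * D3D12_OBJECT_RELEASE() once the copy has completed and the
         * buffer has been unmapped */
        return create_resource(device_hwctx->device, &desc,
                               D3D12_RESOURCE_STATE_COPY_DEST, staging_buffer, 1);
    }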
>
>In case host-mapping of system RAM is supported on D3D12 like on
>Vulkan, you can also skip allocating a staging buffer altogether and
>just treat the video frame as input.
>
>
>> + for (int i = 0; i < 4; i++)
>> + linesizes[i] = FFALIGN(frame->width * (s->format == DXGI_FORMAT_P010 ? 2 : 1), D3D12_TEXTURE_DATA_PITCH_ALIGNMENT);
>> +
>> + staging_y_location = (D3D12_TEXTURE_COPY_LOCATION) {
>> + .pResource = s->staging_buffer,
>> + .Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT,
>> + .PlacedFootprint = {
>> + .Offset = 0,
>> + .Footprint = {
>> + .Format = s->format == DXGI_FORMAT_P010 ?
>DXGI_FORMAT_R16_UNORM : DXGI_FORMAT_R8_UNORM,
>> + .Width = ctx->width,
>> + .Height = ctx->height,
>> + .Depth = 1,
>> + .RowPitch = linesizes[0],
>> + },
>> + },
>> + };
>> +
>> + staging_uv_location = (D3D12_TEXTURE_COPY_LOCATION) {
>> + .pResource = s->staging_buffer,
>> + .Type = D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT,
>> + .PlacedFootprint = {
>> + .Offset = s->luma_component_size,
>> + .Footprint = {
>> + .Format = s->format == DXGI_FORMAT_P010 ?
>DXGI_FORMAT_R16G16_UNORM : DXGI_FORMAT_R8G8_UNORM,
>> + .Width = ctx->width >> 1,
>> + .Height = ctx->height >> 1,
>> + .Depth = 1,
>> + .RowPitch = linesizes[0],
>> + },
>> + },
>> + };
>> +
>> + DX_CHECK(ID3D12CommandAllocator_Reset(s->command_allocator));
>> +
>> + DX_CHECK(ID3D12GraphicsCommandList_Reset(s->command_list, s->command_allocator, NULL));
>> +
>> + if (download) {
>> + ID3D12GraphicsCommandList_ResourceBarrier(s->command_list, 1,
>&barrier);
>> +
>> + ID3D12GraphicsCommandList_CopyTextureRegion(s->command_list,
>> + &staging_y_location, 0, 0, 0,
>> + &texture_y_location, NULL);
>> +
>> + ID3D12GraphicsCommandList_CopyTextureRegion(s->command_list,
>> + &staging_uv_location, 0, 0, 0,
>> + &texture_uv_location, NULL);
>> +
>> + barrier.Transition.StateBefore = barrier.Transition.StateAfter;
>> + barrier.Transition.StateAfter = D3D12_RESOURCE_STATE_COMMON;
>> + ID3D12GraphicsCommandList_ResourceBarrier(s->command_list, 1,
>&barrier);
>> +
>> + DX_CHECK(ID3D12GraphicsCommandList_Close(s->command_list));
>> +
>> + DX_CHECK(ID3D12CommandQueue_Wait(s->command_queue,
>sync_ctx->fence, sync_ctx->fence_value));
>> +
>> + ID3D12CommandQueue_ExecuteCommandLists(s->command_queue,
>1, (ID3D12CommandList **)&s->command_list);
>> +
>> + ret = d3d12va_wait_queue_idle(s->sync_ctx, s->command_queue);
>> + if (ret < 0)
>> + return ret;
>> +
>> + DX_CHECK(ID3D12Resource_Map(s->staging_buffer, 0, NULL, (void **)&mapped_data));
>> + av_image_fill_pointers(data, ctx->sw_format, ctx->height,
>mapped_data, linesizes);
>> +
>> + av_image_copy(dst->data, dst->linesize, data, linesizes,
>> + ctx->sw_format, ctx->width, ctx->height);
>> +
>> + ID3D12Resource_Unmap(s->staging_buffer, 0, NULL);
>> + } else {
>> + av_log(ctx, AV_LOG_ERROR, "Transfer data to AV_PIX_FMT_D3D12 is
>not supported yet!\n");
>> + return AVERROR(EINVAL);
>> + }
>>
>
>Set .transfer_data_to = NULL, in that case?
>I do think you should implement uploading, the patchset is old
>enough to at least have some fundamental features that are useful for
>testing and debugging.
>
I will implement uploading in the next patchset.
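The upload direction should mostly mirror the download path (untested sketch): fill a staging buffer that lives in an upload heap (D3D12_HEAP_TYPE_UPLOAD) via Map()/memcpy first, then record the copy with the source and destination locations swapped:

    /* transition the texture so the copy queue can write it */
    barrier.Transition.StateBefore = D3D12_RESOURCE_STATE_COMMON;
    barrier.Transition.StateAfter  = D3D12_RESOURCE_STATE_COPY_DEST;
    ID3D12GraphicsCommandList_ResourceBarrier(s->command_list, 1, &barrier);

    /* staging buffer (placed footprints) -> texture subresources */
    ID3D12GraphicsCommandList_CopyTextureRegion(s->command_list,
        &texture_y_location, 0, 0, 0, &staging_y_location, NULL);
    ID3D12GraphicsCommandList_CopyTextureRegion(s->command_list,
        &texture_uv_location, 0, 0, 0, &staging_uv_location, NULL);

    /* return the texture to the common state for the decoder/encoder */
    barrier.Transition.StateBefore = D3D12_RESOURCE_STATE_COPY_DEST;
    barrier.Transition.StateAfter  = D3D12_RESOURCE_STATE_COMMON;
    ID3D12GraphicsCommandList_ResourceBarrier(s->command_list, 1, &barrier);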
>
>> +
>> + return 0;
>> +
>> +fail:
>> + return AVERROR(EINVAL);
>> +}
>> +
>> +static int d3d12va_load_functions(AVHWDeviceContext *hwdev)
>> +{
>> + D3D12VADevicePriv *priv = hwdev->internal->priv;
>> +
>> +#if !HAVE_UWP
>> + priv->d3d12lib = dlopen("d3d12.dll", 0);
>> + priv->dxgilib = dlopen("dxgi.dll", 0);
>> +
>> + if (!priv->d3d12lib || !priv->dxgilib)
>> + goto fail;
>> +
>> + priv->create_device = (PFN_D3D12_CREATE_DEVICE)GetProcAddress(priv->d3d12lib, "D3D12CreateDevice");
>> + if (!priv->create_device)
>> + goto fail;
>> +
>> + priv->create_dxgi_factory2 =
>(PFN_CREATE_DXGI_FACTORY2)GetProcAddress(priv->dxgilib,
>"CreateDXGIFactory2");
>> + if (!priv->create_dxgi_factory2)
>> + goto fail;
>> +
>> + priv->get_debug_interface =
>(PFN_D3D12_GET_DEBUG_INTERFACE)GetProcAddress(priv->d3d12lib,
>"D3D12GetDebugInterface");
>> +#else
>> + priv->create_device = (PFN_D3D12_CREATE_DEVICE)
>D3D12CreateDevice;
>> + priv->create_dxgi_factory2 = (PFN_CREATE_DXGI_FACTORY2)
>CreateDXGIFactory2;
>> + priv->get_debug_interface = (PFN_D3D12_GET_DEBUG_INTERFACE)
>D3D12GetDebugInterface;
>> +#endif
>> + return 0;
>> +
>> +fail:
>> + av_log(hwdev, AV_LOG_ERROR, "Failed to load D3D12 library or its functions\n");
>> + return AVERROR_UNKNOWN;
>> +}
>> +
>> +static void d3d12va_device_free(AVHWDeviceContext *hwdev)
>> +{
>> + AVD3D12VADeviceContext *ctx = hwdev->hwctx;
>> + D3D12VADevicePriv *priv = hwdev->internal->priv;
>> +
>> + D3D12_OBJECT_RELEASE(ctx->device);
>> +
>> + if (priv->d3d12lib)
>> + dlclose(priv->d3d12lib);
>> +
>> + if (priv->dxgilib)
>> + dlclose(priv->dxgilib);
>> +}
>> +
>> +static int d3d12va_device_init(AVHWDeviceContext *hwdev)
>> +{
>> + AVD3D12VADeviceContext *ctx = hwdev->hwctx;
>> +
>> + if (!ctx->video_device)
>> + DX_CHECK(ID3D12Device_QueryInterface(ctx->device,
>&IID_ID3D12VideoDevice, (void **)&ctx->video_device));
>> +
>> + return 0;
>> +
>> +fail:
>> + return AVERROR(EINVAL);
>> +}
>> +
>> +static void d3d12va_device_uninit(AVHWDeviceContext *hwdev)
>> +{
>> + AVD3D12VADeviceContext *device_hwctx = hwdev->hwctx;
>> +
>> + D3D12_OBJECT_RELEASE(device_hwctx->video_device);
>> +}
>> +
>> +static int d3d12va_device_create(AVHWDeviceContext *hwdev, const char
>*device,
>> + AVDictionary *opts, int flags)
>> +{
>> + AVD3D12VADeviceContext *ctx = hwdev->hwctx;
>> + D3D12VADevicePriv *priv = hwdev->internal->priv;
>> +
>> + HRESULT hr;
>> + UINT create_flags = 0;
>> + IDXGIAdapter *pAdapter = NULL;
>> +
>> + int ret;
>> + int is_debug = !!av_dict_get(opts, "debug", NULL, 0);
>> +
>> + hwdev->free = d3d12va_device_free;
>> +
>> + ret = d3d12va_load_functions(hwdev);
>> + if (ret < 0)
>> + return ret;
>> +
>> + if (is_debug) {
>> + ID3D12Debug *pDebug;
>> + if (priv->get_debug_interface && SUCCEEDED(priv->get_debug_interface(&IID_ID3D12Debug, (void **)&pDebug))) {
>> + create_flags |= DXGI_CREATE_FACTORY_DEBUG;
>> + ID3D12Debug_EnableDebugLayer(pDebug);
>> + D3D12_OBJECT_RELEASE(pDebug);
>> + av_log(hwdev, AV_LOG_INFO, "D3D12 debug layer is enabled!\n");
>> + }
>> + }
>> +
>> + if (!ctx->device) {
>> + IDXGIFactory2 *pDXGIFactory = NULL;
>> +
>> + hr = priv->create_dxgi_factory2(create_flags, &IID_IDXGIFactory2, (void **)&pDXGIFactory);
>> + if (SUCCEEDED(hr)) {
>> + int adapter = device ? atoi(device) : 0;
>> + if (FAILED(IDXGIFactory2_EnumAdapters(pDXGIFactory, adapter,
>&pAdapter)))
>> + pAdapter = NULL;
>> + IDXGIFactory2_Release(pDXGIFactory);
>> + }
>> +
>> + if (pAdapter) {
>> + DXGI_ADAPTER_DESC desc;
>> + hr = IDXGIAdapter2_GetDesc(pAdapter, &desc);
>> + if (!FAILED(hr)) {
>> + av_log(ctx, AV_LOG_INFO, "Using device %04x:%04x (%ls).\n",
>> + desc.VendorId, desc.DeviceId, desc.Description);
>> + }
>> + }
>> +
>> + hr = priv->create_device((IUnknown *)pAdapter,
>D3D_FEATURE_LEVEL_12_0, &IID_ID3D12Device, (void **)&ctx->device);
>> + D3D12_OBJECT_RELEASE(pAdapter);
>> + if (FAILED(hr)) {
>> + av_log(ctx, AV_LOG_ERROR, "Failed to create Direct 3D 12 device
>(%lx)\n", (long)hr);
>> + return AVERROR_UNKNOWN;
>> + }
>> + }
>> +
>> + return 0;
>> +}
>> +
>> +const HWContextType ff_hwcontext_type_d3d12va = {
>> + .type = AV_HWDEVICE_TYPE_D3D12VA,
>> + .name = "D3D12VA",
>> +
>> + .device_hwctx_size = sizeof(AVD3D12VADeviceContext),
>> + .device_priv_size = sizeof(D3D12VADevicePriv),
>> + .frames_hwctx_size = sizeof(AVD3D12VAFramesContext),
>> + .frames_priv_size = sizeof(D3D12VAFramesContext),
>> +
>> + .device_create = d3d12va_device_create,
>> + .device_init = d3d12va_device_init,
>> + .device_uninit = d3d12va_device_uninit,
>> + .frames_get_constraints = d3d12va_frames_get_constraints,
>> + .frames_init = d3d12va_frames_init,
>> + .frames_uninit = d3d12va_frames_uninit,
>> + .frames_get_buffer = d3d12va_get_buffer,
>> + .transfer_get_formats = d3d12va_transfer_get_formats,
>> + .transfer_data_to = d3d12va_transfer_data,
>> + .transfer_data_from = d3d12va_transfer_data,
>> +
>> + .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_D3D12,
>AV_PIX_FMT_NONE },
>> +};
>> diff --git a/libavutil/hwcontext_d3d12va.h b/libavutil/hwcontext_d3d12va.h
>> new file mode 100644
>> index 0000000000..7802981c99
>> --- /dev/null
>> +++ b/libavutil/hwcontext_d3d12va.h
>> @@ -0,0 +1,142 @@
>> +/*
>> + * Direct3D 12 HW acceleration.
>> + *
>> + * copyright (c) 2022-2023 Wu Jianhua <toqsxw at outlook.com>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
>USA
>> + */
>> +
>> +#ifndef AVUTIL_HWCONTEXT_D3D12VA_H
>> +#define AVUTIL_HWCONTEXT_D3D12VA_H
>> +
>> +/**
>> + * @file
>> + * An API-specific header for AV_HWDEVICE_TYPE_D3D12VA.
>> + *
>> + * This API does not support dynamic frame pools.
>AVHWFramesContext.pool must
>> + * contain AVBufferRefs whose data pointer points to an
>AVD3D12FrameDescriptor struct.
>> + */
>>
>
>AVD3D12FrameDescriptor doesn't exist.
>> This API does not support dynamic frame pools.
>This will mean that when this does get implemented, users may be caught off-
>guard
>and get broken.
I will implement a dynamic pool in the next patchset too.
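The pool allocator would then grow the bookkeeping array instead of erroring out, roughly like this (untested, growth step arbitrary):

    if (s->nb_surfaces_used >= s->nb_surfaces) {
        AVD3D12VAFrame *tmp = av_realloc_array(hwctx->texture_infos,
                                               s->nb_surfaces + 16,
                                               sizeof(*hwctx->texture_infos));
        if (!tmp)
            return NULL;
        hwctx->texture_infos = tmp;
        memset(&hwctx->texture_infos[s->nb_surfaces], 0,
               16 * sizeof(*hwctx->texture_infos));
        s->nb_surfaces += 16;
    }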
>
>
>> +#include <stdint.h>
>> +#include <initguid.h>
>> +#include <d3d12.h>
>> +#include <d3d12sdklayers.h>
>> +#include <d3d12video.h>
>> +
>> +/**
>> + * @brief This struct is allocated as AVHWDeviceContext.hwctx
>> + *
>> + */
>> +typedef struct AVD3D12VADeviceContext {
>> + /**
>> + * Device used for objects creation and access. This can also be
>> + * used to set the libavcodec decoding device.
>> + *
>> + * Can be set by the user. This is the only mandatory field - the other
>> + * device context fields are set from this and are available for
>convenience.
>> + *
>> + * Deallocating the AVHWDeviceContext will always release this interface,
>> + * and it does not matter whether it was user-allocated.
>> + */
>> + ID3D12Device *device;
>> +
>> + /**
>> + * If unset, this will be set from the device field on init.
>> + *
>> + * Deallocating the AVHWDeviceContext will always release this interface,
>> + * and it does not matter whether it was user-allocated.
>> + */
>> + ID3D12VideoDevice *video_device;
>> +} AVD3D12VADeviceContext;
>> +
>> +/**
>> + * @brief This struct is used to sync d3d12 execution
>> + *
>> + */
>> +typedef struct AVD3D12VASyncContext {
>> + /**
>> + * D3D12 fence object
>> + */
>> + ID3D12Fence *fence;
>> +
>> + /**
>> + * A handle to the event object
>> + */
>> + HANDLE event;
>> +
>> + /**
>> + * The fence value used for sync
>> + */
>> + uint64_t fence_value;
>> +} AVD3D12VASyncContext;
>> +
>> +/**
>> + * @brief D3D12VA frame descriptor for pool allocation.
>> + *
>> + */
>> +typedef struct AVD3D12VAFrame {
>> + /**
>> + * The texture in which the frame is located. The reference count is
>> + * managed by the AVBufferRef, and destroying the reference will release
>> + * the interface.
>> + */
>> + ID3D12Resource *texture;
>> +
>> + /**
>> + * The index into the array texture element representing the frame
>> + */
>> + intptr_t index;
>>
>
>So, you make a single texture, and each frame of the texture is given an index.
>While this can work with dynamic allocation in the future, this is not very good,
>and could potentially get broken, as far as I can tell.
It's going to be redesigned after the dynamic pool is implemented.
>
>Also, you don't expose any memory for the image, which prevents
>interoperability with other APIs (Vulkan, CUDA, OpenCL, and maybe D3D11).
>Since two of those APIs are commonly used for rendering video in players,
>you should support interoperability with them.
https://learn.microsoft.com/en-us/windows/win32/api/d3d12/nn-d3d12-id3d12resource
AFAIK, ID3D12Resource already encapsulates the memory for the image, which is different from
the VkImage/VkDeviceMemory split in Vulkan.
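That said, for interop the frame's texture could be exported as an NT handle that Vulkan (VK_KHR_external_memory_win32), CUDA or D3D11 can import, e.g. (sketch; the resource would have to be created with D3D12_HEAP_FLAG_SHARED for this to succeed):

    HANDLE shared_handle = NULL;
    HRESULT hr = ID3D12Device_CreateSharedHandle(device_hwctx->device,
                                                 (ID3D12DeviceChild *)frame->texture,
                                                 NULL, GENERIC_ALL, NULL,
                                                 &shared_handle);
    if (FAILED(hr))
        return AVERROR_EXTERNAL;
    /* hand shared_handle to the importing API, then CloseHandle() it */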
>
>Also, API users have no idea about the fact that the texture is bound
>to a frames context, rather than a pool. Users are not forbidden
>from reusing AVBufferPool across AVHWFramesContext.
>
>Really, just implement dynamic allocation. The patchset is old enough
>that this should've happened already.
>
>
>> + /**
>> + * The sync context for the texture
>> + *
>> + * Use av_d3d12va_wait_idle(sync_ctx) to ensure the decoding or encoding has finished
>> + * @see: https://learn.microsoft.com/en-us/windows/win32/medfound/direct3d-12-video-overview#directx-12-fences
>> + */
>> + AVD3D12VASyncContext *sync_ctx;
>> +} AVD3D12VAFrame;
>> +
>> +/**
>> + * @brief This struct is allocated as AVHWFramesContext.hwctx
>> + *
>> + */
>> +typedef struct AVD3D12VAFramesContext {
>> + /**
>> + * This field is not able to be user-allocated at the present.
>> + */
>> + AVD3D12VAFrame *texture_infos;
>> +} AVD3D12VAFramesContext;
>> +
>> +/**
>> + * @brief Map sw pixel format to d3d12 format
>> + *
>> + * @return d3d12 specified format
>> + */
>> +DXGI_FORMAT av_d3d12va_map_sw_to_hw_format(enum AVPixelFormat pix_fmt);
>> +
>> +/**
>> + * @brief Allocate an AVD3D12VASyncContext
>> + *
>> + * @return Error code (ret < 0 if failed)
>> + */
>> +int av_d3d12va_sync_context_alloc(AVD3D12VADeviceContext *ctx,
>AVD3D12VASyncContext **sync_ctx);
>>
>
>Do you really need this to be dynamically allocated?
>Since it's only used in frames, which are already dynamically allocated,
>you can put all values there. New values can always be added.
>
It might not be necessary for API users. I could remove it.
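If the separate allocation goes away from the public header, the sync members could simply live inside AVD3D12VAFrame itself (sketch, naming not final):

    typedef struct AVD3D12VAFrame {
        ID3D12Resource *texture;
        intptr_t        index;

        /* sync members folded in from the former AVD3D12VASyncContext */
        ID3D12Fence *fence;
        HANDLE       event;
        uint64_t     fence_value;
    } AVD3D12VAFrame;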
>
>> +/**
>> + * @brief Free an AVD3D12VASyncContext
>> + */
>> +void av_d3d12va_sync_context_free(AVD3D12VASyncContext **sync_ctx);
>> +
>> +#endif /* AVUTIL_HWCONTEXT_D3D12VA_H */
>> diff --git a/libavutil/hwcontext_d3d12va_internal.h b/libavutil/hwcontext_d3d12va_internal.h
>> new file mode 100644
>> index 0000000000..bfd89b3545
>> --- /dev/null
>> +++ b/libavutil/hwcontext_d3d12va_internal.h
>> @@ -0,0 +1,59 @@
>> +/*
>> + * Direct3D 12 HW acceleration.
>> + *
>> + * copyright (c) 2022-2023 Wu Jianhua <toqsxw at outlook.com>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
>USA
>> + */
>> +
>> +#ifndef AVUTIL_HWCONTEXT_D3D12VA_INTERNAL_H
>> +#define AVUTIL_HWCONTEXT_D3D12VA_INTERNAL_H
>> +
>> +/**
>> + * @def COBJMACROS
>> + *
>> + * @brief Enable C style interface for D3D12
>> + */
>> +#ifndef COBJMACROS
>> +#define COBJMACROS
>> +#endif
>> +
>> +/**
>> + * @def DX_CHECK
>> + *
>> + * @brief A check macro used by D3D12 functions highly frequently
>> + */
>> +#define DX_CHECK(hr) \
>> + do { \
>> + if (FAILED(hr)) \
>> + goto fail; \
>> + } while (0)
>> +
>> +/**
>> + * @def D3D12_OBJECT_RELEASE
>> + *
>> + * @brief A release macro used by D3D12 objects highly frequently
>> + */
>> +#define D3D12_OBJECT_RELEASE(pInterface) \
>> + do { \
>> + if (pInterface) { \
>> + IUnknown_Release((IUnknown *)pInterface); \
>> + pInterface = NULL; \
>> + } \
>> + } while (0)
>> +
>> +#endif /* AVUTIL_HWCONTEXT_D3D12VA_INTERNAL_H */
>> \ No newline at end of file
>> diff --git a/libavutil/hwcontext_internal.h b/libavutil/hwcontext_internal.h
>> index e6266494ac..4df516ee6a 100644
>> --- a/libavutil/hwcontext_internal.h
>> +++ b/libavutil/hwcontext_internal.h
>> @@ -165,6 +165,7 @@ int ff_hwframe_map_replace(AVFrame *dst, const AVFrame *src);
>>
>> extern const HWContextType ff_hwcontext_type_cuda;
>> extern const HWContextType ff_hwcontext_type_d3d11va;
>> +extern const HWContextType ff_hwcontext_type_d3d12va;
>> extern const HWContextType ff_hwcontext_type_drm;
>> extern const HWContextType ff_hwcontext_type_dxva2;
>> extern const HWContextType ff_hwcontext_type_opencl;
>> diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
>> index 4e4a63e287..0db4167934 100644
>> --- a/libavutil/pixdesc.c
>> +++ b/libavutil/pixdesc.c
>> @@ -2311,6 +2311,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
>> .name = "d3d11",
>> .flags = AV_PIX_FMT_FLAG_HWACCEL,
>> },
>> + [AV_PIX_FMT_D3D12] = {
>> + .name = "d3d12",
>> + .flags = AV_PIX_FMT_FLAG_HWACCEL,
>> + },
>> [AV_PIX_FMT_GBRPF32BE] = {
>> .name = "gbrpf32be",
>> .nb_components = 3,
>> diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
>> index a26c72d56b..58f9ad28bd 100644
>> --- a/libavutil/pixfmt.h
>> +++ b/libavutil/pixfmt.h
>> @@ -429,6 +429,13 @@ enum AVPixelFormat {
>> AV_PIX_FMT_GBRAP14BE, ///< planar GBR 4:4:4:4 56bpp, big-endian
>> AV_PIX_FMT_GBRAP14LE, ///< planar GBR 4:4:4:4 56bpp, little-endian
>>
>> + /**
>> + * Hardware surfaces for Direct3D 12.
>> + *
>> + * data[0] points to an AVD3D12VAFrame
>> + */
>> + AV_PIX_FMT_D3D12,
>> +
>> AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you
>want to link with shared libav* because the number of formats might differ
>between versions
>> };
>>
>> diff --git a/libavutil/tests/hwdevice.c b/libavutil/tests/hwdevice.c
>> index c57586613a..9d7964f9ee 100644
>> --- a/libavutil/tests/hwdevice.c
>> +++ b/libavutil/tests/hwdevice.c
>> @@ -137,6 +137,8 @@ static const struct {
>> { "0", "1", "2" } },
>> { AV_HWDEVICE_TYPE_D3D11VA,
>> { "0", "1", "2" } },
>> + { AV_HWDEVICE_TYPE_D3D12VA,
>> + { "0", "1", "2" } },
>> { AV_HWDEVICE_TYPE_OPENCL,
>> { "0.0", "0.1", "1.0", "1.1" } },
>> { AV_HWDEVICE_TYPE_VAAPI,
>> diff --git a/libavutil/version.h b/libavutil/version.h
>> index 589a42b0fa..c5fa7c3692 100644
>> --- a/libavutil/version.h
>> +++ b/libavutil/version.h
>> @@ -79,7 +79,7 @@
>> */
>>
>> #define LIBAVUTIL_VERSION_MAJOR 58
>> -#define LIBAVUTIL_VERSION_MINOR 31
>> +#define LIBAVUTIL_VERSION_MINOR 32
>> #define LIBAVUTIL_VERSION_MICRO 100
>>
>> #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
>> --
>> 2.41.0.windows.1
>>
>
>Overall, it is not bad, but really lacks dynamic allocation, frame uploads,
>and interoperability, which are pretty fundamental features.
>
>Also, I think you must implement support for this patch in mpv, or
>work with someone else on the mpv team who can, or implement
>support for hardware acceleration in ffplay (like a current patch which adds
>support for Vulkan rendering). It will not be good at all if this patchset turns
>out to be unusable in video players after all. We do have a requirement where
>any public API must have at least a single public external API user, though
>we don't enforce it often as we rarely add large API patches.
In summary, I'll add uploading and dynamic allocation in the next patch set.
This patch set is mainly about decoding. The hardware decoder can already be tested
by downloading frames to system memory, and that path works well and is functionally complete.
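For reference, the download path can be exercised from the public API roughly like this (sketch, not a complete program; error handling omitted):

    #include <libavutil/hwcontext.h>
    #include <libavutil/frame.h>

    AVBufferRef *device_ref = NULL, *frames_ref = NULL;
    AVFrame *hw = av_frame_alloc(), *sw = av_frame_alloc();

    av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_D3D12VA, "0", NULL, 0);

    frames_ref = av_hwframe_ctx_alloc(device_ref);
    AVHWFramesContext *frames = (AVHWFramesContext *)frames_ref->data;
    frames->format            = AV_PIX_FMT_D3D12;
    frames->sw_format         = AV_PIX_FMT_NV12;
    frames->width             = 1920;
    frames->height            = 1080;
    frames->initial_pool_size = 4;
    av_hwframe_ctx_init(frames_ref);

    av_hwframe_get_buffer(frames_ref, hw, 0);
    /* ... a D3D12 decoder renders into hw->data[0] (AVD3D12VAFrame) ... */
    av_hwframe_transfer_data(sw, hw, 0); /* download to system memory */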
Having video players support it would be good, but I think we can do that step by step.
I'd appreciate it if we could get this merged as the first step.
Thanks,
Tong