[FFmpeg-devel] [PATCH 02/10, v2] avcodec: add amfdec.
Dmitrii Ovchinnikov
ovchinnikov.dmitrii at gmail.com
Wed May 1 21:38:01 EEST 2024
From: Evgeny Pavlov <lucenticus at gmail.com>
Added AMF-based H.264, HEVC and AV1 decoders.
Co-authored-by: Dmitrii Ovchinnikov <ovchinnikov.dmitrii at gmail.com>
v2: added decoder reinitialisation on resolution change
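
Example usage (illustrative command line; the input file name is arbitrary):
    ffmpeg -c:v h264_amf -i input.h264 -f null -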
---
libavcodec/Makefile | 7 +-
libavcodec/allcodecs.c | 3 +
libavcodec/amfdec.c | 719 +++++++++++++++++++++++++++++++++++++++++
libavcodec/amfdec.h | 72 +++++
4 files changed, 799 insertions(+), 2 deletions(-)
create mode 100644 libavcodec/amfdec.c
create mode 100644 libavcodec/amfdec.h
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index cff6347bdb..bba5191a70 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -70,7 +70,7 @@ include $(SRC_PATH)/libavcodec/vvc/Makefile
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o ac3.o ac3tab.o
OBJS-$(CONFIG_ADTS_HEADER) += adts_header.o mpeg4audio_sample_rates.o
-OBJS-$(CONFIG_AMF) += amfenc.o
+OBJS-$(CONFIG_AMF) += amfenc.o amfdec.o
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
OBJS-$(CONFIG_ATSC_A53) += atsc_a53.o
OBJS-$(CONFIG_AUDIODSP) += audiodsp.o
@@ -167,6 +167,7 @@ OBJS-$(CONFIG_TEXTUREDSPENC) += texturedspenc.o
OBJS-$(CONFIG_TPELDSP) += tpeldsp.o
OBJS-$(CONFIG_VAAPI_ENCODE) += vaapi_encode.o
OBJS-$(CONFIG_AV1_AMF_ENCODER) += amfenc_av1.o
+OBJS-$(CONFIG_AV1_AMF_DECODER) += amfdec.o
OBJS-$(CONFIG_VC1DSP) += vc1dsp.o
OBJS-$(CONFIG_VIDEODSP) += videodsp.o
OBJS-$(CONFIG_VP3DSP) += vp3dsp.o
@@ -411,6 +412,7 @@ OBJS-$(CONFIG_H264_DECODER) += h264dec.o h264_cabac.o h264_cavlc.o \
h264_refs.o \
h264_slice.o h264data.o h274.o
OBJS-$(CONFIG_H264_AMF_ENCODER) += amfenc_h264.o
+OBJS-$(CONFIG_H264_AMF_DECODER) += amfdec.o
OBJS-$(CONFIG_H264_CUVID_DECODER) += cuviddec.o
OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_H264_MEDIACODEC_ENCODER) += mediacodecenc.o
@@ -437,6 +439,7 @@ OBJS-$(CONFIG_HEVC_DECODER) += hevcdec.o hevc_mvs.o \
hevcdsp.o hevc_filter.o hevc_data.o \
h274.o aom_film_grain.o
OBJS-$(CONFIG_HEVC_AMF_ENCODER) += amfenc_hevc.o
+OBJS-$(CONFIG_HEVC_AMF_DECODER) += amfdec.o
OBJS-$(CONFIG_HEVC_CUVID_DECODER) += cuviddec.o
OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_HEVC_MEDIACODEC_ENCODER) += mediacodecenc.o
@@ -1265,7 +1268,7 @@ SKIPHEADERS += %_tablegen.h \
bitstream_template.h \
$(ARCH)/vpx_arith.h \
-SKIPHEADERS-$(CONFIG_AMF) += amfenc.h
+SKIPHEADERS-$(CONFIG_AMF) += amfenc.h amfdec.h
SKIPHEADERS-$(CONFIG_D3D11VA) += d3d11va.h dxva2_internal.h
SKIPHEADERS-$(CONFIG_D3D12VA) += d3d12va_decode.h
SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index f4705651fb..58a49b5825 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -843,10 +843,12 @@ extern const FFCodec ff_av1_nvenc_encoder;
extern const FFCodec ff_av1_qsv_decoder;
extern const FFCodec ff_av1_qsv_encoder;
extern const FFCodec ff_av1_amf_encoder;
+extern const FFCodec ff_av1_amf_decoder;
extern const FFCodec ff_av1_vaapi_encoder;
extern const FFCodec ff_libopenh264_encoder;
extern const FFCodec ff_libopenh264_decoder;
extern const FFCodec ff_h264_amf_encoder;
+extern const FFCodec ff_h264_amf_decoder;
extern const FFCodec ff_h264_cuvid_decoder;
extern const FFCodec ff_h264_mf_encoder;
extern const FFCodec ff_h264_nvenc_encoder;
@@ -856,6 +858,7 @@ extern const FFCodec ff_h264_v4l2m2m_encoder;
extern const FFCodec ff_h264_vaapi_encoder;
extern const FFCodec ff_h264_videotoolbox_encoder;
extern const FFCodec ff_hevc_amf_encoder;
+extern const FFCodec ff_hevc_amf_decoder;
extern const FFCodec ff_hevc_cuvid_decoder;
extern const FFCodec ff_hevc_mediacodec_decoder;
extern const FFCodec ff_hevc_mediacodec_encoder;
diff --git a/libavcodec/amfdec.c b/libavcodec/amfdec.c
new file mode 100644
index 0000000000..c41ad3c2db
--- /dev/null
+++ b/libavcodec/amfdec.c
@@ -0,0 +1,719 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <AMF/core/Variant.h>
+#include <AMF/core/PropertyStorage.h>
+#include <AMF/components/FFMPEGFileDemuxer.h>
+#include "libavutil/hwcontext_amf.h"
+#include "libavutil/hwcontext_amf_internal.h"
+#include "amfdec.h"
+#include "codec_internal.h"
+#include "hwconfig.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/time.h"
+#include "decode.h"
+#include "libavutil/mastering_display_metadata.h"
+
+#if CONFIG_D3D11VA
+#include "libavutil/hwcontext_d3d11va.h"
+#endif
+#if CONFIG_DXVA2
+#define COBJMACROS
+#include "libavutil/hwcontext_dxva2.h"
+#endif
+
+#ifdef _WIN32
+#include "compat/w32dlfcn.h"
+#else
+#include <dlfcn.h>
+#endif
+
+#define propNotFound 0
+
+const enum AVPixelFormat amf_dec_pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB,
+ AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_YUYV422,
+ AV_PIX_FMT_P010,
+ AV_PIX_FMT_P012,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV420P16,
+#if CONFIG_D3D11VA
+ AV_PIX_FMT_D3D11,
+#endif
+#if CONFIG_DXVA2
+ AV_PIX_FMT_DXVA2_VLD,
+#endif
+ AV_PIX_FMT_AMF_SURFACE,
+ AV_PIX_FMT_NONE
+};
+
+static const AVCodecHWConfigInternal *const amf_hw_configs[] = {
+ &(const AVCodecHWConfigInternal) {
+ .public = {
+ .pix_fmt = AV_PIX_FMT_AMF_SURFACE,
+ .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
+ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
+ .device_type = AV_HWDEVICE_TYPE_AMF,
+ },
+ .hwaccel = NULL,
+ },
+ NULL
+};
+
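+// AVBufferRef free callback: releases the wrapped AMFSurface when the last
+// frame reference is dropped, returning it to the decoder's surface pool.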
+static void amf_free_amfsurface(void *opaque, uint8_t *data)
+{
+ AMFSurface *surface = (AMFSurface*)(data);
+ surface->pVtbl->Release(surface);
+}
+
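+// Creates the AMF decoder component for the active codec, propagates the
+// colour metadata, decoder options and extradata to it, and calls Init()
+// with the selected output format and coded dimensions.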
+static int amf_init_decoder(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AVAMFDeviceContextInternal * internal = (AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data;
+ const wchar_t *codec_id = NULL;
+ AMF_RESULT res;
+ AMFBuffer *buffer;
+ amf_int64 color_profile;
+ int pool_size = 36;
+
+ ctx->output_format = AMF_SURFACE_UNKNOWN;
+ if (avctx->pix_fmt == AV_PIX_FMT_AMF_SURFACE){
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ ctx->output_format = av_amf_av_to_amf_format(hwframes_ctx->sw_format);
+ } else
+ ctx->output_format = av_amf_av_to_amf_format(avctx->sw_pix_fmt);
+ } else
+ ctx->output_format = av_amf_av_to_amf_format(avctx->pix_fmt);
+
+ if (ctx->output_format == AMF_SURFACE_UNKNOWN)
+ ctx->output_format = AMF_SURFACE_NV12;
+
+ ctx->drain = 0;
+ ctx->resolution_changed = 0;
+
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ codec_id = AMFVideoDecoderUVD_H264_AVC;
+ break;
+ case AV_CODEC_ID_HEVC: {
+ if (ctx->output_format == AMF_SURFACE_P010)
+ codec_id = AMFVideoDecoderHW_H265_MAIN10;
+ else
+ codec_id = AMFVideoDecoderHW_H265_HEVC;
+ } break;
+ case AV_CODEC_ID_AV1:
+ if (ctx->output_format == AMF_SURFACE_P012)
+ codec_id = AMFVideoDecoderHW_AV1_12BIT;
+ else
+ codec_id = AMFVideoDecoderHW_AV1;
+ break;
+ default:
+ break;
+ }
+ AMF_RETURN_IF_FALSE(ctx, codec_id != NULL, AVERROR(EINVAL), "Codec %d is not supported\n", avctx->codec->id);
+
+ res = internal->factory->pVtbl->CreateComponent(internal->factory, internal->context, codec_id, &ctx->decoder);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_DECODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", codec_id, res);
+
+ // Color Metadata
+ /// Color Range (Support for older Drivers)
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->decoder, AMF_VIDEO_DECODER_FULL_RANGE_COLOR, 1);
+ } else if (avctx->color_range != AVCOL_RANGE_UNSPECIFIED) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->decoder, AMF_VIDEO_DECODER_FULL_RANGE_COLOR, 0);
+ }
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+ switch (avctx->colorspace) {
+ case AVCOL_SPC_SMPTE170M:
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601;
+ } else {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601;
+ }
+ break;
+ case AVCOL_SPC_BT709:
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709;
+ } else {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709;
+ }
+ break;
+ case AVCOL_SPC_BT2020_NCL:
+ case AVCOL_SPC_BT2020_CL:
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020;
+ } else {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020;
+ }
+ break;
+ }
+ if (color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_PROFILE, color_profile);
+ if (avctx->color_trc != AVCOL_TRC_UNSPECIFIED)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+
+ if (avctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
+
+ if (ctx->timestamp_mode != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_TIMESTAMP_MODE, ctx->timestamp_mode);
+ if (ctx->decoder_mode != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_REORDER_MODE, ctx->decoder_mode);
+ if (ctx->dpb_size != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_DPB_SIZE, ctx->dpb_size);
+ if (ctx->lowlatency != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_LOW_LATENCY, ctx->lowlatency);
+ if (ctx->smart_access_video != -1) {
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+ if (res != AMF_OK) {
+            av_log(avctx, AV_LOG_ERROR, "Smart Access Video is not supported by the AMF decoder.\n");
+ return AVERROR(EINVAL);
+ } else {
+            av_log(avctx, AV_LOG_INFO, "Smart Access Video is set to %d.\n", ctx->smart_access_video);
+ // Set low latency mode if Smart Access Video is enabled
+ if (ctx->smart_access_video != 0) {
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_LOW_LATENCY, true);
+                av_log(avctx, AV_LOG_INFO, "Smart Access Video enabled low latency mode for the decoder.\n");
+ }
+ }
+ }
+ if (ctx->skip_transfer_sav != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SKIP_TRANSFER_SMART_ACCESS_VIDEO, ctx->skip_transfer_sav);
+
+ if (avctx->extradata_size) {
+ res = internal->context->pVtbl->AllocBuffer(internal->context, AMF_MEMORY_HOST, avctx->extradata_size, &buffer);
+ if (res == AMF_OK) {
+ memcpy(buffer->pVtbl->GetNative(buffer), avctx->extradata, avctx->extradata_size);
+ AMF_ASSIGN_PROPERTY_INTERFACE(res,ctx->decoder, AMF_VIDEO_DECODER_EXTRADATA, buffer);
+ buffer->pVtbl->Release(buffer);
+ buffer = NULL;
+ }
+ }
+ if (ctx->surface_pool_size == -1) {
+ ctx->surface_pool_size = pool_size;
+ if (avctx->extra_hw_frames > 0)
+ ctx->surface_pool_size += avctx->extra_hw_frames;
+ if (avctx->active_thread_type & FF_THREAD_FRAME)
+ ctx->surface_pool_size += avctx->thread_count;
+ }
+
+    // AMF currently limits the surface pool to 100 surfaces;
+    // drop this clamp once the restriction is lifted.
+    if (ctx->surface_pool_size > 100)
+        ctx->surface_pool_size = 100;
+
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SURFACE_POOL_SIZE, ctx->surface_pool_size);
+    res = ctx->decoder->pVtbl->Init(ctx->decoder, ctx->output_format, avctx->width, avctx->height);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR(EINVAL), "Decoder Init() failed with error %d\n", res);
+
+    return 0;
+}
+
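+// Binds the internal AMF device context either by deriving it from the
+// supplied hw_frames_ctx/hw_device_ctx or by initialising a standalone one.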
+static int amf_init_decoder_context(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ int ret;
+
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ ret = av_amf_context_derive((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, frames_ctx->device_ctx, NULL, 0);
+ if (ret < 0)
+ return ret;
+ ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
+ if (!ctx->hw_frames_ctx)
+ return AVERROR(ENOMEM);
+ }
+ else if (avctx->hw_device_ctx) {
+ AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+ ret = av_amf_context_derive((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, device_ctx, NULL, 0);
+ if (ret < 0)
+ return ret;
+ ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
+ if (!ctx->hw_device_ctx)
+ return AVERROR(ENOMEM);
+ } else {
+ ret = av_amf_context_init((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, avctx);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
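+// Terminates and releases the AMF decoder component and drops all context
+// and packet references held by the private context.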
+static int amf_decode_close(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+
+ if (ctx->decoder) {
+ ctx->decoder->pVtbl->Terminate(ctx->decoder);
+ ctx->decoder->pVtbl->Release(ctx->decoder);
+ ctx->decoder = NULL;
+ }
+
+ av_buffer_unref(&ctx->amf_device_ctx_internal);
+ av_buffer_unref(&ctx->hw_device_ctx);
+ av_buffer_unref(&ctx->hw_frames_ctx);
+ av_buffer_unref(&ctx->amf_device_ctx);
+
+ av_packet_unref(ctx->in_pkt);
+
+ return 0;
+
+}
+
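+// Decoder init: negotiates the output pixel format, adopts or creates the
+// AMF device/frame contexts, then creates and initialises the decoder
+// component.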
+static int amf_decode_init(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ int ret;
+ enum AVPixelFormat pix_fmts[3] = {
+ AV_PIX_FMT_AMF_SURFACE,
+ avctx->pix_fmt,
+ AV_PIX_FMT_NONE };
+
+ ret = ff_get_format(avctx, pix_fmts);
+ if (ret < 0) {
+ avctx->pix_fmt = AV_PIX_FMT_NONE;
+ }
+ ctx->in_pkt = av_packet_alloc();
+ if (!ctx->in_pkt)
+ return AVERROR(ENOMEM);
+
+ if (avctx->hw_frames_ctx){
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ if (frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+ AVAMFDeviceContext * amf_ctx = frames_ctx->device_ctx->hwctx;
+ ctx->amf_device_ctx_internal = av_buffer_ref(amf_ctx->internal);
+ }
+ }
+ else if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_AMF_SURFACE) {
+ AVHWDeviceContext *hwdev_ctx;
+ AVHWFramesContext *hwframes_ctx;
+ hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+ if (hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF)
+ {
+ AVAMFDeviceContext * amf_ctx = hwdev_ctx->hwctx;
+ ctx->amf_device_ctx_internal = av_buffer_ref(amf_ctx->internal);
+ }
+
+ avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
+
+ if (!avctx->hw_frames_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
+ return AVERROR(ENOMEM);
+ }
+
+ hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
+ hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
+ hwframes_ctx->format = AV_PIX_FMT_AMF_SURFACE;
+ hwframes_ctx->sw_format = avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10 ? AV_PIX_FMT_P010 : AV_PIX_FMT_NV12;
+ hwframes_ctx->initial_pool_size = ctx->surface_pool_size + 8;
+ avctx->pix_fmt = AV_PIX_FMT_AMF_SURFACE;
+
+ ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
+
+ if (ret < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Error initializing an AMF frame pool\n");
+ av_buffer_unref(&avctx->hw_frames_ctx);
+ return ret;
+ }
+    } else {
+        AVAMFDeviceContextInternal *wrapped = av_mallocz(sizeof(*wrapped));
+        if (!wrapped)
+            return AVERROR(ENOMEM);
+        ctx->amf_device_ctx_internal = av_buffer_create((uint8_t *)wrapped, sizeof(*wrapped),
+                                                         av_amf_context_internal_free, NULL, 0);
+        if (!ctx->amf_device_ctx_internal) {
+            av_free(wrapped);
+            return AVERROR(ENOMEM);
+        }
+        if ((ret = av_amf_context_internal_create((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, avctx, "", NULL, 0)) != 0) {
+            amf_decode_close(avctx);
+            return ret;
+        }
+        if ((ret = amf_init_decoder_context(avctx)) != 0) {
+            return ret;
+        }
+    }
+ if ((ret = amf_init_decoder(avctx)) == 0) {
+ return 0;
+ }
+ amf_decode_close(avctx);
+ return ret;
+}
+
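+// Reads a property stored as an interface (e.g. HDR metadata) and queries it
+// for the AMFBuffer interface.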
+static AMF_RESULT amf_get_property_buffer(AMFData *object, const wchar_t *name, AMFBuffer **val)
+{
+ AMF_RESULT res;
+ AMFVariantStruct var;
+ res = AMFVariantInit(&var);
+ if (res == AMF_OK) {
+ res = object->pVtbl->GetProperty(object, name, &var);
+ if (res == AMF_OK) {
+ if (var.type == AMF_VARIANT_INTERFACE) {
+ AMFGuid guid_AMFBuffer = IID_AMFBuffer();
+ AMFInterface *amf_interface = AMFVariantInterface(&var);
+ res = amf_interface->pVtbl->QueryInterface(amf_interface, &guid_AMFBuffer, (void**)val);
+ } else {
+ res = AMF_INVALID_DATA_TYPE;
+ }
+ }
+ AMFVariantClear(&var);
+ }
+ return res;
+}
+
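+// Maps an AMFSurface onto an AVFrame: for AMF hw frames the surface is
+// wrapped by reference, otherwise it is converted to host memory and its
+// planes are exposed directly. Timestamps, colour properties and HDR
+// mastering metadata are propagated to the frame.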
+static int amf_amfsurface_to_avframe(AVCodecContext *avctx, AMFSurface* surface, AVFrame *frame)
+{
+ AMFVariantStruct var = {0};
+ AMFPlane *plane;
+ int i;
+ int ret;
+
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ if (hwframes_ctx->format == AV_PIX_FMT_AMF_SURFACE) {
+ ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Get hw frame failed.\n");
+ return ret;
+ }
+            // release the surface together with the frame so it returns to the decoder pool
+ frame->buf[1] = av_buffer_create((uint8_t *)surface, sizeof(AMFSurface),
+ amf_free_amfsurface, (void*)avctx,
+ AV_BUFFER_FLAG_READONLY);
+ frame->data[0] = (uint8_t *)surface;
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Unknown format for hwframes_ctx\n");
+ return AVERROR(ENOMEM);
+ }
+ } else {
+ ret = surface->pVtbl->Convert(surface, AMF_MEMORY_HOST);
+ AMF_RETURN_IF_FALSE(avctx, ret == AMF_OK, AVERROR_UNKNOWN, "Convert(amf::AMF_MEMORY_HOST) failed with error %d\n", ret);
+
+ for (i = 0; i < surface->pVtbl->GetPlanesCount(surface); i++) {
+ plane = surface->pVtbl->GetPlaneAt(surface, i);
+ frame->data[i] = plane->pVtbl->GetNative(plane);
+ frame->linesize[i] = plane->pVtbl->GetHPitch(plane);
+ }
+
+ frame->buf[0] = av_buffer_create((uint8_t *)surface, sizeof(AMFSurface),
+ amf_free_amfsurface, (void*)avctx,
+ AV_BUFFER_FLAG_READONLY);
+ frame->format = av_amf_to_av_format(surface->pVtbl->GetFormat(surface));
+ }
+
+ frame->width = avctx->width;
+ frame->height = avctx->height;
+
+ frame->pts = surface->pVtbl->GetPts(surface);
+
+ surface->pVtbl->GetProperty(surface, L"FFMPEG:dts", &var);
+ frame->pkt_dts = var.int64Value;
+
+ frame->duration = surface->pVtbl->GetDuration(surface);
+ if (frame->duration < 0)
+ frame->duration = 0;
+
+ frame->color_range = avctx->color_range;
+ frame->colorspace = avctx->colorspace;
+ frame->color_trc = avctx->color_trc;
+ frame->color_primaries = avctx->color_primaries;
+
+ if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
+ AMFBuffer * hdrmeta_buffer = NULL;
+        ret = amf_get_property_buffer((AMFData *)surface, AMF_VIDEO_DECODER_HDR_METADATA, &hdrmeta_buffer);
+        if (ret == AMF_OK && hdrmeta_buffer != NULL) {
+            AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer);
+            if (hdrmeta != NULL) {
+ AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
+ const int chroma_den = 50000;
+ const int luma_den = 10000;
+
+ if (!mastering)
+ return AVERROR(ENOMEM);
+
+ mastering->display_primaries[0][0] = av_make_q(hdrmeta->redPrimary[0], chroma_den);
+ mastering->display_primaries[0][1] = av_make_q(hdrmeta->redPrimary[1], chroma_den);
+
+ mastering->display_primaries[1][0] = av_make_q(hdrmeta->greenPrimary[0], chroma_den);
+ mastering->display_primaries[1][1] = av_make_q(hdrmeta->greenPrimary[1], chroma_den);
+
+ mastering->display_primaries[2][0] = av_make_q(hdrmeta->bluePrimary[0], chroma_den);
+ mastering->display_primaries[2][1] = av_make_q(hdrmeta->bluePrimary[1], chroma_den);
+
+ mastering->white_point[0] = av_make_q(hdrmeta->whitePoint[0], chroma_den);
+ mastering->white_point[1] = av_make_q(hdrmeta->whitePoint[1], chroma_den);
+
+ mastering->max_luminance = av_make_q(hdrmeta->maxMasteringLuminance, luma_den);
+                mastering->min_luminance = av_make_q(hdrmeta->minMasteringLuminance, luma_den);
+
+ mastering->has_luminance = 1;
+ mastering->has_primaries = 1;
+ if (hdrmeta->maxContentLightLevel) {
+ AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
+
+ if (!light)
+ return AVERROR(ENOMEM);
+
+ light->MaxCLL = hdrmeta->maxContentLightLevel;
+ light->MaxFALL = hdrmeta->maxFrameAverageLightLevel;
+ }
+            }
+            hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
+        }
+ }
+ return 0;
+}
+
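+// Polls QueryOutput() for a decoded surface and converts it to an AVFrame;
+// returns AMF_REPEAT when no output is ready yet (AMF_EOF propagates once
+// draining has completed).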
+static AMF_RESULT amf_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AMF_RESULT ret = AMF_OK;
+ AMFSurface *surface = NULL;
+ AMFData *data_out = NULL;
+
+ ret = ctx->decoder->pVtbl->QueryOutput(ctx->decoder, &data_out);
+
+ if (ret != AMF_OK && ret != AMF_REPEAT) {
+ return ret;
+ }
+ if (data_out == NULL) {
+ return AMF_REPEAT;
+ }
+
+ if (data_out) {
+ AMFGuid guid = IID_AMFSurface();
+        data_out->pVtbl->QueryInterface(data_out, &guid, (void**)&surface); // query for surface interface
+ data_out->pVtbl->Release(data_out);
+ data_out = NULL;
+ }
+
+ ret = amf_amfsurface_to_avframe(avctx, surface, frame);
+ AMF_GOTO_FAIL_IF_FALSE(avctx, ret >= 0, AMF_FAIL, "Failed to convert AMFSurface to AVFrame = %d\n", ret);
+ return AMF_OK;
+fail:
+
+ if (surface) {
+ surface->pVtbl->Release(surface);
+ surface = NULL;
+ }
+ return ret;
+}
+
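+// Copies pts/duration from the AVPacket to the AMFBuffer and stores the dts
+// as a custom property so it can be restored on the output frame.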
+static AMF_RESULT amf_update_buffer_properties(AVCodecContext *avctx, AMFBuffer* buffer, const AVPacket* pkt)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AVAMFDeviceContextInternal * internal = (AVAMFDeviceContextInternal * )ctx->amf_device_ctx_internal->data;
+ AMFContext *ctxt = internal->context;
+
+ AMF_RESULT res;
+
+ AMF_RETURN_IF_FALSE(ctxt, buffer != NULL, AMF_INVALID_ARG, "update_buffer_properties() - buffer not passed in");
+ AMF_RETURN_IF_FALSE(ctxt, pkt != NULL, AMF_INVALID_ARG, "update_buffer_properties() - packet not passed in");
+ buffer->pVtbl->SetPts(buffer, pkt->pts);
+ buffer->pVtbl->SetDuration(buffer, pkt->duration);
+ AMF_ASSIGN_PROPERTY_INT64(res, buffer, L"FFMPEG:dts", pkt->dts);
+ if (res != AMF_OK)
+        av_log(avctx, AV_LOG_VERBOSE, "Failed to assign dts value.\n");
+ return AMF_OK;
+}
+
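+// Wraps an AVPacket into a host-memory AMFBuffer (with zeroed padding) and
+// attaches the packet timestamps to it.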
+static AMF_RESULT amf_buffer_from_packet(AVCodecContext *avctx, const AVPacket* pkt, AMFBuffer** buffer)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AVAMFDeviceContextInternal * internal = (AVAMFDeviceContextInternal * )ctx->amf_device_ctx_internal->data;
+ AMFContext *ctxt = internal->context;
+ void *mem;
+ AMF_RESULT err;
+ AMFBuffer *buf = NULL;
+
+ AMF_RETURN_IF_FALSE(ctxt, pkt != NULL, AMF_INVALID_ARG, "amf_buffer_from_packet() - packet not passed in");
+ AMF_RETURN_IF_FALSE(ctxt, buffer != NULL, AMF_INVALID_ARG, "amf_buffer_from_packet() - buffer pointer not passed in");
+
+ err = ctxt->pVtbl->AllocBuffer(ctxt, AMF_MEMORY_HOST, pkt->size + AV_INPUT_BUFFER_PADDING_SIZE, buffer);
+ AMF_RETURN_IF_FALSE(ctxt, err == AMF_OK, err, "amf_buffer_from_packet() - failed");
+ buf = *buffer;
+ err = buf->pVtbl->SetSize(buf, pkt->size);
+ AMF_RETURN_IF_FALSE(ctxt, err == AMF_OK, err, "amf_buffer_from_packet() - SetSize failed");
+ // get the memory location and check the buffer was indeed allocated
+ mem = buf->pVtbl->GetNative(buf);
+ AMF_RETURN_IF_FALSE(ctxt, mem != NULL, AMF_INVALID_POINTER, "amf_buffer_from_packet() - GetNative failed");
+
+ // copy the packet memory and clear data padding
+ memcpy(mem, pkt->data, pkt->size);
+ memset((amf_int8*)(mem)+pkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ return amf_update_buffer_properties(avctx, buf, pkt);
+}
+
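+// receive_frame callback: pulls a packet from the decode queue, submits it to
+// the AMF decoder, handles the NO_FREE_SURFACES / RESOLUTION_CHANGED / EOF
+// drain states, and then tries to return one decoded frame.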
+static int amf_decode_frame(AVCodecContext *avctx, struct AVFrame *frame)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AMFBuffer *buf;
+ AMF_RESULT res;
+ int got_frame = 0;
+ AVPacket *avpkt = ctx->in_pkt;
+
+ if (!ctx->decoder)
+ return AVERROR(EINVAL);
+ // get packet if needed
+    if (!ctx->drain) {
+        if (ctx->resolution_changed)
+            ctx->resolution_changed = 0;
+        else {
+            int ret;
+            av_packet_unref(avpkt);
+            ret = ff_decode_get_packet(avctx, avpkt);
+            if (ret < 0 && ret != AVERROR_EOF)
+                return ret;
+            if (ret == AVERROR_EOF) {
+                //nothing to consume, start external drain
+                ctx->decoder->pVtbl->Drain(ctx->decoder);
+                ctx->drain = 1;
+            }
+        }
+    }
+
+ if (!ctx->drain) {
+ // submit frame
+ res = amf_buffer_from_packet(avctx, avpkt, &buf);
+        AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR(ENOMEM), "Cannot convert AVPacket to AMFBuffer\n");
+ res = ctx->decoder->pVtbl->SubmitInput(ctx->decoder, (AMFData*) buf);
+ buf->pVtbl->Release(buf);
+
+ if(res == AMF_DECODER_NO_FREE_SURFACES) {
+ // input is not consumed, need to QueryOutput and submit again
+ av_log(avctx, AV_LOG_VERBOSE, "SubmitInput() returned NO_FREE_SURFACES\n");
+ } else if (res == AMF_RESOLUTION_CHANGED) {
+ //input is not consumed, start internal drain
+ ctx->decoder->pVtbl->Drain(ctx->decoder);
+ ctx->drain = 1;
+ // process resolution_changed when internal drain is complete
+ ctx->resolution_changed = 1;
+ res = AMF_OK;
+ } else if (res != AMF_OK && res != AMF_NEED_MORE_INPUT && res != AMF_REPEAT) {
+ av_log(avctx, AV_LOG_ERROR, "SubmitInput() returned error %d\n", res);
+ return AVERROR(EINVAL);
+ }
+
+ }
+
+ res = amf_receive_frame(avctx, frame);
+ if (res == AMF_OK)
+ got_frame = 1;
+ else if (res == AMF_REPEAT)
+ // decoder has no output yet
+ res = AMF_OK;
+ else if (res == AMF_EOF) {
+ // drain is complete
+ ctx->drain = 0;
+        if (ctx->resolution_changed) {
+            // re-initialize decoder
+ AMFVariantStruct size_var = {0};
+ res = ctx->decoder->pVtbl->GetProperty(ctx->decoder, AMF_VIDEO_DECODER_CURRENT_SIZE, &size_var);
+ if (res != AMF_OK) {
+ return AVERROR(EINVAL);
+ }
+
+ avctx->width = size_var.sizeValue.width;
+ avctx->height = size_var.sizeValue.height;
+ avctx->coded_width = size_var.sizeValue.width;
+ avctx->coded_height = size_var.sizeValue.height;
+ res = ctx->decoder->pVtbl->ReInit(ctx->decoder, avctx->width, avctx->height);
+ if (res != AMF_OK) {
+ av_log(avctx, AV_LOG_ERROR, "ReInit() returned %d\n", res);
+ return AVERROR(EINVAL);
+ }
+
+        } else
+ return AVERROR_EOF;
+ } else {
+        av_log(avctx, AV_LOG_ERROR, "Unknown result from QueryOutput() %d\n", res);
+ }
+ return got_frame ? 0 : AVERROR(EAGAIN);
+}
+
+static void amf_decode_flush(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ ctx->decoder->pVtbl->Flush(ctx->decoder);
+}
+
+#define OFFSET(x) offsetof(AvAmfDecoderContext, x)
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+ // Decoder mode
+ { "decoder_mode", "Decoder mode", OFFSET(decoder_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AMF_VIDEO_DECODER_MODE_LOW_LATENCY, VD, "decoder_mode" },
+ { "regular", "DPB delay is based on number of reference frames + 1", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_REGULAR }, 0, 0, VD, "decoder_mode" },
+ { "compliant", "DPB delay is based on profile - up to 16", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_COMPLIANT }, 0, 0, VD, "decoder_mode" },
+ { "low_latency", "DPB delay is 0", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_LOW_LATENCY }, 0, 0, VD, "decoder_mode" },
+
+ // Timestamp mode
+ { "timestamp_mode", "Timestamp mode", OFFSET(timestamp_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AMF_TS_DECODE, VD, "timestamp_mode" },
+ { "presentation", "Preserve timestamps from input to output", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_PRESENTATION }, 0, 0, VD, "timestamp_mode" },
+ { "sort", "Resort PTS list", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_SORT }, 0, 0, VD, "timestamp_mode" },
+ { "decode", "Decode order", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_DECODE }, 0, 0, VD, "timestamp_mode" },
+
+ // Reference frame management
+ { "surface_pool_size", "Number of surfaces in the decode pool", OFFSET(surface_pool_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VD, NULL },
+ { "dpb_size", "Minimum number of surfaces for reordering", OFFSET(dpb_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 32, VD, NULL },
+
+ { "lowlatency", "Low latency", OFFSET(lowlatency), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+ { "smart_access_video", "Smart Access Video", OFFSET(smart_access_video), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+ { "skip_transfer_sav", "Skip transfer on another GPU when SAV enabled", OFFSET(skip_transfer_sav), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+
+ { NULL }
+};
+
+static const AVClass amf_decode_class = {
+ .class_name = "amf",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+
+#define DEFINE_AMF_DECODER(x, X, bsf_name) \
+const FFCodec ff_##x##_amf_decoder = { \
+ .p.name = #x "_amf", \
+ CODEC_LONG_NAME(#X " AMD AMF video decoder"), \
+ .priv_data_size = sizeof(AvAmfDecoderContext), \
+ .p.type = AVMEDIA_TYPE_VIDEO, \
+ .p.id = AV_CODEC_ID_##X, \
+ .init = amf_decode_init, \
+ FF_CODEC_RECEIVE_FRAME_CB(amf_decode_frame), \
+ .flush = amf_decode_flush, \
+ .close = amf_decode_close, \
+ .bsfs = bsf_name, \
+ .p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
+ .p.priv_class = &amf_decode_class, \
+ .p.pix_fmts = amf_dec_pix_fmts, \
+ .hw_configs = amf_hw_configs, \
+ .p.wrapper_name = "amf", \
+ .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
+};
+
+DEFINE_AMF_DECODER(h264, H264, "h264_mp4toannexb")
+DEFINE_AMF_DECODER(hevc, HEVC, NULL)
+DEFINE_AMF_DECODER(av1, AV1, NULL)
diff --git a/libavcodec/amfdec.h b/libavcodec/amfdec.h
new file mode 100644
index 0000000000..542e613ce1
--- /dev/null
+++ b/libavcodec/amfdec.h
@@ -0,0 +1,72 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AMFDEC_H
+#define AVCODEC_AMFDEC_H
+
+#include <AMF/core/Buffer.h>
+#include <AMF/core/Factory.h>
+#include <AMF/core/Context.h>
+#include <AMF/core/Surface.h>
+#include <AMF/components/Component.h>
+#include <AMF/components/VideoDecoderUVD.h>
+
+#include "avcodec.h"
+#include "libavformat/avformat.h"
+#include "libavutil/fifo.h"
+#include "libavutil/frame.h"
+#include "libavutil/opt.h"
+
+/**
+* AMF decoder context
+*/
+
+typedef struct AvAmfDecoderContext {
+ AVClass *avclass;
+
+ AVBufferRef *amf_device_ctx_internal;
+ AVBufferRef *amf_device_ctx;
+
+ //decoder
+ AMFComponent *decoder; ///< AMF decoder object
+ AMF_SURFACE_FORMAT format; ///< AMF surface format
+
+ AVBufferRef *hw_device_ctx; ///< pointer to HW accelerator (decoder)
+ AVBufferRef *hw_frames_ctx; ///< pointer to HW accelerator (frame allocator)
+
+ AVBufferRef *hw_device_ref;
+ AVBufferRef *hw_frames_ref;
+
+ // common decoder options
+ int log_to_dbg;
+ // Static options, have to be set before Init() call
+ int decoder_mode;
+ int timestamp_mode;
+ int surface_pool_size;
+ int dpb_size;
+ int lowlatency;
+ int smart_access_video;
+ int skip_transfer_sav;
+ int drain;
+ int resolution_changed;
+ AVPacket* in_pkt;
+ enum AMF_SURFACE_FORMAT output_format;
+
+} AvAmfDecoderContext;
+
+#endif // AVCODEC_AMFDEC_H
\ No newline at end of file
--
2.38.1.windows.1