[FFmpeg-devel] [PATCH] avcodec: Add MediaFoundation encoder wrapper

Marton Balint cus at passwd.hu
Sun May 10 10:44:27 EEST 2020



On Sun, 10 May 2020, Martin Storsjö wrote:

> From: wm4 <nfxjfg at googlemail.com>
>
> This contains encoder wrappers for H264, HEVC, AAC, AC3 and MP3.
>
> This is based on top of an original patch by wm4
> <nfxjfg at googlemail.com>. The original patch supported both encoding
> and decoding, but this patch only includes encoding.
>
> The patch contains further changes by Paweł Wegner
> <pawel.wegner95 at gmail.com> (primarily for splitting out the encoding
> parts of the original patch) and further cleanup, build compatibility
> fixes and tweaks for use with Qualcomm encoders by Martin Storsjö.
> ---
> This allows access to the HW video encoder on Windows on ARM64
> on Qualcomm platforms. However, to actually use it, one has to
> manually choose nv12 as the input pixel format; otherwise the encoder
> format negotiation fails.
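
(For anyone trying this out: assuming the encoder ends up registered under
the name "h264_mf", forcing the input format via the API looks roughly like
this - a minimal sketch, error handling omitted:

    const AVCodec *codec = avcodec_find_encoder_by_name("h264_mf");
    AVCodecContext *enc  = avcodec_alloc_context3(codec);
    enc->width     = 1280;
    enc->height    = 720;
    enc->time_base = (AVRational){1, 30};
    enc->pix_fmt   = AV_PIX_FMT_NV12; /* other formats fail negotiation */
    avcodec_open2(enc, codec, NULL);

or "-pix_fmt nv12" on the ffmpeg command line.)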
>
> I've tried to read up on the feedback this patch got the earlier
> times it was posted and address those issues. In particular,
> this is enabled automatically if suitable headers are available.
> The built binary still runs on Vista (even if the required MF
> functionality isn't available there).
>
> Building succeeds with MSVC and with both old and new mingw-w64 toolchains;
> the feature isn't detected or enabled on mingw.org toolchains. The configure
> check looks for one of the API details used; mingw-w64 versions
> from before that feature was added won't try to build the code,
> while newer ones should have enough features to build it successfully.
> ---
> configure              |   11 +
> libavcodec/Makefile    |    1 +
> libavcodec/allcodecs.c |    5 +
> libavcodec/mf_utils.c  |  677 +++++++++++++++++++++++
> libavcodec/mf_utils.h  |  138 +++++
> libavcodec/mfenc.c     | 1181 ++++++++++++++++++++++++++++++++++++++++
> libavcodec/version.h   |    2 +-
> 7 files changed, 2014 insertions(+), 1 deletion(-)
> create mode 100644 libavcodec/mf_utils.c
> create mode 100644 libavcodec/mf_utils.h
> create mode 100644 libavcodec/mfenc.c

Missing docs update.

Regards,
Marton

>
> diff --git a/configure b/configure
> index e7162dbc56..a52d1ebed5 100755
> --- a/configure
> +++ b/configure
> @@ -304,6 +304,7 @@ External library support:
>   --enable-mbedtls         enable mbedTLS, needed for https support
>                            if openssl, gnutls or libtls is not used [no]
>   --enable-mediacodec      enable Android MediaCodec support [no]
> +  --enable-mf              enable encoding via MediaFoundation [auto]
>   --enable-libmysofa       enable libmysofa, needed for sofalizer filter [no]
>   --enable-openal          enable OpenAL 1.1 capture support [no]
>   --enable-opencl          enable OpenCL processing [no]
> @@ -1704,6 +1705,7 @@ EXTERNAL_AUTODETECT_LIBRARY_LIST="
>     libxcb_shape
>     libxcb_xfixes
>     lzma
> +    mf
>     schannel
>     sdl2
>     securetransport
> @@ -3013,6 +3015,8 @@ wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel"
> wmv3_vdpau_hwaccel_select="vc1_vdpau_hwaccel"
> 
> # hardware-accelerated codecs
> +mf_deps="mftransform_h MFCreateAlignedMemoryBuffer"
> +mf_extralibs="-lmfplat -lmfuuid -lole32 -lstrmiids"
> omx_deps="libdl pthreads"
> omx_rpi_select="omx"
> qsv_deps="libmfx"
> @@ -3037,6 +3041,8 @@ nvenc_deps="ffnvcodec"
> nvenc_deps_any="libdl LoadLibrary"
> nvenc_encoder_deps="nvenc"
> 
> +aac_mf_encoder_deps="mf"
> +ac3_mf_encoder_deps="mf"
> h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"
> h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"
> h264_amf_encoder_deps="amf"
> @@ -3045,6 +3051,7 @@ h264_cuvid_decoder_deps="cuvid"
> h264_cuvid_decoder_select="h264_mp4toannexb_bsf"
> h264_mediacodec_decoder_deps="mediacodec"
> h264_mediacodec_decoder_select="h264_mp4toannexb_bsf h264_parser"
> +h264_mf_encoder_deps="mf"
> h264_mmal_decoder_deps="mmal"
> h264_nvenc_encoder_deps="nvenc"
> h264_omx_encoder_deps="omx"
> @@ -3061,6 +3068,7 @@ hevc_cuvid_decoder_deps="cuvid"
> hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"
> hevc_mediacodec_decoder_deps="mediacodec"
> hevc_mediacodec_decoder_select="hevc_mp4toannexb_bsf hevc_parser"
> +hevc_mf_encoder_deps="mf"
> hevc_nvenc_encoder_deps="nvenc"
> hevc_qsv_decoder_select="hevc_mp4toannexb_bsf qsvdec"
> hevc_qsv_encoder_select="hevcparse qsvenc"
> @@ -3077,6 +3085,7 @@ mjpeg_qsv_encoder_deps="libmfx"
> mjpeg_qsv_encoder_select="qsvenc"
> mjpeg_vaapi_encoder_deps="VAEncPictureParameterBufferJPEG"
> mjpeg_vaapi_encoder_select="cbs_jpeg jpegtables vaapi_encode"
> +mp3_mf_encoder_deps="mf"
> mpeg1_cuvid_decoder_deps="cuvid"
> mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m"
> mpeg2_crystalhd_decoder_select="crystalhd"
> @@ -6099,6 +6108,7 @@ check_headers io.h
> check_headers linux/perf_event.h
> check_headers libcrystalhd/libcrystalhd_if.h
> check_headers malloc.h
> +check_headers mftransform.h
> check_headers net/udplite.h
> check_headers poll.h
> check_headers sys/param.h
> @@ -6161,6 +6171,7 @@ check_type "windows.h dxva.h" "DXVA_PicParams_VP9" -DWINAPI_FAMILY=WINAPI_FAMILY
> check_type "windows.h d3d11.h" "ID3D11VideoDecoder"
> check_type "windows.h d3d11.h" "ID3D11VideoContext"
> check_type "d3d9.h dxva2api.h" DXVA2_ConfigPictureDecode -D_WIN32_WINNT=0x0602
> +check_func_headers mfapi.h MFCreateAlignedMemoryBuffer -lmfplat
> 
> check_type "vdpau/vdpau.h" "VdpPictureInfoHEVC"
> check_type "vdpau/vdpau.h" "VdpPictureInfoVP9"
> diff --git a/libavcodec/Makefile b/libavcodec/Makefile
> index 38f6f07680..1bb08e11ad 100644
> --- a/libavcodec/Makefile
> +++ b/libavcodec/Makefile
> @@ -1033,6 +1033,7 @@ OBJS-$(CONFIG_LIBXAVS_ENCODER)            += libxavs.o
> OBJS-$(CONFIG_LIBXAVS2_ENCODER)           += libxavs2.o
> OBJS-$(CONFIG_LIBXVID_ENCODER)            += libxvid.o
> OBJS-$(CONFIG_LIBZVBI_TELETEXT_DECODER)   += libzvbi-teletextdec.o ass.o
> +OBJS-$(CONFIG_MF)                         += mfenc.o mf_utils.o
> 
> # parsers
> OBJS-$(CONFIG_AAC_LATM_PARSER)         += latm_parser.o
> diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
> index 54d40ebdbc..8473e5a023 100644
> --- a/libavcodec/allcodecs.c
> +++ b/libavcodec/allcodecs.c
> @@ -676,7 +676,9 @@ extern AVCodec ff_xsub_decoder;
> /* external libraries */
> extern AVCodec ff_aac_at_encoder;
> extern AVCodec ff_aac_at_decoder;
> +extern AVCodec ff_aac_mf_encoder;
> extern AVCodec ff_ac3_at_decoder;
> +extern AVCodec ff_ac3_mf_encoder;
> extern AVCodec ff_adpcm_ima_qt_at_decoder;
> extern AVCodec ff_alac_at_encoder;
> extern AVCodec ff_alac_at_decoder;
> @@ -688,6 +690,7 @@ extern AVCodec ff_ilbc_at_decoder;
> extern AVCodec ff_mp1_at_decoder;
> extern AVCodec ff_mp2_at_decoder;
> extern AVCodec ff_mp3_at_decoder;
> +extern AVCodec ff_mp3_mf_encoder;
> extern AVCodec ff_pcm_alaw_at_encoder;
> extern AVCodec ff_pcm_alaw_at_decoder;
> extern AVCodec ff_pcm_mulaw_at_encoder;
> @@ -757,6 +760,7 @@ extern AVCodec ff_libopenh264_encoder;
> extern AVCodec ff_libopenh264_decoder;
> extern AVCodec ff_h264_amf_encoder;
> extern AVCodec ff_h264_cuvid_decoder;
> +extern AVCodec ff_h264_mf_encoder;
> extern AVCodec ff_h264_nvenc_encoder;
> extern AVCodec ff_h264_omx_encoder;
> extern AVCodec ff_h264_qsv_encoder;
> @@ -771,6 +775,7 @@ extern AVCodec ff_nvenc_hevc_encoder;
> extern AVCodec ff_hevc_amf_encoder;
> extern AVCodec ff_hevc_cuvid_decoder;
> extern AVCodec ff_hevc_mediacodec_decoder;
> +extern AVCodec ff_hevc_mf_encoder;
> extern AVCodec ff_hevc_nvenc_encoder;
> extern AVCodec ff_hevc_qsv_encoder;
> extern AVCodec ff_hevc_v4l2m2m_encoder;
> diff --git a/libavcodec/mf_utils.c b/libavcodec/mf_utils.c
> new file mode 100644
> index 0000000000..c372ffe5c3
> --- /dev/null
> +++ b/libavcodec/mf_utils.c
> @@ -0,0 +1,677 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#define COBJMACROS
> +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
> +#undef _WIN32_WINNT
> +#define _WIN32_WINNT 0x0602
> +#endif
> +
> +#include "mf_utils.h"
> +#include "libavutil/pixdesc.h"
> +
> +HRESULT ff_MFGetAttributeSize(IMFAttributes *pattr, REFGUID guid,
> +                              UINT32 *pw, UINT32 *ph)
> +{
> +    UINT64 t;
> +    HRESULT hr = IMFAttributes_GetUINT64(pattr, guid, &t);
> +    if (!FAILED(hr)) {
> +        *pw = t >> 32;
> +        *ph = (UINT32)t;
> +    }
> +    return hr;
> +}
> +
> +HRESULT ff_MFSetAttributeSize(IMFAttributes *pattr, REFGUID guid,
> +                              UINT32 uw, UINT32 uh)
> +{
> +    UINT64 t = (((UINT64)uw) << 32) | uh;
> +    return IMFAttributes_SetUINT64(pattr, guid, t);
> +}
> +
> +#define ff_MFSetAttributeRatio ff_MFSetAttributeSize
> +#define ff_MFGetAttributeRatio ff_MFGetAttributeSize
> +
> +// MFTEnumEx was missing from mingw-w64's mfplat import library until
> +// mingw-w64 v6.0.0, thus wrap it and load it using GetProcAddress.
> +// It's also missing in Windows Vista's mfplat.dll.
> +HRESULT ff_MFTEnumEx(GUID guidCategory, UINT32 Flags,
> +                     const MFT_REGISTER_TYPE_INFO *pInputType,
> +                     const MFT_REGISTER_TYPE_INFO *pOutputType,
> +                     IMFActivate ***pppMFTActivate, UINT32 *pnumMFTActivate)
> +{
> +    HRESULT (WINAPI *MFTEnumEx_ptr)(GUID guidCategory, UINT32 Flags,
> +                                    const MFT_REGISTER_TYPE_INFO *pInputType,
> +                                    const MFT_REGISTER_TYPE_INFO *pOutputType,
> +                                    IMFActivate ***pppMFTActivate,
> +                                    UINT32 *pnumMFTActivate) = NULL;
> +    HANDLE lib = GetModuleHandleW(L"mfplat.dll");
> +    if (lib)
> +        MFTEnumEx_ptr = (void *)GetProcAddress(lib, "MFTEnumEx");
> +    if (!MFTEnumEx_ptr)
> +        return E_FAIL;
> +    return MFTEnumEx_ptr(guidCategory,
> +                         Flags,
> +                         pInputType,
> +                         pOutputType,
> +                         pppMFTActivate,
> +                         pnumMFTActivate);
> +}
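
(Side note: using GetModuleHandleW() rather than LoadLibrary() works here
because mfplat.dll is already pulled in via the -lmfplat extralib in
configure, so the module is already loaded in any process that got this far.)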
> +
> +char *ff_hr_str_buf(char *buf, size_t size, HRESULT hr)
> +{
> +#define HR(x) case x: return (char *) # x;
> +    switch (hr) {
> +    HR(S_OK)
> +    HR(E_UNEXPECTED)
> +    HR(MF_E_INVALIDMEDIATYPE)
> +    HR(MF_E_INVALIDSTREAMNUMBER)
> +    HR(MF_E_INVALIDTYPE)
> +    HR(MF_E_TRANSFORM_CANNOT_CHANGE_MEDIATYPE_WHILE_PROCESSING)
> +    HR(MF_E_TRANSFORM_TYPE_NOT_SET)
> +    HR(MF_E_UNSUPPORTED_D3D_TYPE)
> +    HR(MF_E_TRANSFORM_NEED_MORE_INPUT)
> +    HR(MF_E_TRANSFORM_STREAM_CHANGE)
> +    HR(MF_E_NOTACCEPTING)
> +    HR(MF_E_NO_SAMPLE_TIMESTAMP)
> +    HR(MF_E_NO_SAMPLE_DURATION)
> +#undef HR
> +    }
> +    snprintf(buf, size, "%x", (unsigned)hr);
> +    return buf;
> +}
> +
> +// If fill_data!=NULL, initialize the buffer and set the length. (This is a
> +// subtle but important difference: some decoders want CurrentLength==0 on
> +// provided output buffers.)
> +IMFSample *ff_create_memory_sample(void *fill_data, size_t size, size_t align)
> +{
> +    HRESULT hr;
> +    IMFSample *sample;
> +    IMFMediaBuffer *buffer;
> +
> +    hr = MFCreateSample(&sample);
> +    if (FAILED(hr))
> +        return NULL;
> +
> +    align = FFMAX(align, 16); // 16 is "recommended", even if not required
> +
> +    hr = MFCreateAlignedMemoryBuffer(size, align - 1, &buffer);
> +    if (FAILED(hr)) {
> +        IMFSample_Release(sample);
> +        return NULL;
> +    }
> +
> +    if (fill_data) {
> +        BYTE *tmp;
> +
> +        hr = IMFMediaBuffer_Lock(buffer, &tmp, NULL, NULL);
> +        if (FAILED(hr)) {
> +            IMFMediaBuffer_Release(buffer);
> +            IMFSample_Release(sample);
> +            return NULL;
> +        }
> +        memcpy(tmp, fill_data, size);
> +
> +        IMFMediaBuffer_SetCurrentLength(buffer, size);
> +        IMFMediaBuffer_Unlock(buffer);
> +    }
> +
> +    IMFSample_AddBuffer(sample, buffer);
> +    IMFMediaBuffer_Release(buffer);
> +
> +    return sample;
> +}
> +
> +enum AVSampleFormat ff_media_type_to_sample_fmt(IMFAttributes *type)
> +{
> +    HRESULT hr;
> +    UINT32 bits;
> +    GUID subtype;
> +
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_BITS_PER_SAMPLE, &bits);
> +    if (FAILED(hr))
> +        return AV_SAMPLE_FMT_NONE;
> +
> +    hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &subtype);
> +    if (FAILED(hr))
> +        return AV_SAMPLE_FMT_NONE;
> +
> +    if (IsEqualGUID(&subtype, &MFAudioFormat_PCM)) {
> +        switch (bits) {
> +        case 8:  return AV_SAMPLE_FMT_U8;
> +        case 16: return AV_SAMPLE_FMT_S16;
> +        case 32: return AV_SAMPLE_FMT_S32;
> +        }
> +    } else if (IsEqualGUID(&subtype, &MFAudioFormat_Float)) {
> +        switch (bits) {
> +        case 32: return AV_SAMPLE_FMT_FLT;
> +        case 64: return AV_SAMPLE_FMT_DBL;
> +        }
> +    }
> +
> +    return AV_SAMPLE_FMT_NONE;
> +}
> +
> +struct mf_pix_fmt_entry {
> +    const GUID *guid;
> +    enum AVPixelFormat pix_fmt;
> +};
> +
> +static const struct mf_pix_fmt_entry mf_pix_fmts[] = {
> +    {&MFVideoFormat_IYUV, AV_PIX_FMT_YUV420P},
> +    {&MFVideoFormat_I420, AV_PIX_FMT_YUV420P},
> +    {&MFVideoFormat_NV12, AV_PIX_FMT_NV12},
> +    {&MFVideoFormat_P010, AV_PIX_FMT_P010},
> +    {&MFVideoFormat_P016, AV_PIX_FMT_P010}, // not equal, but compatible
> +    {&MFVideoFormat_YUY2, AV_PIX_FMT_YUYV422},
> +};
> +
> +enum AVPixelFormat ff_media_type_to_pix_fmt(IMFAttributes *type)
> +{
> +    HRESULT hr;
> +    GUID subtype;
> +    int i;
> +
> +    hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &subtype);
> +    if (FAILED(hr))
> +        return AV_PIX_FMT_NONE;
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(mf_pix_fmts); i++) {
> +        if (IsEqualGUID(&subtype, mf_pix_fmts[i].guid))
> +            return mf_pix_fmts[i].pix_fmt;
> +    }
> +
> +    return AV_PIX_FMT_NONE;
> +}
> +
> +const GUID *ff_pix_fmt_to_guid(enum AVPixelFormat pix_fmt)
> +{
> +    int i;
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(mf_pix_fmts); i++) {
> +        if (mf_pix_fmts[i].pix_fmt == pix_fmt)
> +            return mf_pix_fmts[i].guid;
> +    }
> +
> +    return NULL;
> +}
> +
> +// If this GUID is of the form XXXXXXXX-0000-0010-8000-00AA00389B71, then
> +// extract the XXXXXXXX prefix as FourCC (oh the pain).
> +int ff_fourcc_from_guid(const GUID *guid, uint32_t *out_fourcc)
> +{
> +    if (guid->Data2 == 0 && guid->Data3 == 0x0010 &&
> +        guid->Data4[0] == 0x80 &&
> +        guid->Data4[1] == 0x00 &&
> +        guid->Data4[2] == 0x00 &&
> +        guid->Data4[3] == 0xAA &&
> +        guid->Data4[4] == 0x00 &&
> +        guid->Data4[5] == 0x38 &&
> +        guid->Data4[6] == 0x9B &&
> +        guid->Data4[7] == 0x71) {
> +        *out_fourcc = guid->Data1;
> +        return 0;
> +    }
> +
> +    *out_fourcc = 0;
> +    return AVERROR_UNKNOWN;
> +}
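
(As a worked example of the pattern being matched: ff_MFVideoFormat_HEVC
further down is defined from the FourCC 0x43564548 ("HEVC" read byte by
byte from the low end), giving the GUID
{43564548-0000-0010-8000-00AA00389B71}, from which this function recovers
the 0x43564548 value again.)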
> +
> +struct GUID_Entry {
> +    const GUID *guid;
> +    const char *name;
> +};
> +
> +#define GUID_ENTRY(var) {&(var), # var}
> +
> +static struct GUID_Entry guid_names[] = {
> +    GUID_ENTRY(MFT_FRIENDLY_NAME_Attribute),
> +    GUID_ENTRY(MFT_TRANSFORM_CLSID_Attribute),
> +    GUID_ENTRY(MFT_ENUM_HARDWARE_URL_Attribute),
> +    GUID_ENTRY(MFT_CONNECTED_STREAM_ATTRIBUTE),
> +    GUID_ENTRY(MFT_CONNECTED_TO_HW_STREAM),
> +    GUID_ENTRY(MF_SA_D3D_AWARE),
> +    GUID_ENTRY(ff_MF_SA_MINIMUM_OUTPUT_SAMPLE_COUNT),
> +    GUID_ENTRY(ff_MF_SA_MINIMUM_OUTPUT_SAMPLE_COUNT_PROGRESSIVE),
> +    GUID_ENTRY(ff_MF_SA_D3D11_BINDFLAGS),
> +    GUID_ENTRY(ff_MF_SA_D3D11_USAGE),
> +    GUID_ENTRY(ff_MF_SA_D3D11_AWARE),
> +    GUID_ENTRY(ff_MF_SA_D3D11_SHARED),
> +    GUID_ENTRY(ff_MF_SA_D3D11_SHARED_WITHOUT_MUTEX),
> +    GUID_ENTRY(MF_MT_SUBTYPE),
> +    GUID_ENTRY(MF_MT_MAJOR_TYPE),
> +    GUID_ENTRY(MF_MT_AUDIO_SAMPLES_PER_SECOND),
> +    GUID_ENTRY(MF_MT_AUDIO_NUM_CHANNELS),
> +    GUID_ENTRY(MF_MT_AUDIO_CHANNEL_MASK),
> +    GUID_ENTRY(MF_MT_FRAME_SIZE),
> +    GUID_ENTRY(MF_MT_INTERLACE_MODE),
> +    GUID_ENTRY(MF_MT_USER_DATA),
> +    GUID_ENTRY(MF_MT_PIXEL_ASPECT_RATIO),
> +    GUID_ENTRY(MFMediaType_Audio),
> +    GUID_ENTRY(MFMediaType_Video),
> +    GUID_ENTRY(MFAudioFormat_PCM),
> +    GUID_ENTRY(MFAudioFormat_Float),
> +    GUID_ENTRY(MFVideoFormat_H264),
> +    GUID_ENTRY(MFVideoFormat_H264_ES),
> +    GUID_ENTRY(ff_MFVideoFormat_HEVC),
> +    GUID_ENTRY(ff_MFVideoFormat_HEVC_ES),
> +    GUID_ENTRY(MFVideoFormat_MPEG2),
> +    GUID_ENTRY(MFVideoFormat_MP43),
> +    GUID_ENTRY(MFVideoFormat_MP4V),
> +    GUID_ENTRY(MFVideoFormat_WMV1),
> +    GUID_ENTRY(MFVideoFormat_WMV2),
> +    GUID_ENTRY(MFVideoFormat_WMV3),
> +    GUID_ENTRY(MFVideoFormat_WVC1),
> +    GUID_ENTRY(MFAudioFormat_Dolby_AC3),
> +    GUID_ENTRY(MFAudioFormat_Dolby_DDPlus),
> +    GUID_ENTRY(MFAudioFormat_AAC),
> +    GUID_ENTRY(MFAudioFormat_MP3),
> +    GUID_ENTRY(MFAudioFormat_MSP1),
> +    GUID_ENTRY(MFAudioFormat_WMAudioV8),
> +    GUID_ENTRY(MFAudioFormat_WMAudioV9),
> +    GUID_ENTRY(MFAudioFormat_WMAudio_Lossless),
> +    GUID_ENTRY(MF_MT_ALL_SAMPLES_INDEPENDENT),
> +    GUID_ENTRY(MF_MT_AM_FORMAT_TYPE),
> +    GUID_ENTRY(MF_MT_COMPRESSED),
> +    GUID_ENTRY(MF_MT_FIXED_SIZE_SAMPLES),
> +    GUID_ENTRY(MF_MT_SAMPLE_SIZE),
> +    GUID_ENTRY(MF_MT_WRAPPED_TYPE),
> +    GUID_ENTRY(MF_MT_AAC_AUDIO_PROFILE_LEVEL_INDICATION),
> +    GUID_ENTRY(MF_MT_AAC_PAYLOAD_TYPE),
> +    GUID_ENTRY(MF_MT_AUDIO_AVG_BYTES_PER_SECOND),
> +    GUID_ENTRY(MF_MT_AUDIO_BITS_PER_SAMPLE),
> +    GUID_ENTRY(MF_MT_AUDIO_BLOCK_ALIGNMENT),
> +    GUID_ENTRY(MF_MT_AUDIO_CHANNEL_MASK),
> +    GUID_ENTRY(MF_MT_AUDIO_FLOAT_SAMPLES_PER_SECOND),
> +    GUID_ENTRY(MF_MT_AUDIO_FOLDDOWN_MATRIX),
> +    GUID_ENTRY(MF_MT_AUDIO_NUM_CHANNELS),
> +    GUID_ENTRY(MF_MT_AUDIO_PREFER_WAVEFORMATEX),
> +    GUID_ENTRY(MF_MT_AUDIO_SAMPLES_PER_BLOCK),
> +    GUID_ENTRY(MF_MT_AUDIO_SAMPLES_PER_SECOND),
> +    GUID_ENTRY(MF_MT_AUDIO_VALID_BITS_PER_SAMPLE),
> +    GUID_ENTRY(MF_MT_AUDIO_WMADRC_AVGREF),
> +    GUID_ENTRY(MF_MT_AUDIO_WMADRC_AVGTARGET),
> +    GUID_ENTRY(MF_MT_AUDIO_WMADRC_PEAKREF),
> +    GUID_ENTRY(MF_MT_AUDIO_WMADRC_PEAKTARGET),
> +    GUID_ENTRY(MF_MT_ORIGINAL_WAVE_FORMAT_TAG),
> +    GUID_ENTRY(MF_MT_AVG_BIT_ERROR_RATE),
> +    GUID_ENTRY(MF_MT_AVG_BITRATE),
> +    GUID_ENTRY(MF_MT_CUSTOM_VIDEO_PRIMARIES),
> +    GUID_ENTRY(MF_MT_DEFAULT_STRIDE),
> +    GUID_ENTRY(MF_MT_DRM_FLAGS),
> +    GUID_ENTRY(MF_MT_FRAME_RATE),
> +    GUID_ENTRY(MF_MT_FRAME_RATE_RANGE_MAX),
> +    GUID_ENTRY(MF_MT_FRAME_RATE_RANGE_MIN),
> +    GUID_ENTRY(MF_MT_FRAME_SIZE),
> +    GUID_ENTRY(MF_MT_GEOMETRIC_APERTURE),
> +    GUID_ENTRY(MF_MT_INTERLACE_MODE),
> +    GUID_ENTRY(MF_MT_MAX_KEYFRAME_SPACING),
> +    GUID_ENTRY(MF_MT_MINIMUM_DISPLAY_APERTURE),
> +    GUID_ENTRY(MF_MT_MPEG_SEQUENCE_HEADER),
> +    GUID_ENTRY(MF_MT_MPEG_START_TIME_CODE),
> +    GUID_ENTRY(MF_MT_MPEG2_FLAGS),
> +    GUID_ENTRY(MF_MT_MPEG2_LEVEL),
> +    GUID_ENTRY(MF_MT_MPEG2_PROFILE),
> +    GUID_ENTRY(MF_MT_ORIGINAL_4CC),
> +    GUID_ENTRY(MF_MT_PAD_CONTROL_FLAGS),
> +    GUID_ENTRY(MF_MT_PALETTE),
> +    GUID_ENTRY(MF_MT_PAN_SCAN_APERTURE),
> +    GUID_ENTRY(MF_MT_PAN_SCAN_ENABLED),
> +    GUID_ENTRY(MF_MT_PIXEL_ASPECT_RATIO),
> +    GUID_ENTRY(MF_MT_SOURCE_CONTENT_HINT),
> +    GUID_ENTRY(MF_MT_TRANSFER_FUNCTION),
> +    GUID_ENTRY(MF_MT_VIDEO_CHROMA_SITING),
> +    GUID_ENTRY(MF_MT_VIDEO_LIGHTING),
> +    GUID_ENTRY(MF_MT_VIDEO_NOMINAL_RANGE),
> +    GUID_ENTRY(MF_MT_VIDEO_PRIMARIES),
> +    GUID_ENTRY(MF_MT_VIDEO_ROTATION),
> +    GUID_ENTRY(MF_MT_YUV_MATRIX),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoThumbnailGenerationMode),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoDropPicWithMissingRef),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoSoftwareDeinterlaceMode),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoFastDecodeMode),
> +    GUID_ENTRY(ff_CODECAPI_AVLowLatencyMode),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoH264ErrorConcealment),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoMPEG2ErrorConcealment),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoCodecType),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoDXVAMode),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoDXVABusEncryption),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoSWPowerLevel),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoMaxCodedWidth),
> +    GUID_ENTRY(ff_CODECAPI_AVDecVideoMaxCodedHeight),
> +    GUID_ENTRY(ff_CODECAPI_AVDecNumWorkerThreads),
> +    GUID_ENTRY(ff_CODECAPI_AVDecSoftwareDynamicFormatChange),
> +    GUID_ENTRY(ff_CODECAPI_AVDecDisableVideoPostProcessing),
> +};
> +
> +char *ff_guid_str_buf(char *buf, size_t buf_size, const GUID *guid)
> +{
> +    uint32_t fourcc;
> +    int n;
> +    for (n = 0; n < FF_ARRAY_ELEMS(guid_names); n++) {
> +        if (IsEqualGUID(guid, guid_names[n].guid)) {
> +            snprintf(buf, buf_size, "%s", guid_names[n].name);
> +            return buf;
> +        }
> +    }
> +
> +    if (ff_fourcc_from_guid(guid, &fourcc) >= 0) {
> +        snprintf(buf, buf_size, "<FourCC %s>", av_fourcc2str(fourcc));
> +        return buf;
> +    }
> +
> +    snprintf(buf, buf_size,
> +             "{%8.8x-%4.4x-%4.4x-%2.2x%2.2x-%2.2x%2.2x%2.2x%2.2x%2.2x%2.2x}",
> +             (unsigned) guid->Data1, guid->Data2, guid->Data3,
> +             guid->Data4[0], guid->Data4[1],
> +             guid->Data4[2], guid->Data4[3],
> +             guid->Data4[4], guid->Data4[5],
> +             guid->Data4[6], guid->Data4[7]);
> +    return buf;
> +}
> +
> +void ff_attributes_dump(void *log, IMFAttributes *attrs)
> +{
> +    HRESULT hr;
> +    UINT32 count;
> +    int n;
> +
> +    hr = IMFAttributes_GetCount(attrs, &count);
> +    if (FAILED(hr))
> +        return;
> +
> +    for (n = 0; n < count; n++) {
> +        GUID key;
> +        MF_ATTRIBUTE_TYPE type;
> +        char extra[80] = {0};
> +        const char *name = NULL;
> +
> +        hr = IMFAttributes_GetItemByIndex(attrs, n, &key, NULL);
> +        if (FAILED(hr))
> +            goto err;
> +
> +        name = ff_guid_str(&key);
> +
> +        if (IsEqualGUID(&key, &MF_MT_AUDIO_CHANNEL_MASK)) {
> +            UINT32 v;
> +            hr = IMFAttributes_GetUINT32(attrs, &key, &v);
> +            if (FAILED(hr))
> +                goto err;
> +            snprintf(extra, sizeof(extra), " (0x%x)", (unsigned)v);
> +        } else if (IsEqualGUID(&key, &MF_MT_FRAME_SIZE)) {
> +            UINT32 w, h;
> +
> +            hr = ff_MFGetAttributeSize(attrs, &MF_MT_FRAME_SIZE, &w, &h);
> +            if (FAILED(hr))
> +                goto err;
> +            snprintf(extra, sizeof(extra), " (%dx%d)", (int)w, (int)h);
> +        } else if (IsEqualGUID(&key, &MF_MT_PIXEL_ASPECT_RATIO) ||
> +                   IsEqualGUID(&key, &MF_MT_FRAME_RATE)) {
> +            UINT32 num, den;
> +
> +            hr = ff_MFGetAttributeRatio(attrs, &key, &num, &den);
> +            if (FAILED(hr))
> +                goto err;
> +            snprintf(extra, sizeof(extra), " (%d:%d)", (int)num, (int)den);
> +        }
> +
> +        hr = IMFAttributes_GetItemType(attrs, &key, &type);
> +        if (FAILED(hr))
> +            goto err;
> +
> +        switch (type) {
> +        case MF_ATTRIBUTE_UINT32: {
> +            UINT32 v;
> +            hr = IMFAttributes_GetUINT32(attrs, &key, &v);
> +            if (FAILED(hr))
> +                goto err;
> +            av_log(log, AV_LOG_VERBOSE, "   %s=%d%s\n", name, (int)v, extra);
> +            break;
> +        }
> +        case MF_ATTRIBUTE_UINT64: {
> +            UINT64 v;
> +            hr = IMFAttributes_GetUINT64(attrs, &key, &v);
> +            if (FAILED(hr))
> +                goto err;
> +            av_log(log, AV_LOG_VERBOSE, "   %s=%lld%s\n", name, (long long)v, extra);
> +            break;
> +        }
> +        case MF_ATTRIBUTE_DOUBLE: {
> +            DOUBLE v;
> +            hr = IMFAttributes_GetDouble(attrs, &key, &v);
> +            if (FAILED(hr))
> +                goto err;
> +            av_log(log, AV_LOG_VERBOSE, "   %s=%f%s\n", name, (double)v, extra);
> +            break;
> +        }
> +        case MF_ATTRIBUTE_STRING: {
> +            wchar_t s[512]; // being lazy here
> +            hr = IMFAttributes_GetString(attrs, &key, s, FF_ARRAY_ELEMS(s), NULL);
> +            if (FAILED(hr))
> +                goto err;
> +            av_log(log, AV_LOG_VERBOSE, "   %s='%ls'%s\n", name, s, extra);
> +            break;
> +        }
> +        case MF_ATTRIBUTE_GUID: {
> +            GUID v;
> +            hr = IMFAttributes_GetGUID(attrs, &key, &v);
> +            if (FAILED(hr))
> +                goto err;
> +            av_log(log, AV_LOG_VERBOSE, "   %s=%s%s\n", name, ff_guid_str(&v), extra);
> +            break;
> +        }
> +        case MF_ATTRIBUTE_BLOB: {
> +            UINT32 sz;
> +            UINT8 buffer[100];
> +            hr = IMFAttributes_GetBlobSize(attrs, &key, &sz);
> +            if (FAILED(hr))
> +                goto err;
> +            if (sz <= sizeof(buffer)) {
> +                // hex-dump it
> +                char str[512] = {0};
> +                size_t pos = 0;
> +                hr = IMFAttributes_GetBlob(attrs, &key, buffer, sizeof(buffer), &sz);
> +                if (FAILED(hr))
> +                    goto err;
> +                for (pos = 0; pos < sz; pos++) {
> +                    const char *hex = "0123456789ABCDEF";
> +                    if (pos * 3 + 3 > sizeof(str))
> +                        break;
> +                    str[pos * 3 + 0] = hex[buffer[pos] >> 4];
> +                    str[pos * 3 + 1] = hex[buffer[pos] & 15];
> +                    str[pos * 3 + 2] = ' ';
> +                }
> +                str[pos * 3 + 0] = 0;
> +                av_log(log, AV_LOG_VERBOSE, "   %s=<blob size %d: %s>%s\n", name, (int)sz, str, extra);
> +            } else {
> +                av_log(log, AV_LOG_VERBOSE, "   %s=<blob size %d>%s\n", name, (int)sz, extra);
> +            }
> +            break;
> +        }
> +        case MF_ATTRIBUTE_IUNKNOWN: {
> +            av_log(log, AV_LOG_VERBOSE, "   %s=<IUnknown>%s\n", name, extra);
> +            break;
> +        }
> +        default:
> +            av_log(log, AV_LOG_VERBOSE, "   %s=<unknown type>%s\n", name, extra);
> +            break;
> +        }
> +
> +        if (IsEqualGUID(&key, &MF_MT_SUBTYPE)) {
> +            const char *fmt;
> +            fmt = av_get_sample_fmt_name(ff_media_type_to_sample_fmt(attrs));
> +            if (fmt)
> +                av_log(log, AV_LOG_VERBOSE, "   FF-sample-format=%s\n", fmt);
> +
> +            fmt = av_get_pix_fmt_name(ff_media_type_to_pix_fmt(attrs));
> +            if (fmt)
> +                av_log(log, AV_LOG_VERBOSE, "   FF-pixel-format=%s\n", fmt);
> +        }
> +
> +        continue;
> +    err:
> +        av_log(log, AV_LOG_VERBOSE, "   %s=<failed to get value>\n", name ? name : "?");
> +    }
> +}
> +
> +void ff_media_type_dump(void *log, IMFMediaType *type)
> +{
> +    ff_attributes_dump(log, (IMFAttributes *)type);
> +}
> +
> +const CLSID *ff_codec_to_mf_subtype(enum AVCodecID codec)
> +{
> +    switch (codec) {
> +    case AV_CODEC_ID_H264:              return &MFVideoFormat_H264;
> +    case AV_CODEC_ID_HEVC:              return &ff_MFVideoFormat_HEVC;
> +    case AV_CODEC_ID_AC3:               return &MFAudioFormat_Dolby_AC3;
> +    case AV_CODEC_ID_AAC:               return &MFAudioFormat_AAC;
> +    case AV_CODEC_ID_MP3:               return &MFAudioFormat_MP3;
> +    default:                            return NULL;
> +    }
> +}
> +
> +static int init_com_mf(void *log)
> +{
> +    HRESULT hr;
> +
> +    hr = CoInitializeEx(NULL, COINIT_MULTITHREADED);
> +    if (hr == RPC_E_CHANGED_MODE) {
> +        av_log(log, AV_LOG_ERROR, "COM must not be in STA mode\n");
> +        return AVERROR(EINVAL);
> +    } else if (FAILED(hr)) {
> +        av_log(log, AV_LOG_ERROR, "could not initialize COM\n");
> +        return AVERROR(ENOSYS);
> +    }
> +
> +    hr = MFStartup(MF_VERSION, MFSTARTUP_FULL);
> +    if (FAILED(hr)) {
> +        av_log(log, AV_LOG_ERROR, "could not initialize MediaFoundation\n");
> +        CoUninitialize();
> +        return AVERROR(ENOSYS);
> +    }
> +
> +    return 0;
> +}
> +
> +static void uninit_com_mf(void)
> +{
> +    MFShutdown();
> +    CoUninitialize();
> +}
> +
> +// Find and create a IMFTransform with the given input/output types. When done,
> +// you should use ff_free_mf() to destroy it, which will also uninit COM.
> +int ff_instantiate_mf(void *log,
> +                      GUID category,
> +                      MFT_REGISTER_TYPE_INFO *in_type,
> +                      MFT_REGISTER_TYPE_INFO *out_type,
> +                      int use_hw,
> +                      IMFTransform **res)
> +{
> +    HRESULT hr;
> +    int n;
> +    int ret;
> +    IMFActivate **activate;
> +    UINT32 num_activate;
> +    IMFActivate *winner = 0;
> +    UINT32 flags;
> +
> +    ret = init_com_mf(log);
> +    if (ret < 0)
> +        return ret;
> +
> +    flags = MFT_ENUM_FLAG_SORTANDFILTER;
> +
> +    if (use_hw) {
> +        flags |= MFT_ENUM_FLAG_HARDWARE;
> +    } else {
> +        flags |= MFT_ENUM_FLAG_SYNCMFT;
> +    }
> +
> +    hr = ff_MFTEnumEx(category, flags, in_type, out_type, &activate,
> +                      &num_activate);
> +    if (FAILED(hr))
> +        goto error_uninit_mf;
> +
> +    if (log) {
> +        if (!num_activate)
> +            av_log(log, AV_LOG_ERROR, "could not find any MFT for the given media type\n");
> +
> +        for (n = 0; n < num_activate; n++) {
> +            av_log(log, AV_LOG_VERBOSE, "MF %d attributes:\n", n);
> +            ff_attributes_dump(log, (IMFAttributes *)activate[n]);
> +        }
> +    }
> +
> +    *res = NULL;
> +    for (n = 0; n < num_activate; n++) {
> +        if (log)
> +            av_log(log, AV_LOG_VERBOSE, "activate MFT %d\n", n);
> +        hr = IMFActivate_ActivateObject(activate[n], &IID_IMFTransform,
> +                                        (void **)res);
> +        if (*res) {
> +            winner = activate[n];
> +            IMFActivate_AddRef(winner);
> +            break;
> +        }
> +    }
> +
> +    for (n = 0; n < num_activate; n++)
> +       IMFActivate_Release(activate[n]);
> +    CoTaskMemFree(activate);
> +
> +    if (!*res) {
> +        if (log)
> +            av_log(log, AV_LOG_ERROR, "could not create MFT\n");
> +        goto error_uninit_mf;
> +    }
> +
> +    if (log) {
> +        wchar_t s[512]; // being lazy here
> +        IMFAttributes *attrs;
> +        hr = IMFTransform_GetAttributes(*res, &attrs);
> +        if (!FAILED(hr) && attrs) {
> +
> +            av_log(log, AV_LOG_VERBOSE, "MFT attributes\n");
> +            ff_attributes_dump(log, attrs);
> +            IMFAttributes_Release(attrs);
> +        }
> +
> +        hr = IMFActivate_GetString(winner, &MFT_FRIENDLY_NAME_Attribute, s,
> +                                   FF_ARRAY_ELEMS(s), NULL);
> +        if (!FAILED(hr))
> +            av_log(log, AV_LOG_INFO, "MFT name: '%ls'\n", s);
> +
> +    }
> +
> +    IMFActivate_Release(winner);
> +
> +    return 0;
> +
> +error_uninit_mf:
> +    uninit_com_mf();
> +    return AVERROR(ENOSYS);
> +}
> +
> +void ff_free_mf(IMFTransform **mft)
> +{
> +    if (*mft)
> +        IMFTransform_Release(*mft);
> +    *mft = NULL;
> +    uninit_com_mf();
> +}
> diff --git a/libavcodec/mf_utils.h b/libavcodec/mf_utils.h
> new file mode 100644
> index 0000000000..cfca136b70
> --- /dev/null
> +++ b/libavcodec/mf_utils.h
> @@ -0,0 +1,138 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#ifndef AVCODEC_MF_UTILS_H
> +#define AVCODEC_MF_UTILS_H
> +
> +#include <windows.h>
> +#include <initguid.h>
> +#include <wmcodecdsp.h>
> +#include <mfapi.h>
> +#include <mferror.h>
> +#include <mfobjects.h>
> +#include <mftransform.h>
> +#include <uuids.h>
> +#include <codecapi.h>
> +
> +#include "avcodec.h"
> +
> +// These functions do exist in mfapi.h, but are only available within
> +// __cplusplus ifdefs.
> +HRESULT ff_MFGetAttributeSize(IMFAttributes *pattr, REFGUID guid,
> +                              UINT32 *pw, UINT32 *ph);
> +HRESULT ff_MFSetAttributeSize(IMFAttributes *pattr, REFGUID guid,
> +                              UINT32 uw, UINT32 uh);
> +#define ff_MFSetAttributeRatio ff_MFSetAttributeSize
> +#define ff_MFGetAttributeRatio ff_MFGetAttributeSize
> +
> +// MFTEnumEx was missing from mingw-w64's mfplat import library until
> +// mingw-w64 v6.0.0, thus wrap it and load it using GetProcAddress.
> +// It's also missing in Windows Vista's mfplat.dll.
> +HRESULT ff_MFTEnumEx(GUID guidCategory, UINT32 Flags,
> +                     const MFT_REGISTER_TYPE_INFO *pInputType,
> +                     const MFT_REGISTER_TYPE_INFO *pOutputType,
> +                     IMFActivate ***pppMFTActivate, UINT32 *pnumMFTActivate);
> +
> +
> +// These do exist in mingw-w64's codecapi.h, but they aren't properly defined
> +// by the header until after mingw-w64 v7.0.0.
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoThumbnailGenerationMode, 0x2efd8eee,0x1150,0x4328,0x9c,0xf5,0x66,0xdc,0xe9,0x33,0xfc,0xf4);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoDropPicWithMissingRef, 0xf8226383,0x14c2,0x4567,0x97,0x34,0x50,0x04,0xe9,0x6f,0xf8,0x87);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoSoftwareDeinterlaceMode, 0x0c08d1ce,0x9ced,0x4540,0xba,0xe3,0xce,0xb3,0x80,0x14,0x11,0x09);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoFastDecodeMode, 0x6b529f7d,0xd3b1,0x49c6,0xa9,0x99,0x9e,0xc6,0x91,0x1b,0xed,0xbf);
> +DEFINE_GUID(ff_CODECAPI_AVLowLatencyMode, 0x9c27891a,0xed7a,0x40e1,0x88,0xe8,0xb2,0x27,0x27,0xa0,0x24,0xee);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoH264ErrorConcealment, 0xececace8,0x3436,0x462c,0x92,0x94,0xcd,0x7b,0xac,0xd7,0x58,0xa9);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoMPEG2ErrorConcealment, 0x9d2bfe18,0x728d,0x48d2,0xb3,0x58,0xbc,0x7e,0x43,0x6c,0x66,0x74);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoCodecType, 0x434528e5,0x21f0,0x46b6,0xb6,0x2c,0x9b,0x1b,0x6b,0x65,0x8c,0xd1);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoDXVAMode, 0xf758f09e,0x7337,0x4ae7,0x83,0x87,0x73,0xdc,0x2d,0x54,0xe6,0x7d);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoDXVABusEncryption, 0x42153c8b,0xfd0b,0x4765,0xa4,0x62,0xdd,0xd9,0xe8,0xbc,0xc3,0x88);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoSWPowerLevel, 0xfb5d2347,0x4dd8,0x4509,0xae,0xd0,0xdb,0x5f,0xa9,0xaa,0x93,0xf4);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoMaxCodedWidth, 0x5ae557b8,0x77af,0x41f5,0x9f,0xa6,0x4d,0xb2,0xfe,0x1d,0x4b,0xca);
> +DEFINE_GUID(ff_CODECAPI_AVDecVideoMaxCodedHeight, 0x7262a16a,0xd2dc,0x4e75,0x9b,0xa8,0x65,0xc0,0xc6,0xd3,0x2b,0x13);
> +DEFINE_GUID(ff_CODECAPI_AVDecNumWorkerThreads, 0x9561c3e8,0xea9e,0x4435,0x9b,0x1e,0xa9,0x3e,0x69,0x18,0x94,0xd8);
> +DEFINE_GUID(ff_CODECAPI_AVDecSoftwareDynamicFormatChange, 0x862e2f0a,0x507b,0x47ff,0xaf,0x47,0x01,0xe2,0x62,0x42,0x98,0xb7);
> +DEFINE_GUID(ff_CODECAPI_AVDecDisableVideoPostProcessing, 0xf8749193,0x667a,0x4f2c,0xa9,0xe8,0x5d,0x4a,0xf9,0x24,0xf0,0x8f);
> +
> +// These are missing from mingw-w64's headers until after mingw-w64 v7.0.0.
> +DEFINE_GUID(ff_CODECAPI_AVEncCommonRateControlMode,      0x1c0608e9, 0x370c, 0x4710, 0x8a, 0x58, 0xcb, 0x61, 0x81, 0xc4, 0x24, 0x23);
> +DEFINE_GUID(ff_CODECAPI_AVEncCommonQuality,       0xfcbf57a3, 0x7ea5, 0x4b0c, 0x96, 0x44, 0x69, 0xb4, 0x0c, 0x39, 0xc3, 0x91);
> +DEFINE_GUID(ff_CODECAPI_AVEncCommonMeanBitRate,   0xf7222374, 0x2144, 0x4815, 0xb5, 0x50, 0xa3, 0x7f, 0x8e, 0x12, 0xee, 0x52);
> +DEFINE_GUID(ff_CODECAPI_AVEncH264CABACEnable,    0xee6cad62, 0xd305, 0x4248, 0xa5, 0xe, 0xe1, 0xb2, 0x55, 0xf7, 0xca, 0xf8);
> +DEFINE_GUID(ff_CODECAPI_AVEncVideoForceKeyFrame, 0x398c1b98, 0x8353, 0x475a, 0x9e, 0xf2, 0x8f, 0x26, 0x5d, 0x26, 0x3, 0x45);
> +DEFINE_GUID(ff_CODECAPI_AVEncMPVDefaultBPictureCount, 0x8d390aac, 0xdc5c, 0x4200, 0xb5, 0x7f, 0x81, 0x4d, 0x04, 0xba, 0xba, 0xb2);
> +
> +DEFINE_GUID(ff_MF_SA_D3D11_BINDFLAGS, 0xeacf97ad, 0x065c, 0x4408, 0xbe, 0xe3, 0xfd, 0xcb, 0xfd, 0x12, 0x8b, 0xe2);
> +DEFINE_GUID(ff_MF_SA_D3D11_USAGE, 0xe85fe442, 0x2ca3, 0x486e, 0xa9, 0xc7, 0x10, 0x9d, 0xda, 0x60, 0x98, 0x80);
> +DEFINE_GUID(ff_MF_SA_D3D11_AWARE, 0x206b4fc8, 0xfcf9, 0x4c51, 0xaf, 0xe3, 0x97, 0x64, 0x36, 0x9e, 0x33, 0xa0);
> +DEFINE_GUID(ff_MF_SA_D3D11_SHARED, 0x7b8f32c3, 0x6d96, 0x4b89, 0x92, 0x3, 0xdd, 0x38, 0xb6, 0x14, 0x14, 0xf3);
> +DEFINE_GUID(ff_MF_SA_D3D11_SHARED_WITHOUT_MUTEX, 0x39dbd44d, 0x2e44, 0x4931, 0xa4, 0xc8, 0x35, 0x2d, 0x3d, 0xc4, 0x21, 0x15);
> +DEFINE_GUID(ff_MF_SA_MINIMUM_OUTPUT_SAMPLE_COUNT, 0x851745d5, 0xc3d6, 0x476d, 0x95, 0x27, 0x49, 0x8e, 0xf2, 0xd1, 0xd, 0x18);
> +DEFINE_GUID(ff_MF_SA_MINIMUM_OUTPUT_SAMPLE_COUNT_PROGRESSIVE, 0xf5523a5, 0x1cb2, 0x47c5, 0xa5, 0x50, 0x2e, 0xeb, 0x84, 0xb4, 0xd1, 0x4a);
> +
> +DEFINE_MEDIATYPE_GUID(ff_MFVideoFormat_HEVC, 0x43564548); // FCC('HEVC')
> +DEFINE_MEDIATYPE_GUID(ff_MFVideoFormat_HEVC_ES, 0x53564548); // FCC('HEVS')
> +
> +
> +// This enum is missing from mingw-w64's codecapi.h as of v7.0.0.
> +enum ff_eAVEncCommonRateControlMode {
> +    ff_eAVEncCommonRateControlMode_CBR                 = 0,
> +    ff_eAVEncCommonRateControlMode_PeakConstrainedVBR  = 1,
> +    ff_eAVEncCommonRateControlMode_UnconstrainedVBR    = 2,
> +    ff_eAVEncCommonRateControlMode_Quality             = 3,
> +    ff_eAVEncCommonRateControlMode_LowDelayVBR         = 4,
> +    ff_eAVEncCommonRateControlMode_GlobalVBR           = 5,
> +    ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR   = 6
> +};
> +
> +// These do exist in mingw-w64's mfobjects.idl, but are missing from
> +// mfobjects.h that is generated from the former, due to incorrect use of
> +// ifdefs in the IDL file.
> +enum {
> +    ff_METransformUnknown = 600,
> +    ff_METransformNeedInput,
> +    ff_METransformHaveOutput,
> +    ff_METransformDrainComplete,
> +    ff_METransformMarker,
> +};
> +
> +char *ff_hr_str_buf(char *buf, size_t size, HRESULT hr);
> +#define ff_hr_str(hr) ff_hr_str_buf((char[80]){0}, 80, hr)
> +
> +// Possibly compiler-dependent; the MS/MinGW definition for this is just crazy.
> +#define FF_VARIANT_VALUE(type, contents) &(VARIANT){ .vt = (type), contents }
> +
> +#define FF_VAL_VT_UI4(v) FF_VARIANT_VALUE(VT_UI4, .ulVal = (v))
> +#define FF_VAL_VT_BOOL(v) FF_VARIANT_VALUE(VT_BOOL, .boolVal = (v))
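
(These compound-literal helpers are what the encoder uses further down when
talking to ICodecAPI, e.g.
ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
to request a keyframe.)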
> +
> +IMFSample *ff_create_memory_sample(void *fill_data, size_t size, size_t align);
> +enum AVSampleFormat ff_media_type_to_sample_fmt(IMFAttributes *type);
> +enum AVPixelFormat ff_media_type_to_pix_fmt(IMFAttributes *type);
> +const GUID *ff_pix_fmt_to_guid(enum AVPixelFormat pix_fmt);
> +int ff_fourcc_from_guid(const GUID *guid, uint32_t *out_fourcc);
> +char *ff_guid_str_buf(char *buf, size_t buf_size, const GUID *guid);
> +#define ff_guid_str(guid) ff_guid_str_buf((char[80]){0}, 80, guid)
> +void ff_attributes_dump(void *log, IMFAttributes *attrs);
> +void ff_media_type_dump(void *log, IMFMediaType *type);
> +const CLSID *ff_codec_to_mf_subtype(enum AVCodecID codec);
> +int ff_instantiate_mf(void *log, GUID category,
> +                      MFT_REGISTER_TYPE_INFO *in_type,
> +                      MFT_REGISTER_TYPE_INFO *out_type,
> +                      int use_hw, IMFTransform **res);
> +void ff_free_mf(IMFTransform **mft);
> +
> +#endif
> diff --git a/libavcodec/mfenc.c b/libavcodec/mfenc.c
> new file mode 100644
> index 0000000000..2ac7a859c1
> --- /dev/null
> +++ b/libavcodec/mfenc.c
> @@ -0,0 +1,1181 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#define COBJMACROS
> +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
> +#undef _WIN32_WINNT
> +#define _WIN32_WINNT 0x0602
> +#endif
> +
> +#include "mf_utils.h"
> +#include "libavutil/imgutils.h"
> +#include "libavutil/opt.h"
> +#include "libavutil/time.h"
> +
> +// Include after mf_utils.h due to Windows include mess.
> +#include "mpeg4audio.h"
> +
> +typedef struct MFContext {
> +    AVClass *av_class;
> +    int is_video, is_audio;
> +    GUID main_subtype;
> +    IMFTransform *mft;
> +    IMFMediaEventGenerator *async_events;
> +    DWORD in_stream_id, out_stream_id;
> +    MFT_INPUT_STREAM_INFO in_info;
> +    MFT_OUTPUT_STREAM_INFO out_info;
> +    int out_stream_provides_samples;
> +    int draining, draining_done;
> +    int sample_sent;
> +    int async_need_input, async_have_output, async_marker;
> +    int64_t reorder_delay;
> +    ICodecAPI *codec_api;
> +    AVBSFContext *bsfc;
> +    // set by AVOption
> +    int opt_enc_rc;
> +    int opt_enc_quality;
> +    int opt_enc_hw;
> +} MFContext;
> +
> +static int mf_choose_output_type(AVCodecContext *avctx);
> +static int mf_setup_context(AVCodecContext *avctx);
> +
> +#define MF_TIMEBASE (AVRational){1, 10000000}
> +// Sentinel value only used by us.
> +#define MF_INVALID_TIME AV_NOPTS_VALUE
> +
> +static int mf_wait_events(AVCodecContext *avctx)
> +{
> +    MFContext *c = avctx->priv_data;
> +
> +    if (!c->async_events)
> +        return 0;
> +
> +    while (!(c->async_need_input || c->async_have_output || c->draining_done || c->async_marker)) {
> +        IMFMediaEvent *ev = NULL;
> +        MediaEventType ev_id = 0;
> +        HRESULT hr = IMFMediaEventGenerator_GetEvent(c->async_events, 0, &ev);
> +        if (FAILED(hr)) {
> +            av_log(avctx, AV_LOG_ERROR, "IMFMediaEventGenerator_GetEvent() failed: %s\n",
> +                   ff_hr_str(hr));
> +            return AVERROR_EXTERNAL;
> +        }
> +        IMFMediaEvent_GetType(ev, &ev_id);
> +        switch (ev_id) {
> +        case ff_METransformNeedInput:
> +            if (!c->draining)
> +                c->async_need_input = 1;
> +            break;
> +        case ff_METransformHaveOutput:
> +            c->async_have_output = 1;
> +            break;
> +        case ff_METransformDrainComplete:
> +            c->draining_done = 1;
> +            break;
> +        case ff_METransformMarker:
> +            c->async_marker = 1;
> +            break;
> +        default: ;
> +        }
> +        IMFMediaEvent_Release(ev);
> +    }
> +
> +    return 0;
> +}
> +
> +static AVRational mf_get_tb(AVCodecContext *avctx)
> +{
> +    if (avctx->pkt_timebase.num > 0 && avctx->pkt_timebase.den > 0)
> +        return avctx->pkt_timebase;
> +    if (avctx->time_base.num > 0 && avctx->time_base.den > 0)
> +        return avctx->time_base;
> +    return MF_TIMEBASE;
> +}
> +
> +static LONGLONG mf_to_mf_time(AVCodecContext *avctx, int64_t av_pts)
> +{
> +    if (av_pts == AV_NOPTS_VALUE)
> +        return MF_INVALID_TIME;
> +    return av_rescale_q(av_pts, mf_get_tb(avctx), MF_TIMEBASE);
> +}
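
(MF_TIMEBASE is the usual Media Foundation 100-nanosecond unit, so e.g. a
pts of 30 in a 1/30 timebase rescales to 30 * 10000000 / 30 = 10000000
ticks, i.e. exactly one second.)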
> +
> +static void mf_sample_set_pts(AVCodecContext *avctx, IMFSample *sample, int64_t av_pts)
> +{
> +    LONGLONG stime = mf_to_mf_time(avctx, av_pts);
> +    if (stime != MF_INVALID_TIME)
> +        IMFSample_SetSampleTime(sample, stime);
> +}
> +
> +static int64_t mf_from_mf_time(AVCodecContext *avctx, LONGLONG stime)
> +{
> +    return av_rescale_q(stime, MF_TIMEBASE, mf_get_tb(avctx));
> +}
> +
> +static int64_t mf_sample_get_pts(AVCodecContext *avctx, IMFSample *sample)
> +{
> +    LONGLONG pts;
> +    HRESULT hr = IMFSample_GetSampleTime(sample, &pts);
> +    if (FAILED(hr))
> +        return AV_NOPTS_VALUE;
> +    return mf_from_mf_time(avctx, pts);
> +}
> +
> +static int mf_enca_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    UINT32 sz;
> +
> +    if (avctx->codec_id != AV_CODEC_ID_MP3 && avctx->codec_id != AV_CODEC_ID_AC3) {
> +        hr = IMFAttributes_GetBlobSize(type, &MF_MT_USER_DATA, &sz);
> +        if (!FAILED(hr) && sz > 0) {
> +            avctx->extradata = av_mallocz(sz + AV_INPUT_BUFFER_PADDING_SIZE);
> +            if (!avctx->extradata)
> +                return AVERROR(ENOMEM);
> +            avctx->extradata_size = sz;
> +            hr = IMFAttributes_GetBlob(type, &MF_MT_USER_DATA, avctx->extradata, sz, NULL);
> +            if (FAILED(hr))
> +                return AVERROR_EXTERNAL;
> +
> +            if (avctx->codec_id == AV_CODEC_ID_AAC && avctx->extradata_size >= 12) {
> +                // Get rid of HEAACWAVEINFO (after wfx field, 12 bytes).
> +                avctx->extradata_size = avctx->extradata_size - 12;
> +                memmove(avctx->extradata, avctx->extradata + 12, avctx->extradata_size);
> +            }
> +        }
> +    }
> +
> +    // I don't know where it's documented that we need this. It happens with the
> +    // MS mp3 encoder MFT. The idea for the workaround is taken from NAudio.
> +    // (Certainly any lossy codec will have frames much smaller than 1 second.)
> +    if (!c->out_info.cbSize && !c->out_stream_provides_samples) {
> +        hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &sz);
> +        if (!FAILED(hr)) {
> +            av_log(avctx, AV_LOG_VERBOSE, "MFT_OUTPUT_STREAM_INFO.cbSize set to 0, "
> +                   "assuming %d bytes instead.\n", (int)sz);
> +            c->out_info.cbSize = sz;
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +static int mf_encv_output_type_get(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    HRESULT hr;
> +    UINT32 sz;
> +
> +    hr = IMFAttributes_GetBlobSize(type, &MF_MT_MPEG_SEQUENCE_HEADER, &sz);
> +    if (!FAILED(hr) && sz > 0) {
> +        uint8_t *extradata = av_mallocz(sz + AV_INPUT_BUFFER_PADDING_SIZE);
> +        if (!extradata)
> +            return AVERROR(ENOMEM);
> +        hr = IMFAttributes_GetBlob(type, &MF_MT_MPEG_SEQUENCE_HEADER, extradata, sz, NULL);
> +        if (FAILED(hr)) {
> +            av_free(extradata);
> +            return AVERROR_EXTERNAL;
> +        }
> +        av_freep(&avctx->extradata);
> +        avctx->extradata = extradata;
> +        avctx->extradata_size = sz;
> +    }
> +
> +    return 0;
> +}
> +
> +static int mf_output_type_get(AVCodecContext *avctx)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    IMFMediaType *type;
> +    int ret;
> +
> +    hr = IMFTransform_GetOutputCurrentType(c->mft, c->out_stream_id, &type);
> +    if (FAILED(hr)) {
> +        av_log(avctx, AV_LOG_ERROR, "could not get output type\n");
> +        return AVERROR_EXTERNAL;
> +    }
> +
> +    av_log(avctx, AV_LOG_VERBOSE, "final output type:\n");
> +    ff_media_type_dump(avctx, type);
> +
> +    ret = 0;
> +    if (c->is_video) {
> +        ret = mf_encv_output_type_get(avctx, type);
> +    } else if (c->is_audio) {
> +        ret = mf_enca_output_type_get(avctx, type);
> +    }
> +
> +    if (ret < 0)
> +        av_log(avctx, AV_LOG_ERROR, "output type not supported\n");
> +
> +    IMFMediaType_Release(type);
> +    return ret;
> +}
> +
> +static int mf_sample_to_avpacket(AVCodecContext *avctx, IMFSample *sample, AVPacket *avpkt)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    int ret;
> +    DWORD len;
> +    IMFMediaBuffer *buffer;
> +    BYTE *data;
> +    UINT64 t;
> +    UINT32 t32;
> +
> +    hr = IMFSample_GetTotalLength(sample, &len);
> +    if (FAILED(hr))
> +        return AVERROR_EXTERNAL;
> +
> +    if ((ret = av_new_packet(avpkt, len)) < 0)
> +        return ret;
> +
> +    hr = IMFSample_ConvertToContiguousBuffer(sample, &buffer);
> +    if (FAILED(hr))
> +        return AVERROR_EXTERNAL;
> +
> +    hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
> +    if (FAILED(hr)) {
> +        IMFMediaBuffer_Release(buffer);
> +        return AVERROR_EXTERNAL;
> +    }
> +
> +    memcpy(avpkt->data, data, len);
> +
> +    IMFMediaBuffer_Unlock(buffer);
> +    IMFMediaBuffer_Release(buffer);
> +
> +    avpkt->pts = avpkt->dts = mf_sample_get_pts(avctx, sample);
> +
> +    hr = IMFAttributes_GetUINT32(sample, &MFSampleExtension_CleanPoint, &t32);
> +    if (c->is_audio || (!FAILED(hr) && t32 != 0))
> +        avpkt->flags |= AV_PKT_FLAG_KEY;
> +
> +    hr = IMFAttributes_GetUINT64(sample, &MFSampleExtension_DecodeTimestamp, &t);
> +    if (!FAILED(hr)) {
> +        avpkt->dts = mf_from_mf_time(avctx, t);
> +        // At least on Qualcomm's HEVC encoder, the output dts starts from
> +        // the input pts of the first frame, while the output pts is shifted
> +        // forward. Therefore, shift the output values back so that the output
> +        // pts matches the input.
> +        if (c->reorder_delay == AV_NOPTS_VALUE)
> +            c->reorder_delay = avpkt->pts - avpkt->dts;
> +        avpkt->dts -= c->reorder_delay;
> +        avpkt->pts -= c->reorder_delay;
> +    }
> +
> +    return 0;
> +}
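
(To make the dts/pts compensation concrete: if the first packet comes back
with pts 2 and a decode timestamp of 0 after conversion to the packet
timebase, reorder_delay becomes 2, and every packet gets both stamps reduced
by 2, so the output pts again starts where the input pts did and the first
dts ends up at -2.)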
> +
> +static IMFSample *mf_a_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
> +{
> +    MFContext *c = avctx->priv_data;
> +    size_t len;
> +    size_t bps;
> +    IMFSample *sample;
> +
> +    bps = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->channels;
> +    len = frame->nb_samples * bps;
> +
> +    sample = ff_create_memory_sample(frame->data[0], len, c->in_info.cbAlignment);
> +    if (sample)
> +        IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->nb_samples));
> +    return sample;
> +}
> +
> +static IMFSample *mf_v_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
> +{
> +    MFContext *c = avctx->priv_data;
> +    IMFSample *sample;
> +    IMFMediaBuffer *buffer;
> +    BYTE *data;
> +    HRESULT hr;
> +    int ret;
> +    int size;
> +
> +    size = av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);
> +    if (size < 0)
> +        return NULL;
> +
> +    sample = ff_create_memory_sample(NULL, size, c->in_info.cbAlignment);
> +    if (!sample)
> +        return NULL;
> +
> +    hr = IMFSample_GetBufferByIndex(sample, 0, &buffer);
> +    if (FAILED(hr)) {
> +        IMFSample_Release(sample);
> +        return NULL;
> +    }
> +
> +    hr = IMFMediaBuffer_Lock(buffer, &data, NULL, NULL);
> +    if (FAILED(hr)) {
> +        IMFMediaBuffer_Release(buffer);
> +        IMFSample_Release(sample);
> +        return NULL;
> +    }
> +
> +    ret = av_image_copy_to_buffer((uint8_t *)data, size, (void *)frame->data, frame->linesize,
> +                                  avctx->pix_fmt, avctx->width, avctx->height, 1);
> +    IMFMediaBuffer_SetCurrentLength(buffer, size);
> +    IMFMediaBuffer_Unlock(buffer);
> +    IMFMediaBuffer_Release(buffer);
> +    if (ret < 0) {
> +        IMFSample_Release(sample);
> +        return NULL;
> +    }
> +
> +    IMFSample_SetSampleDuration(sample, mf_to_mf_time(avctx, frame->pkt_duration));
> +
> +    return sample;
> +}
> +
> +static IMFSample *mf_avframe_to_sample(AVCodecContext *avctx, const AVFrame *frame)
> +{
> +    MFContext *c = avctx->priv_data;
> +    IMFSample *sample;
> +
> +    if (c->is_audio) {
> +        sample = mf_a_avframe_to_sample(avctx, frame);
> +    } else {
> +        sample = mf_v_avframe_to_sample(avctx, frame);
> +    }
> +
> +    if (sample)
> +        mf_sample_set_pts(avctx, sample, frame->pts);
> +
> +    return sample;
> +}
> +
> +static int mf_send_sample(AVCodecContext *avctx, IMFSample *sample)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    int ret;
> +
> +    if (sample) {
> +        if (c->async_events) {
> +            if ((ret = mf_wait_events(avctx)) < 0)
> +                return ret;
> +            if (!c->async_need_input)
> +                return AVERROR(EAGAIN);
> +        }
> +        if (!c->sample_sent)
> +            IMFSample_SetUINT32(sample, &MFSampleExtension_Discontinuity, TRUE);
> +        c->sample_sent = 1;
> +        hr = IMFTransform_ProcessInput(c->mft, c->in_stream_id, sample, 0);
> +        if (hr == MF_E_NOTACCEPTING) {
> +            return AVERROR(EAGAIN);
> +        } else if (FAILED(hr)) {
> +            av_log(avctx, AV_LOG_ERROR, "failed processing input: %s\n", ff_hr_str(hr));
> +            return AVERROR_EXTERNAL;
> +        }
> +        c->async_need_input = 0;
> +    } else if (!c->draining) {
> +        hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_COMMAND_DRAIN, 0);
> +        if (FAILED(hr))
> +            av_log(avctx, AV_LOG_ERROR, "failed draining: %s\n", ff_hr_str(hr));
> +        // Some MFTs (AC3) will send a frame after each drain command (???), so
> +        // this is required to make draining actually terminate.
> +        c->draining = 1;
> +        c->async_need_input = 0;
> +    } else {
> +        return AVERROR_EOF;
> +    }
> +    return 0;
> +}
> +
> +static int mf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
> +{
> +    MFContext *c = avctx->priv_data;
> +    int ret;
> +    IMFSample *sample = NULL;
> +    if (frame) {
> +        sample = mf_avframe_to_sample(avctx, frame);
> +        if (!sample)
> +            return AVERROR(ENOMEM);
> +        if (c->is_video && c->codec_api) {
> +            if (frame->pict_type == AV_PICTURE_TYPE_I || !c->sample_sent)
> +                ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncVideoForceKeyFrame, FF_VAL_VT_UI4(1));
> +        }
> +    }
> +    ret = mf_send_sample(avctx, sample);
> +    if (sample)
> +        IMFSample_Release(sample);
> +    return ret;
> +}
> +
> +static int mf_receive_sample(AVCodecContext *avctx, IMFSample **out_sample)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    DWORD st;
> +    MFT_OUTPUT_DATA_BUFFER out_buffers;
> +    IMFSample *sample;
> +    int ret = 0;
> +
> +    while (1) {
> +        *out_sample = NULL;
> +        sample = NULL;
> +
> +        if (c->async_events) {
> +            if ((ret = mf_wait_events(avctx)) < 0)
> +                return ret;
> +            if (!c->async_have_output || c->draining_done) {
> +                ret = 0;
> +                break;
> +            }
> +        }
> +
> +        if (!c->out_stream_provides_samples) {
> +            sample = ff_create_memory_sample(NULL, c->out_info.cbSize, c->out_info.cbAlignment);
> +            if (!sample)
> +                return AVERROR(ENOMEM);
> +        }
> +
> +        out_buffers = (MFT_OUTPUT_DATA_BUFFER) {
> +            .dwStreamID = c->out_stream_id,
> +            .pSample = sample,
> +        };
> +
> +        st = 0;
> +        hr = IMFTransform_ProcessOutput(c->mft, 0, 1, &out_buffers, &st);
> +
> +        if (out_buffers.pEvents)
> +            IMFCollection_Release(out_buffers.pEvents);
> +
> +        if (!FAILED(hr)) {
> +            *out_sample = out_buffers.pSample;
> +            ret = 0;
> +            break;
> +        }
> +
> +        if (out_buffers.pSample)
> +            IMFSample_Release(out_buffers.pSample);
> +
> +        if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
> +            if (c->draining)
> +                c->draining_done = 1;
> +            ret = 0;
> +        } else if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
> +            av_log(avctx, AV_LOG_WARNING, "stream format change\n");
> +            ret = mf_choose_output_type(avctx);
> +            if (ret == 0) // we don't expect renegotiating the input type
> +                ret = AVERROR_EXTERNAL;
> +            if (ret > 0) {
> +                ret = mf_setup_context(avctx);
> +                if (ret >= 0) {
> +                    c->async_have_output = 0;
> +                    continue;
> +                }
> +            }
> +        } else {
> +            av_log(avctx, AV_LOG_ERROR, "failed processing output: %s\n", ff_hr_str(hr));
> +            ret = AVERROR_EXTERNAL;
> +        }
> +
> +        break;
> +    }
> +
> +    c->async_have_output = 0;
> +
> +    if (ret >= 0 && !*out_sample)
> +        ret = c->draining_done ? AVERROR_EOF : AVERROR(EAGAIN);
> +
> +    return ret;
> +}
> +
> +static int mf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
> +{
> +    IMFSample *sample;
> +    int ret;
> +
> +    ret = mf_receive_sample(avctx, &sample);
> +    if (ret < 0)
> +        return ret;
> +
> +    ret = mf_sample_to_avpacket(avctx, sample, avpkt);
> +    IMFSample_Release(sample);
> +
> +    return ret;
> +}
> +
> +// Most encoders seem to enumerate supported audio formats on the output types,
> +// at least as far as channel configuration and sample rate are concerned. Pick
> +// the one which seems to match best.
> +static int64_t mf_enca_output_score(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    UINT32 t;
> +    GUID tg;
> +    int64_t score = 0;
> +
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
> +    if (!FAILED(hr) && t == avctx->sample_rate)
> +        score |= 1LL << 32;
> +
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
> +    if (!FAILED(hr) && t == avctx->channels)
> +        score |= 2LL << 32;
> +
> +    hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
> +    if (!FAILED(hr)) {
> +        if (IsEqualGUID(&c->main_subtype, &tg))
> +            score |= 4LL << 32;
> +    }
> +
> +    // Select the bitrate (lowest priority).
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, &t);
> +    if (!FAILED(hr)) {
> +        int diff = (int)t - avctx->bit_rate / 8;
> +        if (diff >= 0) {
> +            score |= (1LL << 31) - diff; // prefer lower bitrate
> +        } else {
> +            score |= (1LL << 30) + diff; // prefer higher bitrate
> +        }
> +    }
> +
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AAC_PAYLOAD_TYPE, &t);
> +    if (!FAILED(hr) && t != 0)
> +        return -1;
> +
> +    return score;
> +}
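The bit layout above keeps the three exact matches (sample rate, channel
count, subtype) in bits 32-34, while the bitrate term never exceeds
1LL << 31, so it can only break ties between otherwise equal types. As a
worked example (the numbers are mine, not from the patch): with
avctx->bit_rate = 128000, i.e. 16000 bytes/s requested, a type advertising
24000 bytes/s scores (1LL << 31) - 8000 in the low bits, while one
advertising 12000 bytes/s scores (1LL << 30) - 4000, so the candidate at or
above the requested rate wins. Types with a non-zero MF_MT_AAC_PAYLOAD_TYPE
are rejected outright (score -1).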
> +
> +static int mf_enca_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    // (Some encoders allow adjusting this freely, but doing so can also make
> +    //  setting the output type fail - so it's commented out as too fragile.)
> +    //IMFAttributes_SetUINT32(type, &MF_MT_AUDIO_AVG_BYTES_PER_SECOND, avctx->bit_rate / 8);
> +    //IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
> +
> +    return 0;
> +}
> +
> +static int64_t mf_enca_input_score(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    HRESULT hr;
> +    UINT32 t;
> +    int64_t score = 0;
> +
> +    enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
> +    if (sformat == AV_SAMPLE_FMT_NONE)
> +        return -1; // can not use
> +
> +    if (sformat == avctx->sample_fmt)
> +        score |= 1;
> +
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
> +    if (!FAILED(hr) && t == avctx->sample_rate)
> +        score |= 2;
> +
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
> +    if (!FAILED(hr) && t == avctx->channels)
> +        score |= 4;
> +
> +    return score;
> +}
> +
> +static int mf_enca_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    HRESULT hr;
> +    UINT32 t;
> +
> +    enum AVSampleFormat sformat = ff_media_type_to_sample_fmt((IMFAttributes *)type);
> +    if (sformat != avctx->sample_fmt) {
> +        av_log(avctx, AV_LOG_ERROR, "unsupported input sample format set\n");
> +        return AVERROR(EINVAL);
> +    }
> +
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_SAMPLES_PER_SECOND, &t);
> +    if (FAILED(hr) || t != avctx->sample_rate) {
> +        av_log(avctx, AV_LOG_ERROR, "unsupported input sample rate set\n");
> +        return AVERROR(EINVAL);
> +    }
> +
> +    hr = IMFAttributes_GetUINT32(type, &MF_MT_AUDIO_NUM_CHANNELS, &t);
> +    if (FAILED(hr) || t != avctx->channels) {
> +        av_log(avctx, AV_LOG_ERROR, "unsupported input channel number set\n");
> +        return AVERROR(EINVAL);
> +    }
> +
> +    return 0;
> +}
> +
> +static int64_t mf_encv_output_score(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    MFContext *c = avctx->priv_data;
> +    GUID tg;
> +    HRESULT hr;
> +    int score = -1;
> +
> +    hr = IMFAttributes_GetGUID(type, &MF_MT_SUBTYPE, &tg);
> +    if (!FAILED(hr)) {
> +        if (IsEqualGUID(&c->main_subtype, &tg))
> +            score = 1;
> +    }
> +
> +    return score;
> +}
> +
> +static int mf_encv_output_adjust(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    MFContext *c = avctx->priv_data;
> +
> +    ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
> +    IMFAttributes_SetUINT32(type, &MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive);
> +
> +    ff_MFSetAttributeRatio((IMFAttributes *)type, &MF_MT_FRAME_RATE, avctx->framerate.num, avctx->framerate.den);
> +
> +    // (The MS HEVC encoder supports eAVEncH265VProfile_Main_420_8 only, so no
> +    //  profile is set for HEVC.)
> +    if (avctx->codec_id == AV_CODEC_ID_H264) {
> +        UINT32 profile = eAVEncH264VProfile_Base;
> +        switch (avctx->profile) {
> +        case FF_PROFILE_H264_MAIN:
> +            profile = eAVEncH264VProfile_Main;
> +            break;
> +        case FF_PROFILE_H264_HIGH:
> +            profile = eAVEncH264VProfile_High;
> +            break;
> +        }
> +        IMFAttributes_SetUINT32(type, &MF_MT_MPEG2_PROFILE, profile);
> +    }
> +
> +    IMFAttributes_SetUINT32(type, &MF_MT_AVG_BITRATE, avctx->bit_rate);
> +
> +    // Note that some of the ICodecAPI options must be set before SetOutputType.
> +    if (c->codec_api) {
> +        if (avctx->bit_rate)
> +            ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonMeanBitRate, FF_VAL_VT_UI4(avctx->bit_rate));
> +
> +        if (c->opt_enc_rc >= 0)
> +            ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonRateControlMode, FF_VAL_VT_UI4(c->opt_enc_rc));
> +
> +        if (c->opt_enc_quality >= 0)
> +            ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncCommonQuality, FF_VAL_VT_UI4(c->opt_enc_quality));
> +
> +        // Always set the number of b-frames. Qualcomm's HEVC encoder defaults
> +        // this to 1, and that setting is buggy with many of the rate control
> +        // modes. (0 or 2 b-frames work fine with most rate control modes,
> +        // but 2 seems buggy with the u_vbr mode.)
> +        ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncMPVDefaultBPictureCount, FF_VAL_VT_UI4(avctx->max_b_frames));
> +        avctx->has_b_frames = avctx->max_b_frames > 0;
> +
> +        ICodecAPI_SetValue(c->codec_api, &ff_CODECAPI_AVEncH264CABACEnable, FF_VAL_VT_BOOL(1));
> +    }
> +
> +    return 0;
> +}
> +
> +static int64_t mf_encv_input_score(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
> +    if (pix_fmt != avctx->pix_fmt)
> +        return -1; // can not use
> +
> +    return 0;
> +}
> +
> +static int mf_encv_input_adjust(AVCodecContext *avctx, IMFMediaType *type)
> +{
> +    enum AVPixelFormat pix_fmt = ff_media_type_to_pix_fmt((IMFAttributes *)type);
> +    if (pix_fmt != avctx->pix_fmt) {
> +        av_log(avctx, AV_LOG_ERROR, "unsupported input pixel format set\n");
> +        return AVERROR(EINVAL);
> +    }
> +
> +    //ff_MFSetAttributeSize((IMFAttributes *)type, &MF_MT_FRAME_SIZE, avctx->width, avctx->height);
> +
> +    return 0;
> +}
> +
> +static int mf_choose_output_type(AVCodecContext *avctx)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    int ret;
> +    IMFMediaType *out_type = NULL;
> +    int64_t out_type_score = -1;
> +    int out_type_index = -1;
> +    int n;
> +
> +    av_log(avctx, AV_LOG_VERBOSE, "output types:\n");
> +    for (n = 0; ; n++) {
> +        IMFMediaType *type;
> +        int64_t score = -1;
> +
> +        hr = IMFTransform_GetOutputAvailableType(c->mft, c->out_stream_id, n, &type);
> +        if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
> +            break;
> +        if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
> +            av_log(avctx, AV_LOG_VERBOSE, "(need to set input type)\n");
> +            ret = 0;
> +            goto done;
> +        }
> +        if (FAILED(hr)) {
> +            av_log(avctx, AV_LOG_ERROR, "error getting output type: %s\n", ff_hr_str(hr));
> +            ret = AVERROR_EXTERNAL;
> +            goto done;
> +        }
> +
> +        av_log(avctx, AV_LOG_VERBOSE, "output type %d:\n", n);
> +        ff_media_type_dump(avctx, type);
> +
> +        if (c->is_video) {
> +            score = mf_encv_output_score(avctx, type);
> +        } else if (c->is_audio) {
> +            score = mf_enca_output_score(avctx, type);
> +        }
> +
> +        if (score > out_type_score) {
> +            if (out_type)
> +                IMFMediaType_Release(out_type);
> +            out_type = type;
> +            out_type_score = score;
> +            out_type_index = n;
> +            IMFMediaType_AddRef(out_type);
> +        }
> +
> +        IMFMediaType_Release(type);
> +    }
> +
> +    if (out_type) {
> +        av_log(avctx, AV_LOG_VERBOSE, "picking output type %d.\n", out_type_index);
> +    } else {
> +        hr = MFCreateMediaType(&out_type);
> +        if (FAILED(hr)) {
> +            ret = AVERROR(ENOMEM);
> +            goto done;
> +        }
> +    }
> +
> +    ret = 0;
> +    if (c->is_video) {
> +        ret = mf_encv_output_adjust(avctx, out_type);
> +    } else if (c->is_audio) {
> +        ret = mf_enca_output_adjust(avctx, out_type);
> +    }
> +
> +    if (ret >= 0) {
> +        av_log(avctx, AV_LOG_VERBOSE, "setting output type:\n");
> +        ff_media_type_dump(avctx, out_type);
> +
> +        hr = IMFTransform_SetOutputType(c->mft, c->out_stream_id, out_type, 0);
> +        if (!FAILED(hr)) {
> +            ret = 1;
> +        } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
> +            av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set input type\n");
> +            ret = 0;
> +        } else {
> +            av_log(avctx, AV_LOG_ERROR, "could not set output type (%s)\n", ff_hr_str(hr));
> +            ret = AVERROR_EXTERNAL;
> +        }
> +    }
> +
> +done:
> +    if (out_type)
> +        IMFMediaType_Release(out_type);
> +    return ret;
> +}
> +
> +static int mf_choose_input_type(AVCodecContext *avctx)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    int ret;
> +    IMFMediaType *in_type = NULL;
> +    int64_t in_type_score = -1;
> +    int in_type_index = -1;
> +    int n;
> +
> +    av_log(avctx, AV_LOG_VERBOSE, "input types:\n");
> +    for (n = 0; ; n++) {
> +        IMFMediaType *type = NULL;
> +        int64_t score = -1;
> +
> +        hr = IMFTransform_GetInputAvailableType(c->mft, c->in_stream_id, n, &type);
> +        if (hr == MF_E_NO_MORE_TYPES || hr == E_NOTIMPL)
> +            break;
> +        if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
> +            av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 1)\n");
> +            ret = 0;
> +            goto done;
> +        }
> +        if (FAILED(hr)) {
> +            av_log(avctx, AV_LOG_ERROR, "error getting input type: %s\n", ff_hr_str(hr));
> +            ret = AVERROR_EXTERNAL;
> +            goto done;
> +        }
> +
> +        av_log(avctx, AV_LOG_VERBOSE, "input type %d:\n", n);
> +        ff_media_type_dump(avctx, type);
> +
> +        if (c->is_video) {
> +            score = mf_encv_input_score(avctx, type);
> +        } else if (c->is_audio) {
> +            score = mf_enca_input_score(avctx, type);
> +        }
> +
> +        if (score > in_type_score) {
> +            if (in_type)
> +                IMFMediaType_Release(in_type);
> +            in_type = type;
> +            in_type_score = score;
> +            in_type_index = n;
> +            IMFMediaType_AddRef(in_type);
> +        }
> +
> +        IMFMediaType_Release(type);
> +    }
> +
> +    if (in_type) {
> +        av_log(avctx, AV_LOG_VERBOSE, "picking input type %d.\n", in_type_index);
> +    } else {
> +        // Some buggy MFTs (WMA encoder) fail to return MF_E_TRANSFORM_TYPE_NOT_SET.
> +        av_log(avctx, AV_LOG_VERBOSE, "(need to set output type 2)\n");
> +        ret = 0;
> +        goto done;
> +    }
> +
> +    ret = 0;
> +    if (c->is_video) {
> +        ret = mf_encv_input_adjust(avctx, in_type);
> +    } else if (c->is_audio) {
> +        ret = mf_enca_input_adjust(avctx, in_type);
> +    }
> +
> +    if (ret >= 0) {
> +        av_log(avctx, AV_LOG_VERBOSE, "setting input type:\n");
> +        ff_media_type_dump(avctx, in_type);
> +
> +        hr = IMFTransform_SetInputType(c->mft, c->in_stream_id, in_type, 0);
> +        if (!FAILED(hr)) {
> +            ret = 1;
> +        } else if (hr == MF_E_TRANSFORM_TYPE_NOT_SET) {
> +            av_log(avctx, AV_LOG_VERBOSE, "rejected - need to set output type\n");
> +            ret = 0;
> +        } else {
> +            av_log(avctx, AV_LOG_ERROR, "could not set input type (%s)\n", ff_hr_str(hr));
> +            ret = AVERROR_EXTERNAL;
> +        }
> +    }
> +
> +done:
> +    if (in_type)
> +        IMFMediaType_Release(in_type);
> +    return ret;
> +}
> +
> +static int mf_negotiate_types(AVCodecContext *avctx)
> +{
> +    // This follows steps 1-5 on:
> +    //  https://msdn.microsoft.com/en-us/library/windows/desktop/aa965264(v=vs.85).aspx
> +    // If every MFT implementer does this correctly, this loop should at worst
> +    // be repeated once.
> +    int need_input = 1, need_output = 1;
> +    int n;
> +    for (n = 0; n < 2 && (need_input || need_output); n++) {
> +        int ret;
> +        ret = mf_choose_input_type(avctx);
> +        if (ret < 0)
> +            return ret;
> +        need_input = ret < 1;
> +        ret = mf_choose_output_type(avctx);
> +        if (ret < 0)
> +            return ret;
> +        need_output = ret < 1;
> +    }
> +    if (need_input || need_output) {
> +        av_log(avctx, AV_LOG_ERROR, "format negotiation failed (%d/%d)\n",
> +               need_input, need_output);
> +        return AVERROR_EXTERNAL;
> +    }
> +    return 0;
> +}
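The return-value convention of the two mf_choose_*_type() helpers is worth
spelling out: 1 means the type was successfully set, 0 means the MFT wants
the other direction configured first, and negative values are errors. For a
typical encoder the trace is therefore: pass 1 - input returns 0 (output
type needed first), output returns 1; pass 2 - input now returns 1; both
flags clear and the loop exits, matching the "at worst repeated once"
expectation in the comment above.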
> +
> +static int mf_setup_context(AVCodecContext *avctx)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    int ret;
> +
> +    hr = IMFTransform_GetInputStreamInfo(c->mft, c->in_stream_id, &c->in_info);
> +    if (FAILED(hr))
> +        return AVERROR_EXTERNAL;
> +    av_log(avctx, AV_LOG_VERBOSE, "in_info: size=%d, align=%d\n",
> +           (int)c->in_info.cbSize, (int)c->in_info.cbAlignment);
> +
> +    hr = IMFTransform_GetOutputStreamInfo(c->mft, c->out_stream_id, &c->out_info);
> +    if (FAILED(hr))
> +        return AVERROR_EXTERNAL;
> +    c->out_stream_provides_samples =
> +        (c->out_info.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) ||
> +        (c->out_info.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES);
> +    av_log(avctx, AV_LOG_VERBOSE, "out_info: size=%d, align=%d%s\n",
> +           (int)c->out_info.cbSize, (int)c->out_info.cbAlignment,
> +           c->out_stream_provides_samples ? " (provides samples)" : "");
> +
> +    if ((ret = mf_output_type_get(avctx)) < 0)
> +        return ret;
> +
> +    return 0;
> +}
> +
> +static int mf_unlock_async(AVCodecContext *avctx)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    IMFAttributes *attrs;
> +    UINT32 v;
> +    int res = AVERROR_EXTERNAL;
> +
> +    // Hardware encoding unfortunately requires async mode; otherwise play it
> +    // safe and avoid it.
> +    if (!(c->is_video && c->opt_enc_hw))
> +        return 0;
> +
> +    hr = IMFTransform_GetAttributes(c->mft, &attrs);
> +    if (FAILED(hr)) {
> +        av_log(avctx, AV_LOG_ERROR, "error retrieving MFT attributes: %s\n", ff_hr_str(hr));
> +        goto err;
> +    }
> +
> +    hr = IMFAttributes_GetUINT32(attrs, &MF_TRANSFORM_ASYNC, &v);
> +    if (FAILED(hr)) {
> +        av_log(avctx, AV_LOG_ERROR, "error querying async: %s\n", ff_hr_str(hr));
> +        goto err;
> +    }
> +
> +    if (!v) {
> +        av_log(avctx, AV_LOG_ERROR, "hardware MFT is not async\n");
> +        goto err;
> +    }
> +
> +    hr = IMFAttributes_SetUINT32(attrs, &MF_TRANSFORM_ASYNC_UNLOCK, TRUE);
> +    if (FAILED(hr)) {
> +        av_log(avctx, AV_LOG_ERROR, "could not set async unlock: %s\n", ff_hr_str(hr));
> +        goto err;
> +    }
> +
> +    hr = IMFTransform_QueryInterface(c->mft, &IID_IMFMediaEventGenerator, (void **)&c->async_events);
> +    if (FAILED(hr)) {
> +        av_log(avctx, AV_LOG_ERROR, "could not get async interface\n");
> +        goto err;
> +    }
> +
> +    res = 0;
> +
> +err:
> +    IMFAttributes_Release(attrs);
> +    return res;
> +}
> +
> +static int mf_create(void *log, IMFTransform **mft, const AVCodec *codec, int use_hw)
> +{
> +    int is_audio = codec->type == AVMEDIA_TYPE_AUDIO;
> +    const CLSID *subtype = ff_codec_to_mf_subtype(codec->id);
> +    MFT_REGISTER_TYPE_INFO reg = {0};
> +    GUID category;
> +    int ret;
> +
> +    *mft = NULL;
> +
> +    if (!subtype)
> +        return AVERROR(ENOSYS);
> +
> +    reg.guidSubtype = *subtype;
> +
> +    if (is_audio) {
> +        reg.guidMajorType = MFMediaType_Audio;
> +        category = MFT_CATEGORY_AUDIO_ENCODER;
> +    } else {
> +        reg.guidMajorType = MFMediaType_Video;
> +        category = MFT_CATEGORY_VIDEO_ENCODER;
> +    }
> +
> +    if ((ret = ff_instantiate_mf(log, category, NULL, &reg, use_hw, mft)) < 0)
> +        return ret;
> +
> +    return 0;
> +}
> +
> +static int mf_init(AVCodecContext *avctx)
> +{
> +    MFContext *c = avctx->priv_data;
> +    HRESULT hr;
> +    int ret;
> +    const CLSID *subtype = ff_codec_to_mf_subtype(avctx->codec_id);
> +    int use_hw = 0;
> +
> +    c->is_audio = avctx->codec_type == AVMEDIA_TYPE_AUDIO;
> +    c->is_video = !c->is_audio;
> +    c->reorder_delay = AV_NOPTS_VALUE;
> +
> +    if (c->is_video && c->opt_enc_hw)
> +        use_hw = 1;
> +
> +    if (!subtype)
> +        return AVERROR(ENOSYS);
> +
> +    c->main_subtype = *subtype;
> +
> +    if ((ret = mf_create(avctx, &c->mft, avctx->codec, use_hw)) < 0)
> +        return ret;
> +
> +    if ((ret = mf_unlock_async(avctx)) < 0)
> +        return ret;
> +
> +    hr = IMFTransform_QueryInterface(c->mft, &IID_ICodecAPI, (void **)&c->codec_api);
> +    if (!FAILED(hr))
> +        av_log(avctx, AV_LOG_VERBOSE, "MFT supports ICodecAPI.\n");
> +
> +    hr = IMFTransform_GetStreamIDs(c->mft, 1, &c->in_stream_id, 1, &c->out_stream_id);
> +    if (hr == E_NOTIMPL) {
> +        c->in_stream_id = c->out_stream_id = 0;
> +    } else if (FAILED(hr)) {
> +        av_log(avctx, AV_LOG_ERROR, "could not get stream IDs (%s)\n", ff_hr_str(hr));
> +        return AVERROR_EXTERNAL;
> +    }
> +
> +    if ((ret = mf_negotiate_types(avctx)) < 0)
> +        return ret;
> +
> +    if ((ret = mf_setup_context(avctx)) < 0)
> +        return ret;
> +
> +    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0);
> +    if (FAILED(hr)) {
> +        av_log(avctx, AV_LOG_ERROR, "could not start streaming (%s)\n", ff_hr_str(hr));
> +        return AVERROR_EXTERNAL;
> +    }
> +
> +    hr = IMFTransform_ProcessMessage(c->mft, MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0);
> +    if (FAILED(hr)) {
> +        av_log(avctx, AV_LOG_ERROR, "could not start stream (%s)\n", ff_hr_str(hr));
> +        return AVERROR_EXTERNAL;
> +    }
> +
> +    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER && c->async_events &&
> +        c->is_video && !avctx->extradata) {
> +        int sleep = 10000, total = 0;
> +        av_log(avctx, AV_LOG_VERBOSE, "Awaiting extradata\n");
> +        while (total < 70*1000) {
> +            // The Qualcomm H264 encoder doesn't provide extradata immediately,
> +            // but it becomes available soon after init (without any waitable
> +            // event). In practice, it's available after less than 10 ms, but
> +            // wait for up to 70 ms before giving up.
> +            // Some encoders (Qualcomm's HEVC encoder, some versions of
> +            // the QSV H264 encoder at least) don't provide extradata this way
> +            // at all, not even after encoding a frame - it's only available
> +            // prepended to frames.
> +            av_usleep(sleep);
> +            total += sleep;
> +            mf_output_type_get(avctx);
> +            if (avctx->extradata)
> +                break;
> +            sleep *= 2;
> +        }
> +        av_log(avctx, AV_LOG_VERBOSE,
> +               "%s extradata in %d ms\n",
> +               avctx->extradata ? "Got" : "Didn't get",
> +               total / 1000);
> +    }
> +
> +    return 0;
> +}
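On the extradata wait: the sleep starts at 10 ms and doubles after each
probe, so the output type is re-queried after roughly 10, 30 and 70 ms of
accumulated waiting (10 + 20 + 40 = 70 ms), at which point total reaches
70*1000 us and the loop gives up - i.e. at most three probes.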
> +
> +static int mf_close(AVCodecContext *avctx)
> +{
> +    MFContext *c = avctx->priv_data;
> +
> +    if (c->codec_api)
> +        ICodecAPI_Release(c->codec_api);
> +
> +    if (c->async_events)
> +        IMFMediaEventGenerator_Release(c->async_events);
> +
> +    av_bsf_free(&c->bsfc);
> +
> +    ff_free_mf(&c->mft);
> +
> +    av_freep(&avctx->extradata);
> +    avctx->extradata_size = 0;
> +
> +    return 0;
> +}
> +
> +#define OFFSET(x) offsetof(MFContext, x)
> +
> +#define MF_ENCODER(MEDIATYPE, NAME, ID, OPTS, EXTRA) \
> +    static const AVClass ff_ ## NAME ## _mf_encoder_class = {                  \
> +        .class_name = #NAME "_mf",                                             \
> +        .item_name  = av_default_item_name,                                    \
> +        .option     = OPTS,                                                    \
> +        .version    = LIBAVUTIL_VERSION_INT,                                   \
> +    };                                                                         \
> +    AVCodec ff_ ## NAME ## _mf_encoder = {                                     \
> +        .priv_class     = &ff_ ## NAME ## _mf_encoder_class,                   \
> +        .name           = #NAME "_mf",                                         \
> +        .long_name      = NULL_IF_CONFIG_SMALL(#ID " via MediaFoundation"),    \
> +        .type           = AVMEDIA_TYPE_ ## MEDIATYPE,                          \
> +        .id             = AV_CODEC_ID_ ## ID,                                  \
> +        .priv_data_size = sizeof(MFContext),                                   \
> +        .init           = mf_init,                                             \
> +        .close          = mf_close,                                            \
> +        .send_frame     = mf_send_frame,                                       \
> +        .receive_packet = mf_receive_packet,                                   \
> +        EXTRA                                                                  \
> +        .capabilities   = AV_CODEC_CAP_DELAY,                                  \
> +        .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |                       \
> +                          FF_CODEC_CAP_INIT_CLEANUP,                           \
> +    };
> +
> +#define AFMTS \
> +        .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,    \
> +                                                         AV_SAMPLE_FMT_NONE },
> +
> +MF_ENCODER(AUDIO, aac,         AAC, NULL, AFMTS);
> +MF_ENCODER(AUDIO, ac3,         AC3, NULL, AFMTS);
> +MF_ENCODER(AUDIO, mp3,         MP3, NULL, AFMTS);
> +
> +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
> +static const AVOption venc_opts[] = {
> +    {"rate_control",  "Select rate control mode", OFFSET(opt_enc_rc), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE, "rate_control"},
> +    { "default",      "Default mode", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, "rate_control"},
> +    { "cbr",          "CBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_CBR}, 0, 0, VE, "rate_control"},
> +    { "pc_vbr",       "Peak constrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_PeakConstrainedVBR}, 0, 0, VE, "rate_control"},
> +    { "u_vbr",        "Unconstrained VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_UnconstrainedVBR}, 0, 0, VE, "rate_control"},
> +    { "quality",      "Quality mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_Quality}, 0, 0, VE, "rate_control" },
> +    // The following rate_control modes require Windows 8.
> +    { "ld_vbr",       "Low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_LowDelayVBR}, 0, 0, VE, "rate_control"},
> +    { "g_vbr",        "Global VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalVBR}, 0, 0, VE, "rate_control" },
> +    { "gld_vbr",      "Global low delay VBR mode", 0, AV_OPT_TYPE_CONST, {.i64 = ff_eAVEncCommonRateControlMode_GlobalLowDelayVBR}, 0, 0, VE, "rate_control"},
> +    {"quality",       "Quality", OFFSET(opt_enc_quality), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 100, VE},
> +    {"hw_encoding",   "Force hardware encoding", OFFSET(opt_enc_hw), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, VE, "hw_encoding"},
> +    {NULL}
> +};
> +
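As a usage sketch, these private options can be set through the AVOption API
on the codec's private context (error checking omitted; the chosen values are
just examples, not recommendations from the patch):

    #include <libavcodec/avcodec.h>
    #include <libavutil/opt.h>

    const AVCodec *codec = avcodec_find_encoder_by_name("h264_mf");
    AVCodecContext *enc  = avcodec_alloc_context3(codec);

    av_opt_set    (enc->priv_data, "rate_control", "u_vbr", 0); /* unconstrained VBR */
    av_opt_set_int(enc->priv_data, "quality",      70,      0); /* 0-100 */
    av_opt_set_int(enc->priv_data, "hw_encoding",  1,       0); /* force the hardware MFT */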
> +#define VFMTS \
> +        .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,       \
> +                                                        AV_PIX_FMT_YUV420P,    \
> +                                                        AV_PIX_FMT_NONE },
> +
> +MF_ENCODER(VIDEO, h264,        H264, venc_opts, VFMTS);
> +MF_ENCODER(VIDEO, hevc,        HEVC, venc_opts, VFMTS);
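For readability, one instantiation - MF_ENCODER(VIDEO, h264, H264, venc_opts,
VFMTS) - expands to roughly the following (reconstructed by hand from the
macro above):

    static const AVClass ff_h264_mf_encoder_class = {
        .class_name = "h264_mf",
        .item_name  = av_default_item_name,
        .option     = venc_opts,
        .version    = LIBAVUTIL_VERSION_INT,
    };
    AVCodec ff_h264_mf_encoder = {
        .priv_class     = &ff_h264_mf_encoder_class,
        .name           = "h264_mf",
        .long_name      = NULL_IF_CONFIG_SMALL("H264 via MediaFoundation"),
        .type           = AVMEDIA_TYPE_VIDEO,
        .id             = AV_CODEC_ID_H264,
        .priv_data_size = sizeof(MFContext),
        .init           = mf_init,
        .close          = mf_close,
        .send_frame     = mf_send_frame,
        .receive_packet = mf_receive_packet,
        .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
                                                        AV_PIX_FMT_YUV420P,
                                                        AV_PIX_FMT_NONE },
        .capabilities   = AV_CODEC_CAP_DELAY,
        .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                          FF_CODEC_CAP_INIT_CLEANUP,
    };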
> diff --git a/libavcodec/version.h b/libavcodec/version.h
> index 691320b63c..82255d7f38 100644
> --- a/libavcodec/version.h
> +++ b/libavcodec/version.h
> @@ -28,7 +28,7 @@
> #include "libavutil/version.h"
> 
> #define LIBAVCODEC_VERSION_MAJOR  58
> -#define LIBAVCODEC_VERSION_MINOR  83
> +#define LIBAVCODEC_VERSION_MINOR  84
> #define LIBAVCODEC_VERSION_MICRO 100
> 
> #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
> -- 
> 2.17.1
>
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel at ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request at ffmpeg.org with subject "unsubscribe".

