[FFmpeg-cvslog] lavfi: Add VAAPI deinterlacer
Mark Thompson
git at videolan.org
Mon Apr 17 11:28:17 EEST 2017
ffmpeg | branch: master | Mark Thompson <sw at jkqxz.net> | Thu Nov 24 23:27:11 2016 +0000| [ade370a4d7eab1866b6023c91c135d27c77ca465] | committer: Mark Thompson
lavfi: Add VAAPI deinterlacer
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=ade370a4d7eab1866b6023c91c135d27c77ca465
---
configure | 1 +
libavfilter/Makefile | 1 +
libavfilter/allfilters.c | 1 +
libavfilter/version.h | 2 +-
libavfilter/vf_deinterlace_vaapi.c | 625 +++++++++++++++++++++++++++++++++++++
5 files changed, 629 insertions(+), 1 deletion(-)
diff --git a/configure b/configure
index 804eee6df7..d88329c71f 100755
--- a/configure
+++ b/configure
@@ -2436,6 +2436,7 @@ boxblur_filter_deps="gpl"
bs2b_filter_deps="libbs2b"
cropdetect_filter_deps="gpl"
deinterlace_qsv_filter_deps="libmfx"
+deinterlace_vaapi_filter_deps="vaapi"
delogo_filter_deps="gpl"
drawtext_filter_deps="libfreetype"
frei0r_filter_deps="frei0r dlopen"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 8b2841e03b..646a5b5a26 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -49,6 +49,7 @@ OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o
OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o
OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o
OBJS-$(CONFIG_DEINTERLACE_QSV_FILTER) += vf_deinterlace_qsv.o
+OBJS-$(CONFIG_DEINTERLACE_VAAPI_FILTER) += vf_deinterlace_vaapi.o
OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o
OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o
OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 246aed8412..3f09f46bb9 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -72,6 +72,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(CROP, crop, vf);
REGISTER_FILTER(CROPDETECT, cropdetect, vf);
REGISTER_FILTER(DEINTERLACE_QSV,deinterlace_qsv,vf);
+ REGISTER_FILTER(DEINTERLACE_VAAPI, deinterlace_vaapi, vf);
REGISTER_FILTER(DELOGO, delogo, vf);
REGISTER_FILTER(DRAWBOX, drawbox, vf);
REGISTER_FILTER(DRAWTEXT, drawtext, vf);
diff --git a/libavfilter/version.h b/libavfilter/version.h
index 596701fc09..969f8101cd 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 6
-#define LIBAVFILTER_VERSION_MINOR 8
+#define LIBAVFILTER_VERSION_MINOR 9
#define LIBAVFILTER_VERSION_MICRO 0
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
diff --git a/libavfilter/vf_deinterlace_vaapi.c b/libavfilter/vf_deinterlace_vaapi.c
new file mode 100644
index 0000000000..022baa11fc
--- /dev/null
+++ b/libavfilter/vf_deinterlace_vaapi.c
@@ -0,0 +1,625 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <string.h>
+
+#include <va/va.h>
+#include <va/va_vpp.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_vaapi.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+
+#define MAX_REFERENCES 8
+
+typedef struct DeintVAAPIContext {
+ const AVClass *class;
+
+ AVVAAPIDeviceContext *hwctx;
+ AVBufferRef *device_ref;
+
+ int mode;
+
+ int valid_ids;
+ VAConfigID va_config;
+ VAContextID va_context;
+
+ AVBufferRef *input_frames_ref;
+ AVHWFramesContext *input_frames;
+
+ AVBufferRef *output_frames_ref;
+ AVHWFramesContext *output_frames;
+ int output_height;
+ int output_width;
+
+ VAProcFilterCapDeinterlacing
+ deint_caps[VAProcDeinterlacingCount];
+ int nb_deint_caps;
+ VAProcPipelineCaps pipeline_caps;
+
+ int queue_depth;
+ int queue_count;
+ AVFrame *frame_queue[MAX_REFERENCES];
+
+ VABufferID filter_buffer;
+} DeintVAAPIContext;
+
+static const char *deint_vaapi_mode_name(int mode)
+{
+ switch (mode) {
+#define D(name) case VAProcDeinterlacing ## name: return #name
+ D(Bob);
+ D(Weave);
+ D(MotionAdaptive);
+ D(MotionCompensated);
+#undef D
+ default:
+ return "Invalid";
+ }
+}
+
+static int deint_vaapi_query_formats(AVFilterContext *avctx)
+{
+ enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_VAAPI, AV_PIX_FMT_NONE,
+ };
+
+ ff_formats_ref(ff_make_format_list(pix_fmts),
+ &avctx->inputs[0]->out_formats);
+ ff_formats_ref(ff_make_format_list(pix_fmts),
+ &avctx->outputs[0]->in_formats);
+
+ return 0;
+}
+
+static int deint_vaapi_pipeline_uninit(AVFilterContext *avctx)
+{
+ DeintVAAPIContext *ctx = avctx->priv;
+ int i;
+
+ for (i = 0; i < ctx->queue_count; i++)
+ av_frame_free(&ctx->frame_queue[i]);
+ ctx->queue_count = 0;
+
+ if (ctx->filter_buffer != VA_INVALID_ID) {
+ vaDestroyBuffer(ctx->hwctx->display, ctx->filter_buffer);
+ ctx->filter_buffer = VA_INVALID_ID;
+ }
+
+ if (ctx->va_context != VA_INVALID_ID) {
+ vaDestroyContext(ctx->hwctx->display, ctx->va_context);
+ ctx->va_context = VA_INVALID_ID;
+ }
+
+ if (ctx->va_config != VA_INVALID_ID) {
+ vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
+ ctx->va_config = VA_INVALID_ID;
+ }
+
+ av_buffer_unref(&ctx->device_ref);
+ ctx->hwctx = NULL;
+
+ return 0;
+}
+
+static int deint_vaapi_config_input(AVFilterLink *inlink)
+{
+ AVFilterContext *avctx = inlink->dst;
+ DeintVAAPIContext *ctx = avctx->priv;
+
+ deint_vaapi_pipeline_uninit(avctx);
+
+ if (!inlink->hw_frames_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
+ "required to associate the processing device.\n");
+ return AVERROR(EINVAL);
+ }
+
+ ctx->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
+ ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;
+
+ return 0;
+}
+
+static int deint_vaapi_build_filter_params(AVFilterContext *avctx)
+{
+ DeintVAAPIContext *ctx = avctx->priv;
+ VAStatus vas;
+ VAProcFilterParameterBufferDeinterlacing params;
+ int i;
+
+ ctx->nb_deint_caps = VAProcDeinterlacingCount;
+ vas = vaQueryVideoProcFilterCaps(ctx->hwctx->display,
+ ctx->va_context,
+ VAProcFilterDeinterlacing,
+ &ctx->deint_caps,
+ &ctx->nb_deint_caps);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query deinterlacing "
+ "caps: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ if (ctx->mode == VAProcDeinterlacingNone) {
+ for (i = 0; i < ctx->nb_deint_caps; i++) {
+ if (ctx->deint_caps[i].type > ctx->mode)
+ ctx->mode = ctx->deint_caps[i].type;
+ }
+ av_log(avctx, AV_LOG_VERBOSE, "Picking %d (%s) as default "
+ "deinterlacing mode.\n", ctx->mode,
+ deint_vaapi_mode_name(ctx->mode));
+ } else {
+ for (i = 0; i < ctx->nb_deint_caps; i++) {
+ if (ctx->deint_caps[i].type == ctx->mode)
+ break;
+ }
+ if (i >= ctx->nb_deint_caps) {
+ av_log(avctx, AV_LOG_ERROR, "Deinterlacing mode %d (%s) is "
+ "not supported.\n", ctx->mode,
+ deint_vaapi_mode_name(ctx->mode));
+ }
+ }
+
+ params.type = VAProcFilterDeinterlacing;
+ params.algorithm = ctx->mode;
+ params.flags = 0;
+
+ av_assert0(ctx->filter_buffer == VA_INVALID_ID);
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ VAProcFilterParameterBufferType,
+ sizeof(params), 1, &params,
+ &ctx->filter_buffer);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create deinterlace "
+ "parameter buffer: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ vas = vaQueryVideoProcPipelineCaps(ctx->hwctx->display,
+ ctx->va_context,
+ &ctx->filter_buffer, 1,
+ &ctx->pipeline_caps);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query pipeline "
+ "caps: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ ctx->queue_depth = ctx->pipeline_caps.num_backward_references +
+ ctx->pipeline_caps.num_forward_references + 1;
+ if (ctx->queue_depth > MAX_REFERENCES) {
+ av_log(avctx, AV_LOG_ERROR, "Pipeline requires too many "
+ "references (%u forward, %u back).\n",
+ ctx->pipeline_caps.num_forward_references,
+ ctx->pipeline_caps.num_backward_references);
+ return AVERROR(ENOSYS);
+ }
+
+ return 0;
+}
+
+static int deint_vaapi_config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *avctx = outlink->src;
+ DeintVAAPIContext *ctx = avctx->priv;
+ AVVAAPIHWConfig *hwconfig = NULL;
+ AVHWFramesConstraints *constraints = NULL;
+ AVVAAPIFramesContext *va_frames;
+ VAStatus vas;
+ int err;
+
+ deint_vaapi_pipeline_uninit(avctx);
+
+ av_assert0(ctx->input_frames);
+ ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
+ ctx->hwctx = ((AVHWDeviceContext*)ctx->device_ref->data)->hwctx;
+
+ ctx->output_width = ctx->input_frames->width;
+ ctx->output_height = ctx->input_frames->height;
+
+ av_assert0(ctx->va_config == VA_INVALID_ID);
+ vas = vaCreateConfig(ctx->hwctx->display, VAProfileNone,
+ VAEntrypointVideoProc, 0, 0, &ctx->va_config);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create processing pipeline "
+ "config: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
+ if (!hwconfig) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ hwconfig->config_id = ctx->va_config;
+
+ constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
+ hwconfig);
+ if (!constraints) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if (ctx->output_width < constraints->min_width ||
+ ctx->output_height < constraints->min_height ||
+ ctx->output_width > constraints->max_width ||
+ ctx->output_height > constraints->max_height) {
+ av_log(avctx, AV_LOG_ERROR, "Hardware does not support "
+ "deinterlacing to size %dx%d "
+ "(constraints: width %d-%d height %d-%d).\n",
+ ctx->output_width, ctx->output_height,
+ constraints->min_width, constraints->max_width,
+ constraints->min_height, constraints->max_height);
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ err = deint_vaapi_build_filter_params(avctx);
+ if (err < 0)
+ goto fail;
+
+ ctx->output_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
+ if (!ctx->output_frames_ref) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create HW frame context "
+ "for output.\n");
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ ctx->output_frames = (AVHWFramesContext*)ctx->output_frames_ref->data;
+
+ ctx->output_frames->format = AV_PIX_FMT_VAAPI;
+ ctx->output_frames->sw_format = ctx->input_frames->sw_format;
+ ctx->output_frames->width = ctx->output_width;
+ ctx->output_frames->height = ctx->output_height;
+
+ // The number of output frames we need is determined by what follows
+ // the filter. If it's an encoder with complex frame reference
+ // structures then this could be very high.
+ ctx->output_frames->initial_pool_size = 10;
+
+ err = av_hwframe_ctx_init(ctx->output_frames_ref);
+ if (err < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to initialise VAAPI frame "
+ "context for output: %d\n", err);
+ goto fail;
+ }
+
+ va_frames = ctx->output_frames->hwctx;
+
+ av_assert0(ctx->va_context == VA_INVALID_ID);
+ vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
+ ctx->output_width, ctx->output_height, 0,
+ va_frames->surface_ids, va_frames->nb_surfaces,
+ &ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create processing pipeline "
+ "context: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ outlink->w = ctx->output_width;
+ outlink->h = ctx->output_height;
+
+ outlink->hw_frames_ctx = av_buffer_ref(ctx->output_frames_ref);
+ if (!outlink->hw_frames_ctx) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ av_freep(&hwconfig);
+ av_hwframe_constraints_free(&constraints);
+ return 0;
+
+fail:
+ av_buffer_unref(&ctx->output_frames_ref);
+ av_freep(&hwconfig);
+ av_hwframe_constraints_free(&constraints);
+ return err;
+}
+
+static int vaapi_proc_colour_standard(enum AVColorSpace av_cs)
+{
+ switch(av_cs) {
+#define CS(av, va) case AVCOL_SPC_ ## av: return VAProcColorStandard ## va;
+ CS(BT709, BT709);
+ CS(BT470BG, BT470BG);
+ CS(SMPTE170M, SMPTE170M);
+ CS(SMPTE240M, SMPTE240M);
+#undef CS
+ default:
+ return VAProcColorStandardNone;
+ }
+}
+
+static int deint_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
+{
+ AVFilterContext *avctx = inlink->dst;
+ AVFilterLink *outlink = avctx->outputs[0];
+ DeintVAAPIContext *ctx = avctx->priv;
+ AVFrame *output_frame = NULL;
+ VASurfaceID input_surface, output_surface;
+ VASurfaceID backward_references[MAX_REFERENCES];
+ VASurfaceID forward_references[MAX_REFERENCES];
+ VAProcPipelineParameterBuffer params;
+ VAProcFilterParameterBufferDeinterlacing *filter_params;
+ VARectangle input_region;
+ VABufferID params_id;
+ VAStatus vas;
+ void *filter_params_addr = NULL;
+ int err, i;
+
+ av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
+ av_get_pix_fmt_name(input_frame->format),
+ input_frame->width, input_frame->height, input_frame->pts);
+
+ if (ctx->queue_count < ctx->queue_depth) {
+ ctx->frame_queue[ctx->queue_count++] = input_frame;
+ if (ctx->queue_count < ctx->queue_depth) {
+ // Need more reference surfaces before we can continue.
+ return 0;
+ }
+ } else {
+ av_frame_free(&ctx->frame_queue[0]);
+ for (i = 0; i + 1 < ctx->queue_count; i++)
+ ctx->frame_queue[i] = ctx->frame_queue[i + 1];
+ ctx->frame_queue[i] = input_frame;
+ }
+
+ input_frame =
+ ctx->frame_queue[ctx->pipeline_caps.num_backward_references];
+ input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
+ for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++)
+ backward_references[i] = (VASurfaceID)(uintptr_t)
+ ctx->frame_queue[ctx->pipeline_caps.num_backward_references -
+ i - 1]->data[3];
+ for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
+ forward_references[i] = (VASurfaceID)(uintptr_t)
+ ctx->frame_queue[ctx->pipeline_caps.num_backward_references +
+ i + 1]->data[3];
+
+ av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for "
+ "deinterlace input.\n", input_surface);
+ av_log(avctx, AV_LOG_DEBUG, "Backward references:");
+ for (i = 0; i < ctx->pipeline_caps.num_backward_references; i++)
+ av_log(avctx, AV_LOG_DEBUG, " %#x", backward_references[i]);
+ av_log(avctx, AV_LOG_DEBUG, "\n");
+ av_log(avctx, AV_LOG_DEBUG, "Forward references:");
+ for (i = 0; i < ctx->pipeline_caps.num_forward_references; i++)
+ av_log(avctx, AV_LOG_DEBUG, " %#x", forward_references[i]);
+ av_log(avctx, AV_LOG_DEBUG, "\n");
+
+ output_frame = ff_get_video_buffer(outlink, ctx->output_width,
+ ctx->output_height);
+ if (!output_frame) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
+ av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for "
+ "deinterlace output.\n", output_surface);
+
+ memset(&params, 0, sizeof(params));
+
+ input_region = (VARectangle) {
+ .x = 0,
+ .y = 0,
+ .width = input_frame->width,
+ .height = input_frame->height,
+ };
+
+ params.surface = input_surface;
+ params.surface_region = &input_region;
+ params.surface_color_standard =
+ vaapi_proc_colour_standard(input_frame->colorspace);
+
+ params.output_region = NULL;
+ params.output_background_color = 0xff000000;
+ params.output_color_standard = params.surface_color_standard;
+
+ params.pipeline_flags = 0;
+ params.filter_flags = VA_FRAME_PICTURE;
+
+ vas = vaMapBuffer(ctx->hwctx->display, ctx->filter_buffer,
+ &filter_params_addr);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to map filter parameter "
+ "buffer: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ filter_params = filter_params_addr;
+ filter_params->flags = 0;
+ if (input_frame->interlaced_frame && !input_frame->top_field_first)
+ filter_params->flags |= VA_DEINTERLACING_BOTTOM_FIELD_FIRST;
+ filter_params_addr = NULL;
+ vas = vaUnmapBuffer(ctx->hwctx->display, ctx->filter_buffer);
+ if (vas != VA_STATUS_SUCCESS)
+ av_log(avctx, AV_LOG_ERROR, "Failed to unmap filter parameter "
+ "buffer: %d (%s).\n", vas, vaErrorStr(vas));
+
+ params.filters = &ctx->filter_buffer;
+ params.num_filters = 1;
+
+ params.forward_references = forward_references;
+ params.num_forward_references =
+ ctx->pipeline_caps.num_forward_references;
+ params.backward_references = backward_references;
+ params.num_backward_references =
+ ctx->pipeline_caps.num_backward_references;
+
+ vas = vaBeginPicture(ctx->hwctx->display,
+ ctx->va_context, output_surface);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to attach new picture: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ VAProcPipelineParameterBufferType,
+ sizeof(params), 1, &params, &params_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_after_begin;
+ }
+ av_log(avctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n",
+ params_id);
+
+ vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
+ &params_id, 1);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to render parameter buffer: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_after_begin;
+ }
+
+ vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to start picture processing: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_after_render;
+ }
+
+ if (ctx->hwctx->driver_quirks &
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) {
+ vas = vaDestroyBuffer(ctx->hwctx->display, params_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to free parameter buffer: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ // And ignore.
+ }
+ }
+
+ err = av_frame_copy_props(output_frame, input_frame);
+ if (err < 0)
+ goto fail;
+
+ av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
+ av_get_pix_fmt_name(output_frame->format),
+ output_frame->width, output_frame->height, output_frame->pts);
+
+ return ff_filter_frame(outlink, output_frame);
+
+fail_after_begin:
+ vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1);
+fail_after_render:
+ vaEndPicture(ctx->hwctx->display, ctx->va_context);
+fail:
+ if (filter_params_addr)
+ vaUnmapBuffer(ctx->hwctx->display, ctx->filter_buffer);
+ av_frame_free(&output_frame);
+ return err;
+}
+
+static av_cold int deint_vaapi_init(AVFilterContext *avctx)
+{
+ DeintVAAPIContext *ctx = avctx->priv;
+
+ ctx->va_config = VA_INVALID_ID;
+ ctx->va_context = VA_INVALID_ID;
+ ctx->filter_buffer = VA_INVALID_ID;
+ ctx->valid_ids = 1;
+
+ return 0;
+}
+
+static av_cold void deint_vaapi_uninit(AVFilterContext *avctx)
+{
+ DeintVAAPIContext *ctx = avctx->priv;
+
+ if (ctx->valid_ids)
+ deint_vaapi_pipeline_uninit(avctx);
+
+ av_buffer_unref(&ctx->input_frames_ref);
+ av_buffer_unref(&ctx->output_frames_ref);
+ av_buffer_unref(&ctx->device_ref);
+}
+
+#define OFFSET(x) offsetof(DeintVAAPIContext, x)
+#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption deint_vaapi_options[] = {
+ { "mode", "Deinterlacing mode",
+ OFFSET(mode), AV_OPT_TYPE_INT, { .i64 = VAProcDeinterlacingNone },
+ VAProcDeinterlacingNone, VAProcDeinterlacingCount - 1, FLAGS, "mode" },
+ { "default", "Use the highest-numbered (and therefore possibly most advanced) deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingNone }, .unit = "mode" },
+ { "bob", "Use the bob deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingBob }, .unit = "mode" },
+ { "weave", "Use the weave deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingWeave }, .unit = "mode" },
+ { "motion_adaptive", "Use the motion adaptive deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionAdaptive }, .unit = "mode" },
+ { "motion_compensated", "Use the motion compensated deinterlacing algorithm",
+ 0, AV_OPT_TYPE_CONST, { .i64 = VAProcDeinterlacingMotionCompensated }, .unit = "mode" },
+ { NULL },
+};
+
+static const AVClass deint_vaapi_class = {
+ .class_name = "deinterlace_vaapi",
+ .item_name = av_default_item_name,
+ .option = deint_vaapi_options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+static const AVFilterPad deint_vaapi_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = &deint_vaapi_filter_frame,
+ .config_props = &deint_vaapi_config_input,
+ },
+ { NULL }
+};
+
+static const AVFilterPad deint_vaapi_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = &deint_vaapi_config_output,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_deinterlace_vaapi = {
+ .name = "deinterlace_vaapi",
+ .description = NULL_IF_CONFIG_SMALL("Deinterlacing of VAAPI surfaces"),
+ .priv_size = sizeof(DeintVAAPIContext),
+ .init = &deint_vaapi_init,
+ .uninit = &deint_vaapi_uninit,
+ .query_formats = &deint_vaapi_query_formats,
+ .inputs = deint_vaapi_inputs,
+ .outputs = deint_vaapi_outputs,
+ .priv_class = &deint_vaapi_class,
+ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};
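
For orientation only (not part of the commit): a minimal sketch of driving the new filter from application code. The helper name build_deint_graph, the decoder_hw_frames_ctx parameter, the buffer -> deinterlace_vaapi -> buffersink graph shape and the "mode=bob" choice are illustrative assumptions; the hardware frames context is assumed to come from an existing VAAPI decoder, and some error handling (e.g. a missing filter) is abbreviated.

#include <stdio.h>

#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>
#include <libavutil/pixfmt.h>
#include <libavutil/rational.h>

/* Build buffer -> deinterlace_vaapi -> buffersink, returning the source and
 * sink contexts so the caller can push decoded frames in and pull
 * deinterlaced frames out. */
static int build_deint_graph(AVBufferRef *decoder_hw_frames_ctx,
                             int width, int height, AVRational time_base,
                             AVFilterGraph **graph_out,
                             AVFilterContext **src_out,
                             AVFilterContext **sink_out)
{
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *src = NULL, *deint = NULL, *sink = NULL;
    AVBufferSrcParameters *par = NULL;
    char args[256];
    int err;

    if (!graph)
        return AVERROR(ENOMEM);

    // Buffer source carrying VAAPI surfaces from the decoder.
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d",
             width, height, AV_PIX_FMT_VAAPI, time_base.num, time_base.den);
    err = avfilter_graph_create_filter(&src, avfilter_get_by_name("buffer"),
                                       "in", args, NULL, graph);
    if (err < 0)
        goto fail;

    // Hand the decoder's AVHWFramesContext to the source so that
    // deint_vaapi_config_input() finds it on the input link.
    par = av_buffersrc_parameters_alloc();
    if (!par) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    par->hw_frames_ctx = decoder_hw_frames_ctx;
    err = av_buffersrc_parameters_set(src, par);
    av_freep(&par);
    if (err < 0)
        goto fail;

    // The new filter; passing NULL instead of "mode=bob" selects the
    // "default" mode, i.e. the best algorithm the driver advertises.
    err = avfilter_graph_create_filter(&deint,
                                       avfilter_get_by_name("deinterlace_vaapi"),
                                       "deint", "mode=bob", NULL, graph);
    if (err < 0)
        goto fail;

    err = avfilter_graph_create_filter(&sink,
                                       avfilter_get_by_name("buffersink"),
                                       "out", NULL, NULL, graph);
    if (err < 0)
        goto fail;

    if ((err = avfilter_link(src,   0, deint, 0)) < 0 ||
        (err = avfilter_link(deint, 0, sink,  0)) < 0)
        goto fail;

    err = avfilter_graph_config(graph, NULL);
    if (err < 0)
        goto fail;

    *graph_out = graph;
    *src_out   = src;
    *sink_out  = sink;
    return 0;

fail:
    av_freep(&par);
    avfilter_graph_free(&graph);
    return err;
}

Because deint_vaapi_config_input() takes the processing device from the hardware frames context on the input link, no separate device reference has to be supplied here. On the ffmpeg command line the same graph corresponds to something like "-hwaccel vaapi -hwaccel_output_format vaapi -i in.ts -vf deinterlace_vaapi=mode=bob -c:v h264_vaapi out.mp4", with the exact flags depending on the build and driver.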