[FFmpeg-devel] [PATCH] lavfi: add kerndeint filter
Jérémy Tran
tran.jeremy.av at gmail.com
Thu Oct 18 09:50:53 CEST 2012
This is a port of the vf_kerndeint filter (libmpcodecs/vf_kerndeint) by
Donald A. Graft (author of the original AviSynth plugin).
The filter works fine with the YUV colorspace.
With RGB, the output is not bit-exact with mp=kerndeint (the video still
looks fine, so this is probably a small mistake I made somewhere).
I also removed many non-32-bit colorspaces since the algorithm does not
seem designed to work on shorter pixels; is this okay?
The only non-32-bit colorspace left is YUYV422, which is what mp=kerndeint
uses, but I do not get the same results (my first guess is that this is
because it is not 32 bpp).
The FATE test will come after this.
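For reference, a comparison against the mp wrapper can be done with commands
along these lines (file names and option values are just placeholders):

    ffmpeg -i interlaced.avi -vf kerndeint=10:0:0:0:0 -f framemd5 new.md5
    ffmpeg -i interlaced.avi -vf mp=kerndeint=10:0:0:0:0 -f framemd5 ref.md5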
---
configure | 1 +
doc/filters.texi | 45 ++++++
libavfilter/Makefile | 1 +
libavfilter/allfilters.c | 1 +
libavfilter/vf_kerndeint.c | 377 +++++++++++++++++++++++++++++++++++++++++++++
5 files changed, 425 insertions(+)
create mode 100644 libavfilter/vf_kerndeint.c
diff --git a/configure b/configure
index 5aa891b..31abba2 100755
--- a/configure
+++ b/configure
@@ -1917,6 +1917,7 @@ frei0r_filter_extralibs='$ldl'
frei0r_src_filter_deps="frei0r dlopen"
frei0r_src_filter_extralibs='$ldl'
hqdn3d_filter_deps="gpl"
+kerndeint_filter_deps="gpl"
movie_filter_deps="avcodec avformat"
mp_filter_deps="gpl avcodec swscale postproc inline_asm"
mptestsrc_filter_deps="gpl"
diff --git a/doc/filters.texi b/doc/filters.texi
index 725c7b5..94df075 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -2431,6 +2431,51 @@ If a parameter is omitted, it is kept at its current value.
Interlacing detect filter. This filter tries to detect if the input is
interlaced or progressive. Top or bottom field first.
+@section kerndeint
+
+Donald Graft's adaptive kernel deinterlacer. It works on interlaced parts of a
+video to produce progressive frames.
+
+The kerndeint filter accepts the following optional parameters:
+[@var{threshold}[:@var{map}[:@var{order}[:@var{sharp}[:@var{twoway}]]]]]
+
+@var{threshold} must be an integer in the range [0,255] and defaults to 10.
+It affects the filter's tolerance when determining whether a pixel line must be
+processed. A value of 0 forces processing of every pixel.
+
+@var{map} must be 0 (ignore pixels exceeding the threshold) or 1 (paint pixels
+exceeding the threshold white) and defaults to 0.
+
+@var{order} must be 0 (leave fields alone) or 1 (swap fields) and defaults to 0.
+
+@var{sharp} must be 0 (disable additional sharpening) or 1 (enable additional
+sharpening) and defaults to 0.
+
+@var{twoway} must be 0 (disable twoway sharpening) or 1 (enable twoway
+sharpening) and defaults to 0.
+
+Some examples follow:
+@itemize
+@item
+These two commands are equivalent:
+@example
+kerndeint
+kerndeint=10:0:0:0:0
+@end example
+
+@item
+Enable additional sharpening:
+@example
+kerndeint=10:0:0:1:0
+@end example
+
+@item
+Paint processed pixels white:
+@example
+kerndeint=10:1:0:0:0
+@end example
+@end itemize
+
@section lut, lutrgb, lutyuv
Compute a look-up table for binding each pixel component input value
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 3618f10..35d85e4 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -109,6 +109,7 @@ OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o
OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o
+OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 348f369..72db7d7 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -101,6 +101,7 @@ void avfilter_register_all(void)
REGISTER_FILTER (HQDN3D, hqdn3d, vf);
REGISTER_FILTER (HUE, hue, vf);
REGISTER_FILTER (IDET, idet, vf);
+ REGISTER_FILTER (KERNDEINT, kerndeint, vf);
REGISTER_FILTER (LUT, lut, vf);
REGISTER_FILTER (LUTRGB, lutrgb, vf);
REGISTER_FILTER (LUTYUV, lutyuv, vf);
diff --git a/libavfilter/vf_kerndeint.c b/libavfilter/vf_kerndeint.c
new file mode 100644
index 0000000..e533842
--- /dev/null
+++ b/libavfilter/vf_kerndeint.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2012 Jeremy Tran
+ * Copyright (c) 2004 Tobias Diedrich
+ * Copyright (c) 2003 Donald A. Graft
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+/**
+ * @file
+ * Kernel Deinterlacer
+ * Ported from MPlayer libmpcodecs/vf_kerndeint.c.
+ */
+
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+
+#define PLANAR_Y 0
+#define PLANAR_U 1
+#define PLANAR_V 2
+
+#define RGB 0
+#define YUYV 1
+#define YUV 2
+
+typedef struct {
+ const AVClass *class;
+ int frame;
+ int map;
+ int order;
+ int thresh;
+ int sharp;
+ int twoway;
+ int do_deinterlace;
+ int hsub;
+ int vsub;
+ int format; ///< Pixel format: RGB, YUYV or YUV
+ uint8_t *temp_data[4]; ///< Temporary video plane data buffer
+ int bpp;
+} KerndeintContext;
+
+#define OFFSET(x) offsetof(KerndeintContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption kerndeint_options[] = {
+ { "thresh", "set the threshold", OFFSET(thresh), AV_OPT_TYPE_INT,
+ { 10 }, 0, 255, FLAGS },
+ { "map", "set the map", OFFSET(map), AV_OPT_TYPE_INT,
+ { 0 }, 0, 1, FLAGS },
+ { "order", "set the order", OFFSET(order), AV_OPT_TYPE_INT,
+ { 0 }, 0, 1, FLAGS },
+ { "sharp", "set the sharpening mode", OFFSET(sharp), AV_OPT_TYPE_INT,
+ { 0 }, 0, 1, FLAGS },
+ { "twoway", "activate twoway", OFFSET(twoway), AV_OPT_TYPE_INT,
+ { 0 }, 0, 1, FLAGS },
+ { NULL }
+};
+
+AVFILTER_DEFINE_CLASS(kerndeint);
+
+static av_cold int init(AVFilterContext *ctx, const char *args)
+{
+ KerndeintContext *kerndeint = ctx->priv;
+ int i;
+    static const char *const shorthand[] = { "thresh", "map", "order", "sharp", "twoway", NULL };
+
+ kerndeint->class = &kerndeint_class;
+ kerndeint->do_deinterlace = 1;
+ for (i = 0; i < 4; ++i)
+ kerndeint->temp_data[i] = NULL;
+ av_opt_set_defaults(kerndeint);
+
+ return av_opt_set_from_string(kerndeint, args, shorthand, "=", ":");
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+ KerndeintContext *kerndeint = ctx->priv;
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ av_free(kerndeint->temp_data[i]);
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum PixelFormat pix_fmts[] = {
+ PIX_FMT_YUV420P,
+ PIX_FMT_YUYV422,
+ PIX_FMT_ARGB,
+ PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int config_props(AVFilterLink *inlink)
+{
+ KerndeintContext *kerndeint = inlink->dst->priv;
+ const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
+ int i;
+
+ kerndeint->hsub = desc->log2_chroma_w;
+ kerndeint->vsub = desc->log2_chroma_h;
+ kerndeint->bpp = av_get_bits_per_pixel(desc) / 8;
+
+ for (i = 0; i < 4; ++i)
+ kerndeint->temp_data[i] = av_mallocz(inlink->h * inlink->w * kerndeint->bpp);
+
+ switch (inlink->format) {
+ case PIX_FMT_YUV420P:
+ kerndeint->format = YUV;
+ break;
+ case PIX_FMT_YUYV422:
+ kerndeint->format = YUYV;
+ break;
+ case PIX_FMT_ARGB:
+ kerndeint->format = RGB;
+ break;
+ }
+
+ return 0;
+}
+
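+/* Store a 32-bit value in native byte order; lets map mode paint four
+ * consecutive bytes (one RGB32 pixel, or two YUYV pixels) in one write. */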
+#define AVWN32(buf, value) *(uint32_t *)(buf) = value
+
+static int end_frame(AVFilterLink *inlink)
+{
+ KerndeintContext *kerndeint = inlink->dst->priv;
+ AVFilterBufferRef *inpic = inlink->cur_buf;
+ AVFilterBufferRef *outpic = inlink->dst->outputs[0]->out_buf;
+ int cw = inlink->w >> kerndeint->hsub;
+ int ch = inlink->h >> kerndeint->vsub;
+
+ const unsigned char *prvp; ///< Previous field's pixel line number n
+ const unsigned char *prvpp; ///< Previous field's pixel line number (n - 1)
+ const unsigned char *prvpn; ///< Previous field's pixel line number (n + 1)
+ const unsigned char *prvppp; ///< Previous field's pixel line number (n - 2)
+ const unsigned char *prvpnn; ///< Previous field's pixel line number (n + 2)
+ const unsigned char *prvp4p; ///< Previous field's pixel line number (n - 4)
+ const unsigned char *prvp4n; ///< Previous field's pixel line number (n + 4)
+
+ const unsigned char *srcp; ///< Current field's pixel line number n
+ const unsigned char *srcpp; ///< Current field's pixel line number (n - 1)
+ const unsigned char *srcpn; ///< Current field's pixel line number (n + 1)
+ const unsigned char *srcppp; ///< Current field's pixel line number (n - 2)
+ const unsigned char *srcpnn; ///< Current field's pixel line number (n + 2)
+ const unsigned char *srcp3p; ///< Current field's pixel line number (n - 3)
+ const unsigned char *srcp3n; ///< Current field's pixel line number (n + 3)
+ const unsigned char *srcp4p; ///< Current field's pixel line number (n - 4)
+ const unsigned char *srcp4n; ///< Current field's pixel line number (n + 4)
+
+ unsigned char *dstp, *dstp_saved;
+ const unsigned char *srcp_saved;
+
+ int src_linesize;
+ int psrc_linesize;
+ int dst_linesize;
+ int x, y, plane, g;
+ int n = kerndeint->frame++;
+ int val, hi, lo, w, h;
+ double valf;
+ const int threshold = kerndeint->thresh;
+ const int order = kerndeint->order;
+ const int map = kerndeint->map;
+ const int sharp = kerndeint->sharp;
+ const int twoway = kerndeint->twoway;
+
+ if (kerndeint->do_deinterlace) {
+        for (plane = 0; plane < 4 && inpic->data[plane]; plane++) {
+ h = plane == PLANAR_Y ? inlink->h : ch;
+ w = plane == PLANAR_Y ? inlink->w * kerndeint->bpp : cw;
+
+ srcp = srcp_saved = inpic->data[plane];
+ src_linesize = inpic->linesize[plane];
+ psrc_linesize = outpic->linesize[plane];
+ dstp = dstp_saved = outpic->data[plane];
+ dst_linesize = outpic->linesize[plane];
+ srcp = srcp_saved + (1 - order) * src_linesize;
+ dstp = dstp_saved + (1 - order) * dst_linesize;
+
+ for (y = 0; y < h; y += 2) {
+ memcpy(dstp, srcp, w);
+ srcp += 2 * src_linesize;
+ dstp += 2 * dst_linesize;
+ }
+
+ // Copy through the lines that will be missed below.
+ memcpy(dstp_saved + order * dst_linesize, srcp_saved + (1 - order) * src_linesize, w);
+ memcpy(dstp_saved + (2 + order) * dst_linesize, srcp_saved + (3 - order) * src_linesize, w);
+ memcpy(dstp_saved + (h - 2 + order) * dst_linesize, srcp_saved + (h - 1 - order) * src_linesize, w);
+ memcpy(dstp_saved + (h - 4 + order) * dst_linesize, srcp_saved + (h - 3 - order) * src_linesize, w);
+
+ /* For the other field choose adaptively between using the previous field
+ or the interpolant from the current field. */
+ prvp = kerndeint->temp_data[plane] + 5 * psrc_linesize - (1 - order) * psrc_linesize;
+ prvpp = prvp - psrc_linesize;
+ prvppp = prvp - 2 * psrc_linesize;
+ prvp4p = prvp - 4 * psrc_linesize;
+ prvpn = prvp + psrc_linesize;
+ prvpnn = prvp + 2 * psrc_linesize;
+ prvp4n = prvp + 4 * psrc_linesize;
+
+ srcp = srcp_saved + 5 * src_linesize - (1 - order) * src_linesize;
+ srcpp = srcp - src_linesize;
+ srcppp = srcp - 2 * src_linesize;
+ srcp3p = srcp - 3 * src_linesize;
+ srcp4p = srcp - 4 * src_linesize;
+
+ srcpn = srcp + src_linesize;
+ srcpnn = srcp + 2 * src_linesize;
+ srcp3n = srcp + 3 * src_linesize;
+ srcp4n = srcp + 4 * src_linesize;
+
+ dstp = dstp_saved + 5 * dst_linesize - (1 - order) * dst_linesize;
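+            /* Motion-adaptive pass: a pixel of this field is rebuilt from the
+             * kernel only if it or one of its vertical neighbours differs from
+             * the previous frame (kept in temp_data) by more than the threshold
+             * (or always when the threshold is 0 or on the first frame);
+             * otherwise the source pixel is copied through unchanged. */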
+
+ for (y = 5 - (1 - order); y <= h - 5 - (1 - order); y += 2) {
+ for (x = 0; x < w; x++) {
+ if ((threshold == 0) || (n == 0) ||
+ (abs((int)prvp[x] - (int)srcp[x]) > threshold) ||
+ (abs((int)prvpp[x] - (int)srcpp[x]) > threshold) ||
+ (abs((int)prvpn[x] - (int)srcpn[x]) > threshold)) {
+ if (map == 1) {
+ g = x & ~3;
+
+ if (kerndeint->format == RGB) {
+ AVWN32(dstp + g, 0xffffffff);
+ x = g + 3;
+ } else if (kerndeint->format == YUYV) {
+ AVWN32(dstp + g, 0xeb80eb80);
+ x = g + 3;
+ } else {
+ dstp[x] = plane == PLANAR_Y ? 235 : 128;
+ }
+ } else {
+ if (kerndeint->format == RGB) {
+ hi = 255;
+ lo = 0;
+ } else if (kerndeint->format == YUYV) {
+ hi = (x & 1) ? 240 : 235;
+ lo = 16;
+ } else {
+ hi = (plane == PLANAR_Y) ? 235 : 240;
+ lo = 16;
+ }
+
+ if (sharp == 1) {
+ if (twoway == 1) {
+ valf = 0.526 * ((int)srcpp[x] + (int)srcpn[x])
+ + 0.170 * ((int)srcp[x] + (int)prvp[x])
+ - 0.116 * ((int)srcppp[x]
+ + (int)srcpnn[x]
+ + (int)prvppp[x]
+ + (int)prvpnn[x])
+ - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
+ + 0.031 * ((int)srcp4p[x]
+ + (int)srcp4n[x]
+ + (int)prvp4p[x]
+ + (int)prvp4n[x]);
+ } else {
+ valf = 0.526 * ((int)srcpp[x] + (int)srcpn[x])
+ + 0.170 * ((int)prvp[x])
+ - 0.116 * ((int)prvppp[x] + (int)prvpnn[x])
+ - 0.026 * ((int)srcp3p[x] + (int)srcp3n[x])
+                                    + 0.031 * ((int)prvp4p[x] + (int)prvp4n[x]);
+ }
+
+ valf = av_clip(valf, lo, hi);
+ dstp[x] = (int) valf;
+ } else {
+ if (twoway == 1) {
+ val = (8 * ((int)srcpp[x] + (int)srcpn[x])
+ + 2 * ((int)srcp[x] + (int)prvp[x])
+ - (int)(srcppp[x]) - (int)(srcpnn[x])
+ - (int)(prvppp[x]) - (int)(prvpnn[x]))
+ >> 4;
+ } else {
+ val = (8 * ((int)srcpp[x]
+ + (int)srcpn[x])
+ + 2 * ((int)prvp[x])
+ - (int)(prvppp[x]) - (int)(prvpnn[x]))
+ >> 4;
+ }
+
+ val = av_clip(val, lo, hi);
+ dstp[x] = val;
+ }
+ }
+ } else {
+ dstp[x] = srcp[x];
+ }
+ }
+ prvp += 2 * psrc_linesize;
+ prvpp += 2 * psrc_linesize;
+ prvppp += 2 * psrc_linesize;
+ prvpn += 2 * psrc_linesize;
+ prvpnn += 2 * psrc_linesize;
+ prvp4p += 2 * psrc_linesize;
+ prvp4n += 2 * psrc_linesize;
+ srcp += 2 * src_linesize;
+ srcpp += 2 * src_linesize;
+ srcppp += 2 * src_linesize;
+ srcp3p += 2 * src_linesize;
+ srcp4p += 2 * src_linesize;
+ srcpn += 2 * src_linesize;
+ srcpnn += 2 * src_linesize;
+ srcp3n += 2 * src_linesize;
+ srcp4n += 2 * src_linesize;
+ dstp += 2 * dst_linesize;
+ }
+
+ srcp = inpic->data[plane];
+ dstp = kerndeint->temp_data[plane];
+ av_image_copy_plane(dstp, psrc_linesize, srcp, src_linesize, w, h);
+ }
+ } else {
+        for (plane = 0; plane < 4 && inpic->data[plane]; plane++) {
+ h = plane == PLANAR_Y ? inlink->h : ch;
+ w = plane == PLANAR_Y ? inlink->w * kerndeint->bpp : cw;
+ av_image_copy_plane(outpic->data[plane], outpic->linesize[plane],
+ inpic->data[plane], inpic->linesize[plane],
+ w, h);
+ }
+ }
+
+ return ff_end_frame(inlink->dst->outputs[0]);
+}
+
+AVFilter avfilter_vf_kerndeint = {
+ .name = "kerndeint",
+ .description = NULL_IF_CONFIG_SMALL("Kernel deinterlacer"),
+
+ .priv_size = sizeof(KerndeintContext),
+
+ .init = init,
+ .uninit = uninit,
+ .query_formats = query_formats,
+
+ .inputs = (const AVFilterPad[]) {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .end_frame = end_frame,
+ .config_props = config_props,
+ .min_perms = AV_PERM_READ,
+ },
+ { .name = NULL }
+ },
+ .outputs = (const AVFilterPad[]) {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { .name = NULL }
+ },
+ .priv_class = &kerndeint_class,
+};
--
1.7.11.3