[FFmpeg-devel] [WIP] [PATCH 2/2] lavfi/dualinput: reimplement on top of framesync.
Nicolas George
nicolas.george at normalesup.org
Wed Aug 28 00:21:11 CEST 2013
Signed-off-by: Nicolas George <nicolas.george at normalesup.org>
---
libavfilter/Makefile | 8 +--
libavfilter/dualinput.c | 127 +++++++++++++++++++---------------------------
libavfilter/dualinput.h | 5 ++
libavfilter/vf_overlay.c | 5 ++
4 files changed, 67 insertions(+), 78 deletions(-)
Also TODO: add the init function call to filters other than overlay, and
rework the filters to use framesync directly, so they can benefit from the
new features.
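For the other dual-input filters, the hook-up is the same as the vf_overlay
hunk at the end of this patch: call ff_dualinput_init() from config_output,
once both input links are configured. A minimal sketch for a hypothetical
dual-input filter (the FooContext name and its dinput field are invented for
illustration, they are not part of this patch):

    static int config_output(AVFilterLink *outlink)
    {
        AVFilterContext *ctx = outlink->src;
        FooContext *s = ctx->priv; /* assumed to embed FFDualInputContext dinput */
        int ret;

        /* framesync needs the input time bases, hence the call from
         * config_output rather than from the filter's init callback */
        if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
            return ret;

        outlink->w         = ctx->inputs[0]->w;
        outlink->h         = ctx->inputs[0]->h;
        outlink->time_base = ctx->inputs[0]->time_base;
        return 0;
    }

As for the shortest/repeatlast items, they would presumably map onto
framesync's end-of-stream extrapolation modes rather than ad-hoc code,
roughly like this inside ff_dualinput_init() (a sketch only, assuming the
EXT_* modes from the framesync patch):

    in[1].after = s->repeatlast ? EXT_INFINITY : EXT_NULL;
    if (s->shortest)
        in[0].after = in[1].after = EXT_STOP;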
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 3751d54..f2d1c1b 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -111,7 +111,7 @@ OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o
OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o
OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o
-OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o
+OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o
OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o
OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o
@@ -139,7 +139,7 @@ OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
-OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o
+OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o framesync.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o
OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o
@@ -163,14 +163,14 @@ OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o
OBJS-$(CONFIG_NULL_FILTER) += vf_null.o
OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
OBJS-$(CONFIG_OPENCL) += deshake_opencl.o unsharp_opencl.o
-OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o
+OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o framesync.o
OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o
OBJS-$(CONFIG_PP_FILTER) += vf_pp.o
-OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o
+OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o framesync.o
OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o
diff --git a/libavfilter/dualinput.c b/libavfilter/dualinput.c
index 10e3652..25e4ae0 100644
--- a/libavfilter/dualinput.c
+++ b/libavfilter/dualinput.c
@@ -16,67 +16,59 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/*
+ * TODO
+ * Clean up obsolete code
+ * Map shortest to the corresponding flags
+ * Implement repeatlast
+ */
+
#define MAIN 0
#define SECOND 1
#include "dualinput.h"
#include "libavutil/timestamp.h"
-static int try_filter_frame(FFDualInputContext *s,
- AVFilterContext *ctx, AVFrame *mainpic)
+int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s)
{
- int ret;
+ FFFrameSyncIn *in = s->fs.in;
- /* Discard obsolete second frames: if there is a next second frame with pts
- * before the main frame, we can drop the current second. */
- while (1) {
- AVFrame *next_overpic = ff_bufqueue_peek(&s->queue[SECOND], 0);
- if (!next_overpic && s->second_eof && !s->repeatlast) {
- av_frame_free(&s->second_frame);
- break;
- }
- if (!next_overpic || av_compare_ts(next_overpic->pts, ctx->inputs[SECOND]->time_base,
- mainpic->pts, ctx->inputs[MAIN]->time_base) > 0)
- break;
- ff_bufqueue_get(&s->queue[SECOND]);
- av_frame_free(&s->second_frame);
- s->second_frame = next_overpic;
- }
+ ff_framesync_init(&s->fs, ctx, 2);
+ in[0].time_base = ctx->inputs[0]->time_base;
+ in[1].time_base = ctx->inputs[1]->time_base;
+ in[0].sync = 1;
- /* If there is no next frame and no EOF and the second frame is before
- * the main frame, we can not know yet if it will be superseded. */
- if (!s->queue[SECOND].available && !s->second_eof &&
- (!s->second_frame || av_compare_ts(s->second_frame->pts, ctx->inputs[SECOND]->time_base,
- mainpic->pts, ctx->inputs[MAIN]->time_base) < 0))
- return AVERROR(EAGAIN);
+ in[1].after = EXT_NULL;
- /* At this point, we know that the current second frame extends to the
- * time of the main frame. */
- av_dlog(ctx, "main_pts:%s main_pts_time:%s",
- av_ts2str(mainpic->pts), av_ts2timestr(mainpic->pts, &ctx->inputs[MAIN]->time_base));
- if (s->second_frame)
- av_dlog(ctx, " second_pts:%s second_pts_time:%s",
- av_ts2str(s->second_frame->pts), av_ts2timestr(s->second_frame->pts, &ctx->inputs[SECOND]->time_base));
- av_dlog(ctx, "\n");
-
- if (s->second_frame && !ctx->is_disabled)
- mainpic = s->process(ctx, mainpic, s->second_frame);
- ret = ff_filter_frame(ctx->outputs[0], mainpic);
- av_assert1(ret != AVERROR(EAGAIN));
- s->frame_requested = 0;
- return ret;
+ return ff_framesync_configure(&s->fs);
}
static int try_filter_next_frame(FFDualInputContext *s, AVFilterContext *ctx)
{
- AVFrame *next_mainpic = ff_bufqueue_peek(&s->queue[MAIN], 0);
- int ret;
+ AVFrame *mainpic = NULL, *secondpic = NULL;
+ int ret = 0;
- if (!next_mainpic)
+ ff_framesync_next(&s->fs);
+ if (s->fs.eof)
+ return AVERROR_EOF;
+ if (!s->fs.frame_ready)
return AVERROR(EAGAIN);
- if ((ret = try_filter_frame(s, ctx, next_mainpic)) == AVERROR(EAGAIN))
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &mainpic, 1)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &secondpic, 0)) < 0) {
+ av_frame_free(&mainpic);
return ret;
- ff_bufqueue_get(&s->queue[MAIN]);
+ }
+ if (!mainpic) { /* FIXME temporary */
+ av_log(ctx, AV_LOG_ERROR, "null filtering with secondpic %p pts:%s\n", secondpic, secondpic ? av_ts2str(secondpic->pts) : "none");
+ ff_framesync_drop(&s->fs);
+ return 0;
+ }
+ if (secondpic && !ctx->is_disabled)
+ mainpic = s->process(ctx, mainpic, secondpic);
+ ret = ff_filter_frame(ctx->outputs[0], mainpic);
+ av_assert1(ret != AVERROR(EAGAIN));
+ s->frame_requested = 0;
+ ff_framesync_drop(&s->fs);
return ret;
}
@@ -88,38 +80,29 @@ static int flush_frames(FFDualInputContext *s, AVFilterContext *ctx)
return ret == AVERROR(EAGAIN) ? 0 : ret;
}
-int ff_dualinput_filter_frame_main(FFDualInputContext *s,
- AVFilterLink *inlink, AVFrame *in)
+static int dualinput_filter_frame_common(FFDualInputContext *s, unsigned in_no,
+ AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
int ret;
if ((ret = flush_frames(s, ctx)) < 0)
return ret;
- if ((ret = try_filter_frame(s, ctx, in)) < 0) {
- if (ret != AVERROR(EAGAIN))
- return ret;
- ff_bufqueue_add(ctx, &s->queue[MAIN], in);
- }
-
- if (!s->second_frame)
- return 0;
- flush_frames(s, ctx);
+ if ((ret = ff_framesync_add_frame(&s->fs, in_no, in)) < 0)
+ return ret;
+ return flush_frames(s, ctx);
+}
- return 0;
+int ff_dualinput_filter_frame_main(FFDualInputContext *s,
+ AVFilterLink *inlink, AVFrame *in)
+{
+ return dualinput_filter_frame_common(s, 0, inlink, in);
}
int ff_dualinput_filter_frame_second(FFDualInputContext *s,
AVFilterLink *inlink, AVFrame *in)
{
- AVFilterContext *ctx = inlink->dst;
- int ret;
-
- if ((ret = flush_frames(s, ctx)) < 0)
- return ret;
- ff_bufqueue_add(ctx, &s->queue[SECOND], in);
- ret = try_filter_next_frame(s, ctx);
- return ret == AVERROR(EAGAIN) ? 0 : ret;
+ return dualinput_filter_frame_common(s, 1, inlink, in);
}
int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink)
@@ -131,19 +114,15 @@ int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink)
return 0;
s->frame_requested = 1;
while (s->frame_requested) {
- /* TODO if we had a frame duration, we could guess more accurately */
- input = !s->second_eof && (s->queue[MAIN].available ||
- s->queue[SECOND].available < 2) ?
- SECOND : MAIN;
+ input = s->fs.in_request;
ret = ff_request_frame(ctx->inputs[input]);
- /* EOF on main is reported immediately */
- if (ret == AVERROR_EOF && input == SECOND) {
- s->second_eof = 1;
- if (s->shortest)
+ if (ret == AVERROR_EOF) {
+ if ((ret = ff_framesync_add_frame(&s->fs, input, NULL)) < 0)
return ret;
- if ((ret = try_filter_next_frame(s, ctx)) != AVERROR(EAGAIN))
+ if ((ret = try_filter_next_frame(s, ctx)) < 0 &&
+ ret != AVERROR(EAGAIN))
return ret;
- ret = 0; /* continue requesting frames on main */
+ ret = 0;
}
if (ret < 0)
return ret;
diff --git a/libavfilter/dualinput.h b/libavfilter/dualinput.h
index 98d0544..5893f13 100644
--- a/libavfilter/dualinput.h
+++ b/libavfilter/dualinput.h
@@ -26,9 +26,13 @@
#include <stdint.h>
#include "bufferqueue.h"
+#include "framesync.h"
#include "internal.h"
typedef struct {
+ FFFrameSync fs;
+ FFFrameSyncIn second_input; /* must be immediately after fs */
+
uint8_t frame_requested;
uint8_t second_eof;
AVFrame *second_frame;
@@ -38,6 +42,7 @@ typedef struct {
int repeatlast; ///< repeat last second frame
} FFDualInputContext;
+int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s);
int ff_dualinput_filter_frame_main(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
int ff_dualinput_filter_frame_second(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink);
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index 7d93c9e..d838594 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -310,6 +310,11 @@ static int config_input_overlay(AVFilterLink *inlink)
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
+ OverlayContext *s = ctx->priv;
+ int ret;
+
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
outlink->w = ctx->inputs[MAIN]->w;
outlink->h = ctx->inputs[MAIN]->h;
--
1.7.10.4