[FFmpeg-cvslog] lavfi/dualinput: reimplement on top of framesync.
Nicolas George
git at videolan.org
Mon Sep 23 15:11:38 CEST 2013
ffmpeg | branch: master | Nicolas George <nicolas.george at normalesup.org> | Wed Aug 28 00:07:22 2013 +0200| [231e50157cc9a7a8b08ba3d0e566279fc6cbb800] | committer: Nicolas George
lavfi/dualinput: reimplement on top of framesync.
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=231e50157cc9a7a8b08ba3d0e566279fc6cbb800
---
 libavfilter/Makefile     |   8 +--
 libavfilter/dualinput.c  | 149 +++++++++++++---------------------------------
 libavfilter/dualinput.h  |   8 ++-
 libavfilter/vf_blend.c   |   4 ++
 libavfilter/vf_lut3d.c   |   4 ++
 libavfilter/vf_overlay.c |   5 ++
 libavfilter/vf_psnr.c    |   4 ++
 7 files changed, 66 insertions(+), 116 deletions(-)
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 7e6d901..b2d3587 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -113,7 +113,7 @@ OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o
OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o
OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o
-OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o
+OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o dualinput.o framesync.o
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o
OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o
OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o
@@ -141,7 +141,7 @@ OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
-OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o
+OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o dualinput.o framesync.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o
OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o
@@ -165,7 +165,7 @@ OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o
OBJS-$(CONFIG_NULL_FILTER) += vf_null.o
OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
OBJS-$(CONFIG_OPENCL) += deshake_opencl.o unsharp_opencl.o
-OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o
+OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o dualinput.o framesync.o
OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
@@ -173,7 +173,7 @@ OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o
OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o
OBJS-$(CONFIG_PP_FILTER) += vf_pp.o
-OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o
+OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o dualinput.o framesync.o
OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o
OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
diff --git a/libavfilter/dualinput.c b/libavfilter/dualinput.c
index 10e3652..179c3ef 100644
--- a/libavfilter/dualinput.c
+++ b/libavfilter/dualinput.c
@@ -16,144 +16,75 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#define MAIN 0
-#define SECOND 1
-
#include "dualinput.h"
#include "libavutil/timestamp.h"
-static int try_filter_frame(FFDualInputContext *s,
- AVFilterContext *ctx, AVFrame *mainpic)
+static int process_frame(FFFrameSync *fs)
{
- int ret;
-
- /* Discard obsolete second frames: if there is a next second frame with pts
- * before the main frame, we can drop the current second. */
- while (1) {
- AVFrame *next_overpic = ff_bufqueue_peek(&s->queue[SECOND], 0);
- if (!next_overpic && s->second_eof && !s->repeatlast) {
- av_frame_free(&s->second_frame);
- break;
- }
- if (!next_overpic || av_compare_ts(next_overpic->pts, ctx->inputs[SECOND]->time_base,
- mainpic->pts, ctx->inputs[MAIN]->time_base) > 0)
- break;
- ff_bufqueue_get(&s->queue[SECOND]);
- av_frame_free(&s->second_frame);
- s->second_frame = next_overpic;
+ AVFilterContext *ctx = fs->parent;
+ FFDualInputContext *s = fs->opaque;
+ AVFrame *mainpic = NULL, *secondpic = NULL;
+ int ret = 0;
+
+ if ((ret = ff_framesync_get_frame(&s->fs, 0, &mainpic, 1)) < 0 ||
+ (ret = ff_framesync_get_frame(&s->fs, 1, &secondpic, 0)) < 0) {
+ av_frame_free(&mainpic);
+ return ret;
}
-
- /* If there is no next frame and no EOF and the second frame is before
- * the main frame, we can not know yet if it will be superseded. */
- if (!s->queue[SECOND].available && !s->second_eof &&
- (!s->second_frame || av_compare_ts(s->second_frame->pts, ctx->inputs[SECOND]->time_base,
- mainpic->pts, ctx->inputs[MAIN]->time_base) < 0))
- return AVERROR(EAGAIN);
-
- /* At this point, we know that the current second frame extends to the
- * time of the main frame. */
- av_dlog(ctx, "main_pts:%s main_pts_time:%s",
- av_ts2str(mainpic->pts), av_ts2timestr(mainpic->pts, &ctx->inputs[MAIN]->time_base));
- if (s->second_frame)
- av_dlog(ctx, " second_pts:%s second_pts_time:%s",
- av_ts2str(s->second_frame->pts), av_ts2timestr(s->second_frame->pts, &ctx->inputs[SECOND]->time_base));
- av_dlog(ctx, "\n");
-
- if (s->second_frame && !ctx->is_disabled)
- mainpic = s->process(ctx, mainpic, s->second_frame);
+ av_assert0(mainpic);
+ mainpic->pts = av_rescale_q(mainpic->pts, s->fs.time_base, ctx->outputs[0]->time_base);
+ if (secondpic && !ctx->is_disabled)
+ mainpic = s->process(ctx, mainpic, secondpic);
ret = ff_filter_frame(ctx->outputs[0], mainpic);
av_assert1(ret != AVERROR(EAGAIN));
s->frame_requested = 0;
return ret;
}
-static int try_filter_next_frame(FFDualInputContext *s, AVFilterContext *ctx)
-{
- AVFrame *next_mainpic = ff_bufqueue_peek(&s->queue[MAIN], 0);
- int ret;
-
- if (!next_mainpic)
- return AVERROR(EAGAIN);
- if ((ret = try_filter_frame(s, ctx, next_mainpic)) == AVERROR(EAGAIN))
- return ret;
- ff_bufqueue_get(&s->queue[MAIN]);
- return ret;
-}
-
-static int flush_frames(FFDualInputContext *s, AVFilterContext *ctx)
+int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s)
{
- int ret;
+ FFFrameSyncIn *in = s->fs.in;
+
+ ff_framesync_init(&s->fs, ctx, 2);
+ s->fs.opaque = s;
+ s->fs.on_event = process_frame;
+ in[0].time_base = ctx->inputs[0]->time_base;
+ in[1].time_base = ctx->inputs[1]->time_base;
+ in[0].sync = 2;
+ in[0].before = EXT_STOP;
+ in[0].after = EXT_INFINITY;
+ in[1].sync = 1;
+ in[1].before = EXT_NULL;
+ in[1].after = EXT_INFINITY;
+
+ if (s->shortest)
+ in[1].after = EXT_STOP;
+ if (!s->repeatlast) {
+ in[0].after = EXT_STOP;
+ in[1].sync = 0;
+ }
- while (!(ret = try_filter_next_frame(s, ctx)));
- return ret == AVERROR(EAGAIN) ? 0 : ret;
+ return ff_framesync_configure(&s->fs);
}
int ff_dualinput_filter_frame_main(FFDualInputContext *s,
AVFilterLink *inlink, AVFrame *in)
{
- AVFilterContext *ctx = inlink->dst;
- int ret;
-
- if ((ret = flush_frames(s, ctx)) < 0)
- return ret;
- if ((ret = try_filter_frame(s, ctx, in)) < 0) {
- if (ret != AVERROR(EAGAIN))
- return ret;
- ff_bufqueue_add(ctx, &s->queue[MAIN], in);
- }
-
- if (!s->second_frame)
- return 0;
- flush_frames(s, ctx);
-
- return 0;
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
}
int ff_dualinput_filter_frame_second(FFDualInputContext *s,
AVFilterLink *inlink, AVFrame *in)
{
- AVFilterContext *ctx = inlink->dst;
- int ret;
-
- if ((ret = flush_frames(s, ctx)) < 0)
- return ret;
- ff_bufqueue_add(ctx, &s->queue[SECOND], in);
- ret = try_filter_next_frame(s, ctx);
- return ret == AVERROR(EAGAIN) ? 0 : ret;
+ return ff_framesync_filter_frame(&s->fs, inlink, in);
}
int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink)
{
- AVFilterContext *ctx = outlink->src;
- int input, ret;
-
- if (!try_filter_next_frame(s, ctx))
- return 0;
- s->frame_requested = 1;
- while (s->frame_requested) {
- /* TODO if we had a frame duration, we could guess more accurately */
- input = !s->second_eof && (s->queue[MAIN].available ||
- s->queue[SECOND].available < 2) ?
- SECOND : MAIN;
- ret = ff_request_frame(ctx->inputs[input]);
- /* EOF on main is reported immediately */
- if (ret == AVERROR_EOF && input == SECOND) {
- s->second_eof = 1;
- if (s->shortest)
- return ret;
- if ((ret = try_filter_next_frame(s, ctx)) != AVERROR(EAGAIN))
- return ret;
- ret = 0; /* continue requesting frames on main */
- }
- if (ret < 0)
- return ret;
- }
- return 0;
+ return ff_framesync_request_frame(&s->fs, outlink);
}
void ff_dualinput_uninit(FFDualInputContext *s)
{
- av_frame_free(&s->second_frame);
- ff_bufqueue_discard_all(&s->queue[MAIN]);
- ff_bufqueue_discard_all(&s->queue[SECOND]);
+ ff_framesync_uninit(&s->fs);
}
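
For context, the framesync parameters chosen in ff_dualinput_init() above can be
read roughly as follows. The annotation below paraphrases the EXT_* extrapolation
modes and sync levels from framesync.h; it is an interpretation added for
readability, not text from the commit:

/* Rough reading of the default framesync setup in ff_dualinput_init():
 *
 *   in[0] -- main input
 *     sync   = 2             highest sync level: main frames drive output
 *     before = EXT_STOP      EXT_STOP halts all output while this input
 *                            has not started yet
 *     after  = EXT_INFINITY  EXT_INFINITY extends the last frame once the
 *                            input ends (switched to EXT_STOP when
 *                            repeatlast is disabled)
 *
 *   in[1] -- second input
 *     sync   = 1             lower priority (forced to 0, i.e. unsynced,
 *                            when repeatlast is disabled)
 *     before = EXT_NULL      EXT_NULL passes a NULL frame for this input,
 *                            which process_frame() checks for
 *     after  = EXT_INFINITY  keep repeating the last second frame after
 *                            its EOF; EXT_STOP when "shortest" is set,
 *                            which ends the output at that point
 */
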
diff --git a/libavfilter/dualinput.h b/libavfilter/dualinput.h
index 98d0544..aaefd78 100644
--- a/libavfilter/dualinput.h
+++ b/libavfilter/dualinput.h
@@ -26,18 +26,20 @@
#include <stdint.h>
#include "bufferqueue.h"
+#include "framesync.h"
#include "internal.h"
typedef struct {
+ FFFrameSync fs;
+ FFFrameSyncIn second_input; /* must be immediately after fs */
+
uint8_t frame_requested;
- uint8_t second_eof;
- AVFrame *second_frame;
- struct FFBufQueue queue[2];
AVFrame *(*process)(AVFilterContext *ctx, AVFrame *main, const AVFrame *second);
int shortest; ///< terminate stream when the second input terminates
int repeatlast; ///< repeat last second frame
} FFDualInputContext;
+int ff_dualinput_init(AVFilterContext *ctx, FFDualInputContext *s);
int ff_dualinput_filter_frame_main(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
int ff_dualinput_filter_frame_second(FFDualInputContext *s, AVFilterLink *inlink, AVFrame *in);
int ff_dualinput_request_frame(FFDualInputContext *s, AVFilterLink *outlink);
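
Combined with the per-filter hunks below, the API above implies the following
wiring for a two-input filter. This is a minimal sketch only: the context
struct, callback names and filter itself are hypothetical, pad and option
registration are omitted, and it reduces the pattern used by vf_blend,
vf_overlay, vf_lut3d and vf_psnr to its essentials.

#include "avfilter.h"
#include "dualinput.h"
#include "internal.h"

typedef struct {
    const AVClass *class;
    FFDualInputContext dinput;   /* holds the framesync state */
} ExampleContext;

/* Combine one frame from each input; invoked from the framesync event. */
static AVFrame *process(AVFilterContext *ctx, AVFrame *main, const AVFrame *second)
{
    /* ... merge "second" into "main" here ... */
    return main;
}

static av_cold int init(AVFilterContext *ctx)
{
    ExampleContext *s = ctx->priv;
    s->dinput.process = process;         /* callback used by dualinput */
    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ExampleContext *s = ctx->priv;

    outlink->w         = ctx->inputs[0]->w;
    outlink->h         = ctx->inputs[0]->h;
    outlink->time_base = ctx->inputs[0]->time_base;

    /* New with this commit: configure framesync once the input links
     * are known. */
    return ff_dualinput_init(ctx, &s->dinput);
}

static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
{
    ExampleContext *s = inlink->dst->priv;
    return ff_dualinput_filter_frame_main(&s->dinput, inlink, frame);
}

static int filter_frame_second(AVFilterLink *inlink, AVFrame *frame)
{
    ExampleContext *s = inlink->dst->priv;
    return ff_dualinput_filter_frame_second(&s->dinput, inlink, frame);
}

static int request_frame(AVFilterLink *outlink)
{
    ExampleContext *s = outlink->src->priv;
    return ff_dualinput_request_frame(&s->dinput, outlink);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ExampleContext *s = ctx->priv;
    ff_dualinput_uninit(&s->dinput);
}

As the per-filter hunks below show, the only change this commit imposes on
existing dualinput users is the extra ff_dualinput_init() call in
config_output(); the rest of the wiring was already in place.
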
diff --git a/libavfilter/vf_blend.c b/libavfilter/vf_blend.c
index 1152361..3bc8eec 100644
--- a/libavfilter/vf_blend.c
+++ b/libavfilter/vf_blend.c
@@ -368,6 +368,7 @@ static int config_output(AVFilterLink *outlink)
AVFilterLink *bottomlink = ctx->inputs[BOTTOM];
BlendContext *b = ctx->priv;
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(toplink->format);
+ int ret;
if (toplink->format != bottomlink->format) {
av_log(ctx, AV_LOG_ERROR, "inputs must be of same pixel format\n");
@@ -399,6 +400,9 @@ static int config_output(AVFilterLink *outlink)
b->vsub = pix_desc->log2_chroma_h;
b->nb_planes = av_pix_fmt_count_planes(toplink->format);
+ if ((ret = ff_dualinput_init(ctx, &b->dinput)) < 0)
+ return ret;
+
return 0;
}
diff --git a/libavfilter/vf_lut3d.c b/libavfilter/vf_lut3d.c
index 05318f8..3c1b482 100644
--- a/libavfilter/vf_lut3d.c
+++ b/libavfilter/vf_lut3d.c
@@ -656,10 +656,14 @@ static void update_clut(LUT3DContext *lut3d, const AVFrame *frame)
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
+ LUT3DContext *lut3d = ctx->priv;
+ int ret;
outlink->w = ctx->inputs[0]->w;
outlink->h = ctx->inputs[0]->h;
outlink->time_base = ctx->inputs[0]->time_base;
+ if ((ret = ff_dualinput_init(ctx, &lut3d->dinput)) < 0)
+ return ret;
return 0;
}
diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
index 0c2722d..c3f1c2e 100644
--- a/libavfilter/vf_overlay.c
+++ b/libavfilter/vf_overlay.c
@@ -309,6 +309,11 @@ static int config_input_overlay(AVFilterLink *inlink)
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
+ OverlayContext *s = ctx->priv;
+ int ret;
+
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
outlink->w = ctx->inputs[MAIN]->w;
outlink->h = ctx->inputs[MAIN]->h;
diff --git a/libavfilter/vf_psnr.c b/libavfilter/vf_psnr.c
index bb0dbe0..c4af4ac 100644
--- a/libavfilter/vf_psnr.c
+++ b/libavfilter/vf_psnr.c
@@ -305,13 +305,17 @@ static int config_input_ref(AVFilterLink *inlink)
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
+ PSNRContext *s = ctx->priv;
AVFilterLink *mainlink = ctx->inputs[0];
+ int ret;
outlink->w = mainlink->w;
outlink->h = mainlink->h;
outlink->time_base = mainlink->time_base;
outlink->sample_aspect_ratio = mainlink->sample_aspect_ratio;
outlink->frame_rate = mainlink->frame_rate;
+ if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+ return ret;
return 0;
}