[FFmpeg-devel] [PATCH 3/3] vf_overlay: rewrite request/push logic.
Stefano Sabatini
stefasab at gmail.com
Wed May 9 02:49:47 CEST 2012
On date Friday 2012-04-27 17:38:42 +0200, Nicolas George encoded:
> The old implementation, upon receiving a frame on the main
> input, would request an overlay frame if necessary. This could
> generate an infinite recursion, off-by-one frame mismatch and
> other harmful effects, especially if the split filter is present
> upgraph.
>
> The new implementation uses the linear approach: it uses two
> buffer queues for frames received out of turn and forwards
> request_frame calls to the input where a frame is necessary.
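If I understand the design correctly: each input queues the frames it
receives out of turn, and a main frame is emitted as soon as the
overlay frame covering its pts is known. To fix the idea, here is a
toy model of the logic as I understand it (plain C with integer pts,
nothing to do with the real libavfilter types, and ignoring the
overlay-EOF case):

    #include <stdio.h>

    #define QSIZE 16

    typedef struct Queue { int buf[QSIZE]; int head, count; } Queue;

    static void q_push(Queue *q, int pts) { q->buf[(q->head + q->count++) % QSIZE] = pts; }
    static int  q_peek(const Queue *q)    { return q->buf[q->head]; }
    static int  q_pop(Queue *q)           { int v = q_peek(q); q->head = (q->head + 1) % QSIZE; q->count--; return v; }

    int main(void)
    {
        Queue main_q = { { 0 } }, over_q = { { 0 } };
        int cur_over = -1;  /* pts of the current overlay frame, -1 = none yet */
        /* interleaved arrivals on the two inputs: (input, pts) */
        struct { char in; int pts; } ev[] = {
            { 'm', 0 }, { 'm', 1 }, { 'o', 0 }, { 'o', 2 },
            { 'm', 2 }, { 'm', 3 }, { 'o', 4 },
        };
        unsigned i;

        for (i = 0; i < sizeof(ev) / sizeof(*ev); i++) {
            if (ev[i].in == 'm') q_push(&main_q, ev[i].pts);
            else                 q_push(&over_q, ev[i].pts);
            /* push as many main frames as the overlay state allows */
            while (main_q.count) {
                int mpts = q_peek(&main_q);
                /* catch up on the overlay side */
                while (over_q.count && q_peek(&over_q) <= mpts)
                    cur_over = q_pop(&over_q);
                if (!over_q.count && cur_over < mpts)
                    break;  /* the right overlay frame is not known yet */
                printf("out: main pts %d, overlay pts %d\n",
                       q_pop(&main_q), cur_over);
            }
        }
        return 0;
    }

No recursion and no off-by-one: a frame simply waits in its queue
until the other input has caught up.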
>
> Signed-off-by: Nicolas George <nicolas.george at normalesup.org>
> ---
> libavfilter/vf_overlay.c | 220 +++++++++++++++++++++++++++++++---------------
> 1 files changed, 149 insertions(+), 71 deletions(-)
>
> diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
> index 8227d35..1a8b720 100644
> --- a/libavfilter/vf_overlay.c
> +++ b/libavfilter/vf_overlay.c
> @@ -36,6 +36,7 @@
> #include "libavutil/mathematics.h"
> #include "libavutil/timestamp.h"
> #include "internal.h"
> +#include "bufferqueue.h"
> #include "drawutils.h"
>
> static const char *const var_names[] = {
> @@ -71,6 +72,8 @@ typedef struct {
> int x, y; ///< position of overlayed picture
>
> int allow_packed_rgb;
> + uint8_t frame_requested;
> + uint8_t overlay_eof;
> uint8_t main_is_packed_rgb;
> uint8_t main_rgba_map[4];
> uint8_t main_has_alpha;
> @@ -78,7 +81,9 @@ typedef struct {
> uint8_t overlay_rgba_map[4];
> uint8_t overlay_has_alpha;
>
> - AVFilterBufferRef *overpicref, *overpicref_next;
> + AVFilterBufferRef *overpicref;
> + struct ff_bufqueue queue_main;
> + struct ff_bufqueue queue_over;
>
> int main_pix_step[4]; ///< steps per pixel for each plane of the main output
> int overlay_pix_step[4]; ///< steps per pixel for each plane of the overlay
> @@ -149,8 +154,8 @@ static av_cold void uninit(AVFilterContext *ctx)
>
> if (over->overpicref)
> avfilter_unref_buffer(over->overpicref);
> - if (over->overpicref_next)
> - avfilter_unref_buffer(over->overpicref_next);
> + ff_bufqueue_discard_all(&over->queue_main);
> + ff_bufqueue_discard_all(&over->queue_over);
> }
>
> static int query_formats(AVFilterContext *ctx)
> @@ -304,51 +309,6 @@ static AVFilterBufferRef *get_video_buffer(AVFilterLink *link, int perms, int w,
> return avfilter_get_video_buffer(link->dst->outputs[0], perms, w, h);
> }
>
> -static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
> -{
> - AVFilterBufferRef *outpicref = avfilter_ref_buffer(inpicref, ~0);
> - AVFilterContext *ctx = inlink->dst;
> - OverlayContext *over = ctx->priv;
> - av_unused AVFilterLink *outlink = ctx->outputs[0];
> -
> - inlink->dst->outputs[0]->out_buf = outpicref;
> - outpicref->pts = av_rescale_q(outpicref->pts, ctx->inputs[MAIN]->time_base,
> - ctx->outputs[0]->time_base);
> -
> - if (!over->overpicref || over->overpicref->pts < outpicref->pts) {
> - if (!over->overpicref_next)
> - avfilter_request_frame(ctx->inputs[OVERLAY]);
> -
> - if (over->overpicref && over->overpicref_next &&
> - over->overpicref_next->pts <= outpicref->pts) {
> - avfilter_unref_buffer(over->overpicref);
> - over->overpicref = over->overpicref_next;
> - over->overpicref_next = NULL;
> - }
> - }
> -
> - av_dlog(ctx, "main_pts:%s main_pts_time:%s",
> - av_ts2str(outpicref->pts), av_ts2timestr(outpicref->pts, &outlink->time_base));
> - if (over->overpicref)
> - av_dlog(ctx, " over_pts:%s over_pts_time:%s",
> - av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base));
> - av_dlog(ctx, "\n");
> -
> - avfilter_start_frame(inlink->dst->outputs[0], outpicref);
> -}
> -
> -static void start_frame_overlay(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
> -{
> - AVFilterContext *ctx = inlink->dst;
> - OverlayContext *over = ctx->priv;
> -
> - inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[OVERLAY]->time_base,
> - ctx->outputs[0]->time_base);
> -
> - if (!over->overpicref) over->overpicref = inpicref;
> - else over->overpicref_next = inpicref;
> -}
> -
> // divide by 255 and round to nearest
> // apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
> #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
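For the record, the fast variant is exact over the whole range a
blend can produce (the product of two 8-bit values); a standalone
check, for whoever wants to re-run it:

    #include <assert.h>
    #include <stdio.h>

    #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

    int main(void)
    {
        int x;

        /* exhaustive check: FAST_DIV255(x) == (x + 127) / 255, that is
         * x / 255 rounded to nearest, for every x in [0, 255 * 255] */
        for (x = 0; x <= 255 * 255; x++)
            assert(FAST_DIV255(x) == (x + 127) / 255);
        printf("FAST_DIV255 exact on [0, %d]\n", 255 * 255);
        return 0;
    }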
> @@ -483,45 +443,163 @@ static void blend_slice(AVFilterContext *ctx,
> }
> }
>
> -static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir)
> +static int try_start_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic)
nit: mainpicref?
> {
> - AVFilterContext *ctx = inlink->dst;
> + OverlayContext *over = ctx->priv;
> + AVFilterLink *outlink = ctx->outputs[0];
> + AVFilterBufferRef *next_over, *outpicref;
> +
> + while (1) {
> + next_over = ff_bufqueue_get(&over->queue_over, 0);
nit: I'd say next_overpicref, it makes the relation with overpicref clearer
> + if (!next_over || next_over->pts > mainpic->pts)
> + break;
> + ff_bufqueue_take(&over->queue_over);
> + avfilter_unref_buffer(over->overpicref);
> + over->overpicref = next_over;
> + }
Please comment these blocks of code, it would make them much easier to
read.
So at the end of this loop we have in over->overpicref the last
overlay frame with pts <= mainpicref->pts, and in over->queue_over all
the following cached frames.
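Something along these lines on top of the loop would do:

    /* Discard the overlay frames which are not needed anymore: at the
     * end of the loop over->overpicref is the last overlay frame with
     * pts <= mainpic->pts, and queue_over only contains frames in the
     * future. */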
> + if (!over->queue_over.available && !over->overlay_eof &&
> + (!over->overpicref || over->overpicref->pts < mainpic->pts))
> + return AVERROR(EAGAIN);
Try again if the overlay queue is empty, the overlay stream has not
ended, and there is no overpicref or its pts is < mainpicref->pts:
seems right.
> + outlink->out_buf = outpicref = avfilter_ref_buffer(mainpic, ~0);
> +
> + av_dlog(ctx, "main_pts:%s main_pts_time:%s",
> + av_ts2str(outpicref->pts), av_ts2timestr(outpicref->pts, &outlink->time_base));
> + if (over->overpicref)
> + av_dlog(ctx, " over_pts:%s over_pts_time:%s",
> + av_ts2str(over->overpicref->pts), av_ts2timestr(over->overpicref->pts, &outlink->time_base));
> + av_dlog(ctx, "\n");
> +
> + avfilter_start_frame(ctx->outputs[0], avfilter_ref_buffer(outpicref, ~0));
> + over->frame_requested = 0;
> + return 0;
> +}
> +
> +static int try_start_next_frame(AVFilterContext *ctx)
> +{
> + OverlayContext *over = ctx->priv;
> + AVFilterBufferRef *next_main = ff_bufqueue_get(&over->queue_main, 0);
My head spins when I can't tell whether a variable refers to a queue,
a pad or a picref, so I suggest next_main -> next_mainpicref. Yes, it
should be obvious from the "next_", but I prefer some redundancy to
help my brain circuits focus (especially at 2:30 in the morning).
> + if (!next_main || try_start_frame(ctx, next_main) < 0)
> + return AVERROR(EAGAIN);
So this is: try again if there is no next mainpicref in the main queue
yet, or if try_start_frame() can't provide a new outpicref.
> + avfilter_unref_buffer(ff_bufqueue_take(&over->queue_main));
> + return 0;
> +}
> +
> +static int try_push_frame(AVFilterContext *ctx)
> +{
> + OverlayContext *over = ctx->priv;
> AVFilterLink *outlink = ctx->outputs[0];
> AVFilterBufferRef *outpicref = outlink->out_buf;
> +
> + if (try_start_next_frame(ctx) < 0)
> + return AVERROR(EAGAIN);
> + outpicref = outlink->out_buf;
> + if (over->overpicref)
> + blend_slice(ctx, outpicref, over->overpicref, over->x, over->y,
> + over->overpicref->video->w, over->overpicref->video->h,
> + 0, outpicref->video->w, outpicref->video->h);
> + avfilter_draw_slice(outlink, 0, outpicref->video->h, +1);
> + avfilter_unref_bufferp(&outlink->out_buf);
> + avfilter_end_frame(outlink);
> + return 0;
> +}
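Also, if I read this correctly, try_push_frame() only ever deals with
a main frame which had been queued, so its slices have already gone by
and the whole picture has to be blended and sent in one go; a comment
stating it would help, something like:

    /* the frame was queued: its slices were already received, so
     * blend and draw the whole picture at once */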
> +
>
> +static void flush_frames(AVFilterContext *ctx)
> +{
> + while (!try_push_frame(ctx));
> +}
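A one-line comment for this helper would be nice too, e.g.:

    /* push all the frames which can be pushed, until one input starves */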
>
> +static void start_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
> +{
> + AVFilterContext *ctx = inlink->dst;
> + OverlayContext *over = ctx->priv;
> +
> + flush_frames(ctx);
> + inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[MAIN]->time_base,
> + ctx->outputs[0]->time_base);
> + if (try_start_frame(ctx, inpicref) < 0)
> + ff_bufqueue_add(ctx, &over->queue_main, inpicref);
> +}
> +
> +static void draw_slice_main(AVFilterLink *inlink, int y, int h, int slice_dir)
> +{
> + AVFilterContext *ctx = inlink->dst;
> OverlayContext *over = ctx->priv;
> + AVFilterLink *outlink = ctx->outputs[0];
> + AVFilterBufferRef *outpicref = outlink->out_buf;
>
> + if (!outpicref)
> + return;
> if (over->overpicref &&
> - !(over->x >= outpicref->video->w || over->y >= outpicref->video->h ||
unrelated?
> - y+h < over->y || y >= over->y + over->overpicref->video->h)) {
> + y + h > over->y && y < over->y + over->overpicref->video->h) {
> blend_slice(ctx, outpicref, over->overpicref, over->x, over->y,
> over->overpicref->video->w, over->overpicref->video->h,
> y, outpicref->video->w, h);
> }
> avfilter_draw_slice(outlink, y, h, slice_dir);
> +
> }
nit++: stray newline
>
> -static void end_frame(AVFilterLink *inlink)
> +static void end_frame_main(AVFilterLink *inlink)
> {
> - avfilter_end_frame(inlink->dst->outputs[0]);
> - avfilter_unref_buffer(inlink->cur_buf);
> -}
> + AVFilterContext *ctx = inlink->dst;
> + AVFilterLink *outlink = ctx->outputs[0];
> + AVFilterBufferRef *outpicref = outlink->out_buf;
> + flush_frames(ctx);
>
> -static void null_draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { }
> + if (!outpicref)
> + return;
> + avfilter_unref_bufferp(&inlink->cur_buf);
> + avfilter_unref_bufferp(&outlink->out_buf);
> + avfilter_end_frame(ctx->outputs[0]);
> +}
>
> -static void null_end_frame(AVFilterLink *inlink) { }
> +static void start_frame_over(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
> +{
> +}
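Maybe put a comment in the empty callback, to make clear that it is
intentional; if I get it right, the overlay frames are only processed
once complete, in end_frame_over(). E.g.:

    /* nothing to do: the overlay frame is handled in end_frame_over(),
     * once it is complete */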
>
> -static int poll_frame(AVFilterLink *link)
> +static void end_frame_over(AVFilterLink *inlink)
> {
> - AVFilterContext *s = link->src;
> - OverlayContext *over = s->priv;
> - int ret = avfilter_poll_frame(s->inputs[OVERLAY]);
> + AVFilterContext *ctx = inlink->dst;
> + OverlayContext *over = ctx->priv;
> + AVFilterBufferRef *inpicref = inlink->cur_buf;
>
> - if (ret == AVERROR_EOF)
> - ret = !!over->overpicref;
> + flush_frames(ctx);
> + inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[OVERLAY]->time_base,
> + ctx->outputs[0]->time_base);
> + ff_bufqueue_add(ctx, &over->queue_over, inpicref);
> + try_push_frame(ctx);
> +}
>
> - return ret && avfilter_poll_frame(s->inputs[MAIN]);
> +static int request_frame(AVFilterLink *link)
> +{
> + AVFilterContext *ctx = link->src;
nit: link -> outlink
> + OverlayContext *over = ctx->priv;
> + int input, r;
nit: r -> ret
> +
> + if (!try_push_frame(ctx))
> + return 0;
> + over->frame_requested = 1;
> + while (over->frame_requested) {
> + /* TODO if we had a frame duration, we could guess more accurately */
This reminds me that I have a patchset for this somewhere.
[...]
--
FFmpeg = Foolish & Fundamentalist Merciless Problematic Eretic Genius