[FFmpeg-devel] [RFC][PATCH] ffplay: factorize subtitle rendering code to a private filter
Marton Balint
cus at passwd.hu
Sat Feb 14 02:08:15 CET 2015
Signed-off-by: Marton Balint <cus at passwd.hu>
---
Makefile | 1 +
doc/ffplay.texi | 4 +
ffplay.c | 336 +++++--------------------------------
libavfilter/Makefile | 1 +
libavfilter/vf_ffplay_subtitle.c | 347 +++++++++++++++++++++++++++++++++++++++
libavfilter/vf_ffplay_subtitle.h | 38 +++++
6 files changed, 434 insertions(+), 293 deletions(-)
create mode 100644 libavfilter/vf_ffplay_subtitle.c
create mode 100644 libavfilter/vf_ffplay_subtitle.h
diff --git a/Makefile b/Makefile
index 845a274..8b6b914 100644
--- a/Makefile
+++ b/Makefile
@@ -32,6 +32,7 @@ OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_vda.o
+OBJS-ffplay += libavfilter/vf_ffplay_subtitle.o
OBJS-ffserver += ffserver_config.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
diff --git a/doc/ffplay.texi b/doc/ffplay.texi
index 45731a2..3e66fde 100644
--- a/doc/ffplay.texi
+++ b/doc/ffplay.texi
@@ -91,6 +91,10 @@ syntax.
You can specify this parameter multiple times and cycle through the specified
filtergraphs along with the show modes by pressing the key @key{w}.
+You can use the special @code{ffplay_subtitle} filter to blend movie subtitles
+onto the video somewhere in the filtergraph. If no such filter is specified, it
+will be implicitly added to the output of the filtergraph.
+
@item -af @var{filtergraph}
@var{filtergraph} is a description of the filtergraph to apply to
the input audio.
diff --git a/ffplay.c b/ffplay.c
index 8c62f9c..e49f333 100644
--- a/ffplay.c
+++ b/ffplay.c
@@ -52,6 +52,7 @@
# include "libavfilter/avfilter.h"
# include "libavfilter/buffersink.h"
# include "libavfilter/buffersrc.h"
+# include "libavfilter/vf_ffplay_subtitle.h"
#endif
#include <SDL.h>
@@ -310,7 +311,7 @@ static int screen_width = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
-static int subtitle_disable;
+static int subtitle_disable = !CONFIG_AVFILTER;
static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
static int seek_by_bytes = -1;
static int display_disable;
@@ -825,235 +826,6 @@ static void fill_border(int xleft, int ytop, int width, int height, int x, int y
color, update);
}
-#define ALPHA_BLEND(a, oldp, newp, s)\
-((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
-
-#define RGBA_IN(r, g, b, a, s)\
-{\
- unsigned int v = ((const uint32_t *)(s))[0];\
- a = (v >> 24) & 0xff;\
- r = (v >> 16) & 0xff;\
- g = (v >> 8) & 0xff;\
- b = v & 0xff;\
-}
-
-#define YUVA_IN(y, u, v, a, s, pal)\
-{\
- unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
- a = (val >> 24) & 0xff;\
- y = (val >> 16) & 0xff;\
- u = (val >> 8) & 0xff;\
- v = val & 0xff;\
-}
-
-#define YUVA_OUT(d, y, u, v, a)\
-{\
- ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
-}
-
-
-#define BPP 1
-
-static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
-{
- int wrap, wrap3, width2, skip2;
- int y, u, v, a, u1, v1, a1, w, h;
- uint8_t *lum, *cb, *cr;
- const uint8_t *p;
- const uint32_t *pal;
- int dstx, dsty, dstw, dsth;
-
- dstw = av_clip(rect->w, 0, imgw);
- dsth = av_clip(rect->h, 0, imgh);
- dstx = av_clip(rect->x, 0, imgw - dstw);
- dsty = av_clip(rect->y, 0, imgh - dsth);
- lum = dst->data[0] + dsty * dst->linesize[0];
- cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
- cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
-
- width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
- skip2 = dstx >> 1;
- wrap = dst->linesize[0];
- wrap3 = rect->pict.linesize[0];
- p = rect->pict.data[0];
- pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
-
- if (dsty & 1) {
- lum += dstx;
- cb += skip2;
- cr += skip2;
-
- if (dstx & 1) {
- YUVA_IN(y, u, v, a, p, pal);
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
- cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
- cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
- cb++;
- cr++;
- lum++;
- p += BPP;
- }
- for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
- YUVA_IN(y, u, v, a, p, pal);
- u1 = u;
- v1 = v;
- a1 = a;
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
- YUVA_IN(y, u, v, a, p + BPP, pal);
- u1 += u;
- v1 += v;
- a1 += a;
- lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
- cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
- cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
- cb++;
- cr++;
- p += 2 * BPP;
- lum += 2;
- }
- if (w) {
- YUVA_IN(y, u, v, a, p, pal);
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
- cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
- cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
- p++;
- lum++;
- }
- p += wrap3 - dstw * BPP;
- lum += wrap - dstw - dstx;
- cb += dst->linesize[1] - width2 - skip2;
- cr += dst->linesize[2] - width2 - skip2;
- }
- for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
- lum += dstx;
- cb += skip2;
- cr += skip2;
-
- if (dstx & 1) {
- YUVA_IN(y, u, v, a, p, pal);
- u1 = u;
- v1 = v;
- a1 = a;
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
- p += wrap3;
- lum += wrap;
- YUVA_IN(y, u, v, a, p, pal);
- u1 += u;
- v1 += v;
- a1 += a;
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
- cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
- cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
- cb++;
- cr++;
- p += -wrap3 + BPP;
- lum += -wrap + 1;
- }
- for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
- YUVA_IN(y, u, v, a, p, pal);
- u1 = u;
- v1 = v;
- a1 = a;
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
- YUVA_IN(y, u, v, a, p + BPP, pal);
- u1 += u;
- v1 += v;
- a1 += a;
- lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
- p += wrap3;
- lum += wrap;
-
- YUVA_IN(y, u, v, a, p, pal);
- u1 += u;
- v1 += v;
- a1 += a;
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
- YUVA_IN(y, u, v, a, p + BPP, pal);
- u1 += u;
- v1 += v;
- a1 += a;
- lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
-
- cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
- cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
-
- cb++;
- cr++;
- p += -wrap3 + 2 * BPP;
- lum += -wrap + 2;
- }
- if (w) {
- YUVA_IN(y, u, v, a, p, pal);
- u1 = u;
- v1 = v;
- a1 = a;
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
- p += wrap3;
- lum += wrap;
- YUVA_IN(y, u, v, a, p, pal);
- u1 += u;
- v1 += v;
- a1 += a;
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
- cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
- cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
- cb++;
- cr++;
- p += -wrap3 + BPP;
- lum += -wrap + 1;
- }
- p += wrap3 + (wrap3 - dstw * BPP);
- lum += wrap + (wrap - dstw - dstx);
- cb += dst->linesize[1] - width2 - skip2;
- cr += dst->linesize[2] - width2 - skip2;
- }
- /* handle odd height */
- if (h) {
- lum += dstx;
- cb += skip2;
- cr += skip2;
-
- if (dstx & 1) {
- YUVA_IN(y, u, v, a, p, pal);
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
- cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
- cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
- cb++;
- cr++;
- lum++;
- p += BPP;
- }
- for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
- YUVA_IN(y, u, v, a, p, pal);
- u1 = u;
- v1 = v;
- a1 = a;
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
-
- YUVA_IN(y, u, v, a, p + BPP, pal);
- u1 += u;
- v1 += v;
- a1 += a;
- lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
- cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
- cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
- cb++;
- cr++;
- p += 2 * BPP;
- lum += 2;
- }
- if (w) {
- YUVA_IN(y, u, v, a, p, pal);
- lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
- cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
- cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
- }
- }
-}
-
static void free_picture(Frame *vp)
{
if (vp->bmp) {
@@ -1096,37 +868,10 @@ static void calculate_display_rect(SDL_Rect *rect,
static void video_image_display(VideoState *is)
{
Frame *vp;
- Frame *sp;
- AVPicture pict;
SDL_Rect rect;
- int i;
vp = frame_queue_peek(&is->pictq);
if (vp->bmp) {
- if (is->subtitle_st) {
- if (frame_queue_nb_remaining(&is->subpq) > 0) {
- sp = frame_queue_peek(&is->subpq);
-
- if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
- SDL_LockYUVOverlay (vp->bmp);
-
- pict.data[0] = vp->bmp->pixels[0];
- pict.data[1] = vp->bmp->pixels[2];
- pict.data[2] = vp->bmp->pixels[1];
-
- pict.linesize[0] = vp->bmp->pitches[0];
- pict.linesize[1] = vp->bmp->pitches[2];
- pict.linesize[2] = vp->bmp->pitches[1];
-
- for (i = 0; i < sp->sub.num_rects; i++)
- blend_subrect(&pict, sp->sub.rects[i],
- vp->bmp->w, vp->bmp->h);
-
- SDL_UnlockYUVOverlay (vp->bmp);
- }
- }
- }
-
calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
@@ -1580,8 +1325,6 @@ static void video_refresh(void *opaque, double *remaining_time)
VideoState *is = opaque;
double time;
- Frame *sp, *sp2;
-
if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
check_external_clock_speed(is);
@@ -1655,26 +1398,6 @@ retry:
}
}
- if (is->subtitle_st) {
- while (frame_queue_nb_remaining(&is->subpq) > 0) {
- sp = frame_queue_peek(&is->subpq);
-
- if (frame_queue_nb_remaining(&is->subpq) > 1)
- sp2 = frame_queue_peek_next(&is->subpq);
- else
- sp2 = NULL;
-
- if (sp->serial != is->subtitleq.serial
- || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
- || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
- {
- frame_queue_next(&is->subpq);
- } else {
- break;
- }
- }
- }
-
display:
/* display picture */
if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
@@ -1908,6 +1631,34 @@ static int get_video_frame(VideoState *is, AVFrame *frame)
}
#if CONFIG_AVFILTER
+/* Drop expired subtitle frames, then return (via *sub) the subtitle that
+ * should be displayed at video timestamp pts (AV_TIME_BASE units), if any.
+ * opaque is the FrameQueue of decoded subtitles. Always returns 0. */
+static int get_subtitle_callback(int64_t pts, AVSubtitle **sub, void *opaque)
+{
+    Frame *sp = NULL, *sp2; /* NULL-init: queue may be empty, sp is tested below */
+    FrameQueue *fq = (FrameQueue *)opaque;
+    double pts1 = av_q2d(AV_TIME_BASE_Q) * pts;
+
+    while (frame_queue_nb_remaining(fq) > 0) {
+        sp = frame_queue_peek(fq);
+
+        if (frame_queue_nb_remaining(fq) > 1)
+            sp2 = frame_queue_peek_next(fq);
+        else
+            sp2 = NULL;
+
+        if (sp->serial != fq->pktq->serial
+            || (pts1 > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
+            || (sp2 && pts1 > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
+        {
+            frame_queue_next(fq); sp = NULL; /* slot is recycled; do not keep a stale pointer */
+        } else {
+            break;
+        }
+    }
+    if (sp && !fq->pktq->abort_request && pts1 >= sp->pts + ((float) sp->sub.start_display_time / 1000))
+        *sub = &sp->sub;
+    return 0;
+}
+
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
{
@@ -1960,6 +1711,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
AVCodecContext *codec = is->video_st->codec;
AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
+ int i;
av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
@@ -2008,6 +1760,8 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
last_filter = filt_ctx; \
} while (0)
+ INSERT_FILT("ffplay_subtitle", "");
+
/* SDL YUV code is not handling odd width/height for some driver
* combinations, therefore we crop the picture to an even width/height. */
INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
@@ -2033,6 +1787,15 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
goto fail;
+ for (i = 0; i < graph->nb_filters; i++) {
+ if (!strcmp(graph->filters[i]->filter->name, "ffplay_subtitle")) {
+ FFplaySubtitleContext *ffsubctx = graph->filters[i]->priv;
+ ffsubctx->get_subtitle_callback = get_subtitle_callback;
+ ffsubctx->opaque = &is->subpq;
+ break;
+ }
+ }
+
is->in_video_filter = filt_src;
is->out_video_filter = filt_out;
@@ -2313,8 +2076,6 @@ static int subtitle_thread(void *arg)
Frame *sp;
int got_subtitle;
double pts;
- int i, j;
- int r, g, b, y, u, v, a;
for (;;) {
if (!(sp = frame_queue_peek_writable(&is->subpq)))
@@ -2331,18 +2092,6 @@ static int subtitle_thread(void *arg)
sp->pts = pts;
sp->serial = is->subdec.pkt_serial;
- for (i = 0; i < sp->sub.num_rects; i++)
- {
- for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
- {
- RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
- y = RGB_TO_Y_CCIR(r, g, b);
- u = RGB_TO_U_CCIR(r, g, b, 0);
- v = RGB_TO_V_CCIR(r, g, b, 0);
- YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
- }
- }
-
/* now we can update the picture count */
frame_queue_push(&is->subpq);
} else if (got_subtitle) {
@@ -3783,6 +3532,7 @@ int main(int argc, char **argv)
#endif
#if CONFIG_AVFILTER
avfilter_register_all();
+ avfilter_register(&ff_vf_ffplay_subtitle);
#endif
av_register_all();
avformat_network_init();
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 21a3fbe..8e43836 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -119,6 +119,7 @@ OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o
OBJS-$(CONFIG_EQ_FILTER) += vf_eq.o
OBJS-$(CONFIG_EXTRACTPLANES_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o
+OBJS-$(CONFIG_FFPLAY_SUBTITLE_FILTER) += vf_ffplay_subtitle.o
OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o
OBJS-$(CONFIG_FIELDMATCH_FILTER) += vf_fieldmatch.o
OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o
diff --git a/libavfilter/vf_ffplay_subtitle.c b/libavfilter/vf_ffplay_subtitle.c
new file mode 100644
index 0000000..f89528b
--- /dev/null
+++ b/libavfilter/vf_ffplay_subtitle.c
@@ -0,0 +1,347 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FFplay subtitles burning filter.
+ */
+
+#include "config.h"
+#include "libavutil/colorspace.h"
+#include "libavutil/opt.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "video.h"
+#include "vf_ffplay_subtitle.h"
+
+static const AVOption ffplay_subtitle_options[] = {
+ {NULL},
+};
+
+AVFILTER_DEFINE_CLASS(ffplay_subtitle);
+
+#define ALPHA_BLEND(a, oldp, newp, s)\
+((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
+
+#define RGBA_IN(r, g, b, a, s)\
+{\
+ unsigned int v = ((const uint32_t *)(s))[0];\
+ a = (v >> 24) & 0xff;\
+ r = (v >> 16) & 0xff;\
+ g = (v >> 8) & 0xff;\
+ b = v & 0xff;\
+}
+
+#define YUVA_IN(y, u, v, a, s, pal)\
+{\
+ unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
+ a = (val >> 24) & 0xff;\
+ y = (val >> 16) & 0xff;\
+ u = (val >> 8) & 0xff;\
+ v = val & 0xff;\
+}
+
+#define YUVA_OUT(d, y, u, v, a)\
+{\
+ ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
+}
+
+
+#define BPP 1
+
+static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
+{
+ int wrap, wrap3, width2, skip2;
+ int y, u, v, a, u1, v1, a1, w, h;
+ uint8_t *lum, *cb, *cr;
+ const uint8_t *p;
+ const uint32_t *pal;
+ int dstx, dsty, dstw, dsth;
+
+ dstw = av_clip(rect->w, 0, imgw);
+ dsth = av_clip(rect->h, 0, imgh);
+ dstx = av_clip(rect->x, 0, imgw - dstw);
+ dsty = av_clip(rect->y, 0, imgh - dsth);
+ lum = dst->data[0] + dsty * dst->linesize[0];
+ cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
+ cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
+
+ width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
+ skip2 = dstx >> 1;
+ wrap = dst->linesize[0];
+ wrap3 = rect->pict.linesize[0];
+ p = rect->pict.data[0];
+ pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
+
+ if (dsty & 1) {
+ lum += dstx;
+ cb += skip2;
+ cr += skip2;
+
+ if (dstx & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ cb++;
+ cr++;
+ lum++;
+ p += BPP;
+ }
+ for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += 2 * BPP;
+ lum += 2;
+ }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ p++;
+ lum++;
+ }
+ p += wrap3 - dstw * BPP;
+ lum += wrap - dstw - dstx;
+ cb += dst->linesize[1] - width2 - skip2;
+ cr += dst->linesize[2] - width2 - skip2;
+ }
+ for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
+ lum += dstx;
+ cb += skip2;
+ cr += skip2;
+
+ if (dstx & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ p += wrap3;
+ lum += wrap;
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += -wrap3 + BPP;
+ lum += -wrap + 1;
+ }
+ for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+ p += wrap3;
+ lum += wrap;
+
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+ YUVA_IN(y, u, v, a, p + BPP, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
+
+ cb++;
+ cr++;
+ p += -wrap3 + 2 * BPP;
+ lum += -wrap + 2;
+ }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 = u;
+ v1 = v;
+ a1 = a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ p += wrap3;
+ lum += wrap;
+ YUVA_IN(y, u, v, a, p, pal);
+ u1 += u;
+ v1 += v;
+ a1 += a;
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
+ cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+ cb++;
+ cr++;
+ p += -wrap3 + BPP;
+ lum += -wrap + 1;
+ }
+ p += wrap3 + (wrap3 - dstw * BPP);
+ lum += wrap + (wrap - dstw - dstx);
+ cb += dst->linesize[1] - width2 - skip2;
+ cr += dst->linesize[2] - width2 - skip2;
+ }
+ /* handle odd height */
+ if (h) {
+ lum += dstx;
+ cb += skip2;
+ cr += skip2;
+
+ if (dstx & 1) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ cb++;
+ cr++;
+ lum++;
+ p += BPP;
+ }
+        for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
+            YUVA_IN(y, u, v, a, p, pal);
+            u1 = u;
+            v1 = v;
+            a1 = a;
+            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+
+            YUVA_IN(y, u, v, a, p + BPP, pal);
+            u1 += u;
+            v1 += v;
+            a1 += a;
+            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
+            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); /* summed chroma, matching the dsty&1 path */
+            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
+            cb++;
+            cr++;
+            p += 2 * BPP;
+            lum += 2;
+        }
+ if (w) {
+ YUVA_IN(y, u, v, a, p, pal);
+ lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
+ cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
+ cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
+ }
+ }
+}
+
+static int query_formats(AVFilterContext *ctx)
+{
+ static const enum AVPixelFormat pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NONE
+ };
+
+ ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
+
+ return 0;
+}
+
+static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
+{
+ AVFilterContext *ctx = inlink->dst;
+ AVFilterLink *outlink = ctx->outputs[0];
+ FFplaySubtitleContext *ffsub = ctx->priv;
+ AVSubtitle *sub = NULL;
+
+ if (ffsub->get_subtitle_callback && picref->pts != AV_NOPTS_VALUE)
+ ffsub->get_subtitle_callback(av_rescale_q(picref->pts, inlink->time_base, AV_TIME_BASE_Q), &sub, ffsub->opaque);
+
+ if (sub) {
+ int i, j;
+ int r, g, b, y, u, v, a;
+ for (i = 0; i < sub->num_rects; i++)
+ {
+ /* Okay, this is a bit ugly that we use subtitle rect flags to
+ * signal an already computed YUV palette, but since this filter is
+ * private to ffplay, it is probably OK for now. */
+ if (!(sub->rects[i]->flags & 0x1000)) {
+ for (j = 0; j < sub->rects[i]->nb_colors; j++)
+ {
+ RGBA_IN(r, g, b, a, (uint32_t*)sub->rects[i]->pict.data[1] + j);
+ y = RGB_TO_Y_CCIR(r, g, b);
+ u = RGB_TO_U_CCIR(r, g, b, 0);
+ v = RGB_TO_V_CCIR(r, g, b, 0);
+ YUVA_OUT((uint32_t*)sub->rects[i]->pict.data[1] + j, y, u, v, a);
+ }
+ sub->rects[i]->flags |= 0x1000;
+ }
+ }
+
+ if (sub->num_rects)
+ av_frame_make_writable(picref);
+
+ for (i = 0; i < sub->num_rects; i++)
+ blend_subrect((struct AVPicture *)picref, sub->rects[i], picref->width, picref->height);
+ }
+
+ return ff_filter_frame(outlink, picref);
+}
+
+static const AVFilterPad ffplay_subtitle_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = filter_frame,
+ },
+ { NULL }
+};
+
+static const AVFilterPad ffplay_subtitle_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ },
+ { NULL }
+};
+
+AVFilter ff_vf_ffplay_subtitle = {
+ .name = "ffplay_subtitle",
+ .description = NULL_IF_CONFIG_SMALL("Render subtitles onto input video using ffplay callbacks."),
+ .priv_size = sizeof(FFplaySubtitleContext),
+ .query_formats = query_formats,
+ .inputs = ffplay_subtitle_inputs,
+ .outputs = ffplay_subtitle_outputs,
+ .priv_class = &ffplay_subtitle_class,
+};
+
diff --git a/libavfilter/vf_ffplay_subtitle.h b/libavfilter/vf_ffplay_subtitle.h
new file mode 100644
index 0000000..be00a3e
--- /dev/null
+++ b/libavfilter/vf_ffplay_subtitle.h
@@ -0,0 +1,38 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FFplay subtitles burning filter.
+ */
+
+#ifndef AVFILTER_VF_FFPLAY_SUBTITLE_H
+#define AVFILTER_VF_FFPLAY_SUBTITLE_H
+
+#include "avfilter.h"
+#include "libavcodec/avcodec.h"
+
+extern AVFilter ff_vf_ffplay_subtitle;
+
+typedef struct {
+ const AVClass *class;
+ int (*get_subtitle_callback)(int64_t pts, AVSubtitle **sub, void *opaque);
+ void *opaque;
+} FFplaySubtitleContext;
+
+#endif /* AVFILTER_VF_FFPLAY_SUBTITLE_H */
--
2.1.4
More information about the ffmpeg-devel
mailing list