[FFmpeg-cvslog] fftools/ffmpeg: use AVFrame to pass subtitles from decoders to filters
Anton Khirnov
git at videolan.org
Mon Jun 19 12:11:34 EEST 2023
ffmpeg | branch: master | Anton Khirnov <anton at khirnov.net> | Wed Jun 14 18:08:10 2023 +0200| [88f80977eb571044e2bc157fe1b60ac63061eba0] | committer: Anton Khirnov
fftools/ffmpeg: use AVFrame to pass subtitles from decoders to filters
Allows using the same buffering code for all media types. This will also be
important for the following commit.
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=88f80977eb571044e2bc157fe1b60ac63061eba0
---
fftools/ffmpeg.h        |  2 +-
fftools/ffmpeg_dec.c    | 88 +++++++++++++++++++++++++++++++------------------
fftools/ffmpeg_filter.c | 51 ++++++++--------------------
3 files changed, 70 insertions(+), 71 deletions(-)
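
For readers unfamiliar with the wrapping scheme the diff relies on, here is a
minimal sketch of how an AVSubtitle can ride inside an AVFrame via a
refcounted buffer, which is what lets decoders hand subtitles to the same
frame-based buffering used for audio and video. The names
wrap_subtitle_in_frame() and subtitle_buf_free() are illustrative only; this
is not the fftools subtitle_wrap_frame() helper referenced below, just a guess
at the general pattern using public libavutil/libavcodec APIs.

#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/mem.h>

/* free callback for the AVBufferRef that owns the AVSubtitle */
static void subtitle_buf_free(void *opaque, uint8_t *data)
{
    AVSubtitle *sub = (AVSubtitle *)data;
    avsubtitle_free(sub);
    av_free(sub);
}

/* Illustrative helper: on success the frame owns the subtitle's contents,
 * so the caller must not call avsubtitle_free(sub) afterwards. */
static int wrap_subtitle_in_frame(AVFrame *frame, AVSubtitle *sub)
{
    AVSubtitle *copy = av_memdup(sub, sizeof(*sub));
    if (!copy)
        return AVERROR(ENOMEM);

    frame->buf[0] = av_buffer_create((uint8_t *)copy, sizeof(*copy),
                                     subtitle_buf_free, NULL, 0);
    if (!frame->buf[0]) {
        av_free(copy);
        return AVERROR(ENOMEM);
    }
    /* consumers read it back as in the diff:
     * const AVSubtitle *s = (const AVSubtitle *)frame->buf[0]->data; */
    return 0;
}
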
diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h
index 3da5bc4f7c..316cd2b7a6 100644
--- a/fftools/ffmpeg.h
+++ b/fftools/ffmpeg.h
@@ -736,7 +736,7 @@ FrameData *frame_data(AVFrame *frame);
int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference);
int ifilter_send_eof(InputFilter *ifilter, int64_t pts, AVRational tb);
-int ifilter_sub2video(InputFilter *ifilter, const AVSubtitle *sub);
+int ifilter_sub2video(InputFilter *ifilter, const AVFrame *frame);
void ifilter_sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational tb);
/**
diff --git a/fftools/ffmpeg_dec.c b/fftools/ffmpeg_dec.c
index 36f418eec8..3722a87bc5 100644
--- a/fftools/ffmpeg_dec.c
+++ b/fftools/ffmpeg_dec.c
@@ -47,11 +47,9 @@ struct Decoder {
int64_t last_filter_in_rescale_delta;
int last_frame_sample_rate;
- /* previous decoded subtitle and related variables */
- struct {
- int got_output;
- AVSubtitle subtitle;
- } prev_sub;
+ /* previous decoded subtitles */
+ AVFrame *sub_prev[2];
+ AVFrame *sub_heartbeat;
pthread_t thread;
/**
@@ -108,7 +106,9 @@ void dec_free(Decoder **pdec)
av_frame_free(&dec->frame);
av_packet_free(&dec->pkt);
- avsubtitle_free(&dec->prev_sub.subtitle);
+ for (int i = 0; i < FF_ARRAY_ELEMS(dec->sub_prev); i++)
+ av_frame_free(&dec->sub_prev[i]);
+ av_frame_free(&dec->sub_heartbeat);
av_freep(pdec);
}
@@ -384,45 +384,55 @@ static void sub2video_flush(InputStream *ist)
}
}
-static int process_subtitle(InputStream *ist, AVSubtitle *subtitle)
+static int process_subtitle(InputStream *ist, AVFrame *frame)
{
Decoder *d = ist->decoder;
- int got_output = 1;
+ const AVSubtitle *subtitle = (AVSubtitle*)frame->buf[0]->data;
int ret = 0;
if (ist->fix_sub_duration) {
+ AVSubtitle *sub_prev = d->sub_prev[0]->buf[0] ?
+ (AVSubtitle*)d->sub_prev[0]->buf[0]->data : NULL;
int end = 1;
- if (d->prev_sub.got_output) {
- end = av_rescale(subtitle->pts - d->prev_sub.subtitle.pts,
+ if (sub_prev) {
+ end = av_rescale(subtitle->pts - sub_prev->pts,
1000, AV_TIME_BASE);
- if (end < d->prev_sub.subtitle.end_display_time) {
+ if (end < sub_prev->end_display_time) {
av_log(NULL, AV_LOG_DEBUG,
"Subtitle duration reduced from %"PRId32" to %d%s\n",
- d->prev_sub.subtitle.end_display_time, end,
+ sub_prev->end_display_time, end,
end <= 0 ? ", dropping it" : "");
- d->prev_sub.subtitle.end_display_time = end;
+ sub_prev->end_display_time = end;
}
}
- FFSWAP(int, got_output, d->prev_sub.got_output);
- FFSWAP(AVSubtitle, *subtitle, d->prev_sub.subtitle);
+
+ av_frame_unref(d->sub_prev[1]);
+ av_frame_move_ref(d->sub_prev[1], frame);
+
+ frame = d->sub_prev[0];
+ subtitle = frame->buf[0] ? (AVSubtitle*)frame->buf[0]->data : NULL;
+
+ FFSWAP(AVFrame*, d->sub_prev[0], d->sub_prev[1]);
+
if (end <= 0)
- goto out;
+ return 0;
}
- if (!got_output)
+ if (!subtitle)
return 0;
for (int i = 0; i < ist->nb_filters; i++) {
- ret = ifilter_sub2video(ist->filters[i], subtitle);
+ ret = ifilter_sub2video(ist->filters[i], frame);
if (ret < 0) {
av_log(ist, AV_LOG_ERROR, "Error sending a subtitle for filtering: %s\n",
av_err2str(ret));
- goto out;
+ return ret;
}
}
+ subtitle = (AVSubtitle*)frame->buf[0]->data;
if (!subtitle->num_rects)
- goto out;
+ return 0;
for (int oidx = 0; oidx < ist->nb_outputs; oidx++) {
OutputStream *ost = ist->outputs[oidx];
@@ -432,28 +442,30 @@ static int process_subtitle(InputStream *ist, AVSubtitle *subtitle)
enc_subtitle(output_files[ost->file_index], ost, subtitle);
}
-out:
- avsubtitle_free(subtitle);
- return ret;
+ return 0;
}
int fix_sub_duration_heartbeat(InputStream *ist, int64_t signal_pts)
{
Decoder *d = ist->decoder;
int ret = AVERROR_BUG;
- AVSubtitle *prev_subtitle = &d->prev_sub.subtitle;
- AVSubtitle subtitle;
+ AVSubtitle *prev_subtitle = d->sub_prev[0]->buf[0] ?
+ (AVSubtitle*)d->sub_prev[0]->buf[0]->data : NULL;
+ AVSubtitle *subtitle;
- if (!ist->fix_sub_duration || !prev_subtitle->num_rects ||
- signal_pts <= prev_subtitle->pts)
+ if (!ist->fix_sub_duration || !prev_subtitle ||
+ !prev_subtitle->num_rects || signal_pts <= prev_subtitle->pts)
return 0;
- if ((ret = copy_av_subtitle(&subtitle, prev_subtitle)) < 0)
+ av_frame_unref(d->sub_heartbeat);
+ ret = subtitle_wrap_frame(d->sub_heartbeat, prev_subtitle, 1);
+ if (ret < 0)
return ret;
- subtitle.pts = signal_pts;
+ subtitle = (AVSubtitle*)d->sub_heartbeat->buf[0]->data;
+ subtitle->pts = signal_pts;
- return process_subtitle(ist, &subtitle);
+ return process_subtitle(ist, d->sub_heartbeat);
}
static int transcode_subtitles(InputStream *ist, const AVPacket *pkt,
@@ -781,8 +793,7 @@ int dec_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
// process the decoded frame
if (ist->dec->type == AVMEDIA_TYPE_SUBTITLE) {
- AVSubtitle *sub = (AVSubtitle*)d->frame->buf[0]->data;
- ret = process_subtitle(ist, sub);
+ ret = process_subtitle(ist, d->frame);
} else {
ret = send_frame_to_filters(ist, d->frame);
}
@@ -1043,6 +1054,7 @@ static int hw_device_setup_for_decode(InputStream *ist)
int dec_open(InputStream *ist)
{
+ Decoder *d;
const AVCodec *codec = ist->dec;
int ret;
@@ -1056,6 +1068,18 @@ int dec_open(InputStream *ist)
ret = dec_alloc(&ist->decoder);
if (ret < 0)
return ret;
+ d = ist->decoder;
+
+ if (codec->type == AVMEDIA_TYPE_SUBTITLE && ist->fix_sub_duration) {
+ for (int i = 0; i < FF_ARRAY_ELEMS(d->sub_prev); i++) {
+ d->sub_prev[i] = av_frame_alloc();
+ if (!d->sub_prev[i])
+ return AVERROR(ENOMEM);
+ }
+ d->sub_heartbeat = av_frame_alloc();
+ if (!d->sub_heartbeat)
+ return AVERROR(ENOMEM);
+ }
ist->dec_ctx->opaque = ist;
ist->dec_ctx->get_format = get_format;
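
As a standalone illustration of the ffmpeg_dec.c change above: the
fix_sub_duration state is now a pair of AVFrame slots acting as a
one-subtitle delay line, so the previous subtitle's duration can be trimmed
against the new one's pts before it is forwarded. A minimal sketch with
assumed names (SubDelayLine and sub_delay_push() are hypothetical) of the
slot swap the diff performs with av_frame_move_ref() and FFSWAP():

#include <libavutil/frame.h>
#include <libavutil/macros.h>   /* FFSWAP */

typedef struct SubDelayLine {
    AVFrame *slot[2];           /* slot[0]: previously parked, slot[1]: scratch */
} SubDelayLine;

/* Consumes 'in' (its references are moved into the delay line) and returns
 * the frame to process now; the returned frame is empty when nothing was
 * buffered yet, mirroring the "!subtitle" early return in the diff. */
static AVFrame *sub_delay_push(SubDelayLine *d, AVFrame *in)
{
    AVFrame *out;

    av_frame_unref(d->slot[1]);
    av_frame_move_ref(d->slot[1], in);      /* park the new subtitle        */

    out = d->slot[0];                       /* forward the previous one     */
    FFSWAP(AVFrame *, d->slot[0], d->slot[1]);
    return out;                             /* its ref stays in slot[1] until
                                             * the next push unrefs it      */
}
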
diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c
index 9e6883ccdd..acc8596836 100644
--- a/fftools/ffmpeg_filter.c
+++ b/fftools/ffmpeg_filter.c
@@ -118,9 +118,6 @@ typedef struct InputFilterPriv {
} fallback;
struct {
- ///< queue of AVSubtitle* before filter init
- AVFifo *queue;
-
AVFrame *frame;
int64_t last_pts;
@@ -749,12 +746,6 @@ void fg_free(FilterGraph **pfg)
av_frame_free(&frame);
av_fifo_freep2(&ifp->frame_queue);
}
- if (ifp->sub2video.queue) {
- AVSubtitle sub;
- while (av_fifo_read(ifp->sub2video.queue, &sub, 1) >= 0)
- avsubtitle_free(&sub);
- av_fifo_freep2(&ifp->sub2video.queue);
- }
av_frame_free(&ifp->sub2video.frame);
av_channel_layout_uninit(&ifp->fallback.ch_layout);
@@ -1593,7 +1584,11 @@ static int configure_filtergraph(FilterGraph *fg)
InputFilterPriv *ifp = ifp_from_ifilter(fg->inputs[i]);
AVFrame *tmp;
while (av_fifo_read(ifp->frame_queue, &tmp, 1) >= 0) {
- ret = av_buffersrc_add_frame(ifp->filter, tmp);
+ if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE) {
+ sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)tmp->buf[0]->data);
+ } else {
+ ret = av_buffersrc_add_frame(ifp->filter, tmp);
+ }
av_frame_free(&tmp);
if (ret < 0)
goto fail;
@@ -1610,20 +1605,6 @@ static int configure_filtergraph(FilterGraph *fg)
}
}
- /* process queued up subtitle packets */
- for (i = 0; i < fg->nb_inputs; i++) {
- InputFilter *ifilter = fg->inputs[i];
- InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
-
- if (ifp->type_src == AVMEDIA_TYPE_SUBTITLE && ifp->sub2video.queue) {
- AVSubtitle tmp;
- while (av_fifo_read(ifp->sub2video.queue, &tmp, 1) >= 0) {
- sub2video_update(ifp, INT64_MIN, &tmp);
- avsubtitle_free(&tmp);
- }
- }
- }
-
return 0;
fail:
@@ -1797,35 +1778,29 @@ void ifilter_sub2video_heartbeat(InputFilter *ifilter, int64_t pts, AVRational t
sub2video_push_ref(ifp, pts2);
}
-int ifilter_sub2video(InputFilter *ifilter, const AVSubtitle *subtitle)
+int ifilter_sub2video(InputFilter *ifilter, const AVFrame *frame)
{
InputFilterPriv *ifp = ifp_from_ifilter(ifilter);
int ret;
if (ifilter->graph->graph) {
- if (!subtitle) {
+ if (!frame) {
if (ifp->sub2video.end_pts < INT64_MAX)
sub2video_update(ifp, INT64_MAX, NULL);
return av_buffersrc_add_frame(ifp->filter, NULL);
}
- sub2video_update(ifp, INT64_MIN, subtitle);
- } else if (subtitle) {
- AVSubtitle sub;
+ sub2video_update(ifp, INT64_MIN, (const AVSubtitle*)frame->buf[0]->data);
+ } else if (frame) {
+ AVFrame *tmp = av_frame_clone(frame);
- if (!ifp->sub2video.queue)
- ifp->sub2video.queue = av_fifo_alloc2(8, sizeof(AVSubtitle), AV_FIFO_FLAG_AUTO_GROW);
- if (!ifp->sub2video.queue)
+ if (!tmp)
return AVERROR(ENOMEM);
- ret = copy_av_subtitle(&sub, subtitle);
- if (ret < 0)
- return ret;
-
- ret = av_fifo_write(ifp->sub2video.queue, &sub, 1);
+ ret = av_fifo_write(ifp->frame_queue, &tmp, 1);
if (ret < 0) {
- avsubtitle_free(&sub);
+ av_frame_free(&tmp);
return ret;
}
}
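
The ffmpeg_filter.c side drops the dedicated AVSubtitle fifo: wrapped
subtitle frames now go through the same pre-init frame_queue as audio and
video, and configure_filtergraph() routes them to sub2video_update() instead
of av_buffersrc_add_frame(). A hedged sketch of the clone-and-queue step,
with an illustrative helper name (queue_frame_until_init() is not an fftools
function):

#include <libavutil/error.h>
#include <libavutil/fifo.h>
#include <libavutil/frame.h>

/* frame_queue is an AVFifo of AVFrame* (e.g. created with
 * av_fifo_alloc2(8, sizeof(AVFrame *), AV_FIFO_FLAG_AUTO_GROW)). */
static int queue_frame_until_init(AVFifo *frame_queue, const AVFrame *frame)
{
    AVFrame *tmp = av_frame_clone(frame);   /* keep our own reference       */
    int ret;

    if (!tmp)
        return AVERROR(ENOMEM);

    ret = av_fifo_write(frame_queue, &tmp, 1);  /* fifo stores the pointer  */
    if (ret < 0)
        av_frame_free(&tmp);                    /* write failed: drop clone */
    return ret;
}

On drain, each queued AVFrame* is read back with av_fifo_read() and freed
with av_frame_free() after being passed to the filtergraph, as in the
configure_filtergraph() hunk above.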