[FFmpeg-devel] [PATCH v3 08/18] fftools/ffmpeg: Replace sub2video with subtitle frame filtering

Soft Works softworkz at hotmail.com
Sat Sep 11 09:02:53 EEST 2021


Signed-off-by: softworkz <softworkz at hotmail.com>
---
 fftools/ffmpeg.c        | 408 +++++++++++++++++-----------------------
 fftools/ffmpeg.h        |  12 +-
 fftools/ffmpeg_filter.c | 198 +++++++++++++------
 fftools/ffmpeg_hw.c     |   2 +-
 fftools/ffmpeg_opt.c    |   3 +-
 5 files changed, 323 insertions(+), 300 deletions(-)
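
Note for reviewers: throughout the new code paths below, a decoded AVSubtitle
travels through the filtergraph inside an AVFrame; the AVSubtitle is wrapped
in an AVBufferRef stored in frame->buf[0] and exposed via frame->data[0]
(see decode_subtitles(), subtitle_resend_current() and do_subtitle_out()).
The following is a minimal sketch of that wrapping only, assuming the
avsubtitle_free_ref() callback and the subtitle frame format values added
earlier in this series; wrap_subtitle_in_frame() itself is illustrative and
not part of the patch:

    #include "libavcodec/avcodec.h"   /* AVSubtitle */
    #include "libavutil/buffer.h"
    #include "libavutil/frame.h"

    /* Sketch only: wrap an already decoded AVSubtitle into an AVFrame so it
     * can be fed to an sbuffer source; error handling is minimal and the pts
     * rescaling done in decode_subtitles() is omitted. */
    static AVFrame *wrap_subtitle_in_frame(AVSubtitle *sub, enum AVSubtitleType fmt)
    {
        AVFrame *frame = av_frame_alloc();

        if (!frame)
            return NULL;
        frame->format = fmt;
        frame->buf[0] = av_buffer_create((uint8_t *)sub, sizeof(*sub),
                                         avsubtitle_free_ref, NULL,
                                         AV_BUFFER_FLAG_READONLY);
        if (!frame->buf[0]) {
            av_frame_free(&frame);
            return NULL;
        }
        frame->data[0] = frame->buf[0]->data;
        return frame;
    }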

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index a9bb9d964d..36fba765de 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -169,163 +169,6 @@ static int restore_tty;
 static void free_input_threads(void);
 #endif
 
-/* sub2video hack:
-   Convert subtitles to video with alpha to insert them in filter graphs.
-   This is a temporary solution until libavfilter gets real subtitles support.
- */
-
-static int sub2video_get_blank_frame(InputStream *ist)
-{
-    int ret;
-    AVFrame *frame = ist->sub2video.frame;
-
-    av_frame_unref(frame);
-    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
-    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
-    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
-    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
-        return ret;
-    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
-    return 0;
-}
-
-static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
-                                AVSubtitleRect *r)
-{
-    uint32_t *pal, *dst2;
-    uint8_t *src, *src2;
-    int x, y;
-
-    if (r->type != SUBTITLE_BITMAP) {
-        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
-        return;
-    }
-    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
-        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
-            r->x, r->y, r->w, r->h, w, h
-        );
-        return;
-    }
-
-    dst += r->y * dst_linesize + r->x * 4;
-    src = r->data[0];
-    pal = (uint32_t *)r->data[1];
-    for (y = 0; y < r->h; y++) {
-        dst2 = (uint32_t *)dst;
-        src2 = src;
-        for (x = 0; x < r->w; x++)
-            *(dst2++) = pal[*(src2++)];
-        dst += dst_linesize;
-        src += r->linesize[0];
-    }
-}
-
-static void sub2video_push_ref(InputStream *ist, int64_t pts)
-{
-    AVFrame *frame = ist->sub2video.frame;
-    int i;
-    int ret;
-
-    av_assert1(frame->data[0]);
-    ist->sub2video.last_pts = frame->pts = pts;
-    for (i = 0; i < ist->nb_filters; i++) {
-        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
-                                           AV_BUFFERSRC_FLAG_KEEP_REF |
-                                           AV_BUFFERSRC_FLAG_PUSH);
-        if (ret != AVERROR_EOF && ret < 0)
-            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
-                   av_err2str(ret));
-    }
-}
-
-void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
-{
-    AVFrame *frame = ist->sub2video.frame;
-    int8_t *dst;
-    int     dst_linesize;
-    int num_rects, i;
-    int64_t pts, end_pts;
-
-    if (!frame)
-        return;
-    if (sub) {
-        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
-                                 AV_TIME_BASE_Q, ist->st->time_base);
-        end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL,
-                                 AV_TIME_BASE_Q, ist->st->time_base);
-        num_rects = sub->num_rects;
-    } else {
-        /* If we are initializing the system, utilize current heartbeat
-           PTS as the start time, and show until the following subpicture
-           is received. Otherwise, utilize the previous subpicture's end time
-           as the fall-back value. */
-        pts       = ist->sub2video.initialize ?
-                    heartbeat_pts : ist->sub2video.end_pts;
-        end_pts   = INT64_MAX;
-        num_rects = 0;
-    }
-    if (sub2video_get_blank_frame(ist) < 0) {
-        av_log(ist->dec_ctx, AV_LOG_ERROR,
-               "Impossible to get a blank canvas.\n");
-        return;
-    }
-    dst          = frame->data    [0];
-    dst_linesize = frame->linesize[0];
-    for (i = 0; i < num_rects; i++)
-        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
-    sub2video_push_ref(ist, pts);
-    ist->sub2video.end_pts = end_pts;
-    ist->sub2video.initialize = 0;
-}
-
-static void sub2video_heartbeat(InputStream *ist, int64_t pts)
-{
-    InputFile *infile = input_files[ist->file_index];
-    int i, j, nb_reqs;
-    int64_t pts2;
-
-    /* When a frame is read from a file, examine all sub2video streams in
-       the same file and send the sub2video frame again. Otherwise, decoded
-       video frames could be accumulating in the filter graph while a filter
-       (possibly overlay) is desperately waiting for a subtitle frame. */
-    for (i = 0; i < infile->nb_streams; i++) {
-        InputStream *ist2 = input_streams[infile->ist_index + i];
-        if (!ist2->sub2video.frame)
-            continue;
-        /* subtitles seem to be usually muxed ahead of other streams;
-           if not, subtracting a larger time here is necessary */
-        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
-        /* do not send the heartbeat frame if the subtitle is already ahead */
-        if (pts2 <= ist2->sub2video.last_pts)
-            continue;
-        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
-            /* if we have hit the end of the current displayed subpicture,
-               or if we need to initialize the system, update the
-               overlayed subpicture and its start/end times */
-            sub2video_update(ist2, pts2 + 1, NULL);
-        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
-            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
-        if (nb_reqs)
-            sub2video_push_ref(ist2, pts2);
-    }
-}
-
-static void sub2video_flush(InputStream *ist)
-{
-    int i;
-    int ret;
-
-    if (ist->sub2video.end_pts < INT64_MAX)
-        sub2video_update(ist, INT64_MAX, NULL);
-    for (i = 0; i < ist->nb_filters; i++) {
-        ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
-        if (ret != AVERROR_EOF && ret < 0)
-            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
-    }
-}
-
-/* end of sub2video hack */
-
 static void term_exit_sigsafe(void)
 {
 #if HAVE_TERMIOS_H
@@ -526,7 +369,6 @@ static void ffmpeg_cleanup(int ret)
         avfilter_graph_free(&fg->graph);
         for (j = 0; j < fg->nb_inputs; j++) {
             InputFilter *ifilter = fg->inputs[j];
-            struct InputStream *ist = ifilter->ist;
 
             while (av_fifo_size(ifilter->frame_queue)) {
                 AVFrame *frame;
@@ -535,15 +377,6 @@ static void ffmpeg_cleanup(int ret)
                 av_frame_free(&frame);
             }
             av_fifo_freep(&ifilter->frame_queue);
-            if (ist->sub2video.sub_queue) {
-                while (av_fifo_size(ist->sub2video.sub_queue)) {
-                    AVSubtitle sub;
-                    av_fifo_generic_read(ist->sub2video.sub_queue,
-                                         &sub, sizeof(sub), NULL);
-                    avsubtitle_free(&sub);
-                }
-                av_fifo_freep(&ist->sub2video.sub_queue);
-            }
             av_buffer_unref(&ifilter->hw_frames_ctx);
             av_freep(&ifilter->name);
             av_freep(&fg->inputs[j]);
@@ -636,11 +469,13 @@ static void ffmpeg_cleanup(int ret)
         av_packet_free(&ist->pkt);
         av_dict_free(&ist->decoder_opts);
         avsubtitle_free(&ist->prev_sub.subtitle);
-        av_frame_free(&ist->sub2video.frame);
         av_freep(&ist->filters);
         av_freep(&ist->hwaccel_device);
         av_freep(&ist->dts_buffer);
 
+        av_buffer_unref(&ist->subtitle_heartbeat.recent_sub);
+        av_freep(&ist->subtitle_heartbeat.header);
+
         avcodec_free_context(&ist->dec_ctx);
 
         av_freep(&input_streams[i]);
@@ -1062,13 +897,19 @@ error:
 
 static void do_subtitle_out(OutputFile *of,
                             OutputStream *ost,
-                            AVSubtitle *sub)
+                            AVFrame *frame)
 {
     int subtitle_out_max_size = 1024 * 1024;
     int subtitle_out_size, nb, i;
     AVCodecContext *enc;
     AVPacket *pkt = ost->pkt;
     int64_t pts;
+    AVSubtitle *sub = (AVSubtitle *)frame->data[0];
+
+    if (!sub)
+        return;
+
+    av_log(NULL, AV_LOG_TRACE, "do_subtitle_out: sub->pts: %"PRId64"  frame->pts: %"PRId64"\n", sub->pts, frame->pts);
 
     if (sub->pts == AV_NOPTS_VALUE) {
         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
@@ -1577,8 +1418,11 @@ static int reap_filters(int flush)
                 }
                 do_audio_out(of, ost, filtered_frame);
                 break;
+            case AVMEDIA_TYPE_SUBTITLE:
+
+                do_subtitle_out(of, ost, filtered_frame);
+                break;
             default:
-                // TODO support subtitle filters
                 av_assert0(0);
             }
 
@@ -2174,7 +2018,8 @@ static int ifilter_has_all_input_formats(FilterGraph *fg)
     int i;
     for (i = 0; i < fg->nb_inputs; i++) {
         if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
-                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
+                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO ||
+                                          fg->inputs[i]->type == AVMEDIA_TYPE_SUBTITLE))
             return 0;
     }
     return 1;
@@ -2271,7 +2116,7 @@ static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
         // the filtergraph was never configured
         if (ifilter->format < 0)
             ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
-        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
+        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO || ifilter->type == AVMEDIA_TYPE_SUBTITLE)) {
             av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
             return AVERROR_INVALIDDATA;
         }
@@ -2526,30 +2371,136 @@ fail:
     return err < 0 ? err : ret;
 }
 
-static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
+static void subtitle_resend_current(InputStream *ist, int64_t heartbeat_pts)
+{
+    AVFrame *frame;
+    AVBufferRef *sub_buffer;
+    int64_t pts, end_pts;
+
+    frame = av_frame_alloc();
+    if (!frame) {
+        av_log(ist->dec_ctx, AV_LOG_ERROR, "Unable to alloc frame (out of memory).\n");
+        return;
+    }
+
+    frame->format = av_get_subtitle_format_from_codecdesc(ist->dec_ctx->codec_descriptor);
+
+    frame->width = ist->subtitle_heartbeat.w;
+    frame->height = ist->subtitle_heartbeat.h;
+
+    if (ist->subtitle_heartbeat.recent_sub) {
+        AVSubtitle *sub = (AVSubtitle *)ist->subtitle_heartbeat.recent_sub->data;
+        sub_buffer = av_buffer_ref(ist->subtitle_heartbeat.recent_sub);
+
+        pts     = av_rescale_q(sub->pts + sub->start_display_time * 1000LL, AV_TIME_BASE_Q, ist->st->time_base);
+        end_pts = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL, AV_TIME_BASE_Q, ist->st->time_base);
+    } else {
+        AVSubtitle *empty_sub = av_mallocz(sizeof(*empty_sub));
+
+        if (!empty_sub) {
+            av_frame_free(&frame);
+            return;
+        }
+
+        pts       = ist->subtitle_heartbeat.end_pts <= 0 ? heartbeat_pts : ist->subtitle_heartbeat.end_pts;
+        end_pts   = INT64_MAX;
+
+        empty_sub->format = frame->format;
+        empty_sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
+        empty_sub->end_display_time = 1000;
+        sub_buffer = av_buffer_create((uint8_t*)empty_sub, sizeof(*empty_sub), avsubtitle_free_ref, NULL, AV_BUFFER_FLAG_READONLY);
+    }
+
+    frame->buf[0] = sub_buffer;
+    frame->data[0] = sub_buffer->data;
+    frame->pts = pts;
+    ist->subtitle_heartbeat.last_pts = pts;
+    ist->subtitle_heartbeat.end_pts = end_pts;
+
+    send_frame_to_filters(ist, frame);
+}
+
+static void subtitle_heartbeat(InputStream *ist, int64_t pts)
+{
+    const InputFile *infile = input_files[ist->file_index];
+    int i;
+    int64_t pts2;
+
+    /* When a frame is read from a file, examine all subtitle streams in
+       the same file and send the subtitle frame again. Otherwise, decoded
+       video frames could be accumulating in the filter graph while a filter
+       (possibly overlay) is desperately waiting for a subtitle frame. */
+    for (i = 0; i < infile->nb_streams; i++) {
+        InputStream *ist2 = input_streams[infile->ist_index + i];
+        if (!ist2->subtitle_heartbeat.is_active)
+            continue;
+        /* subtitles seem to be usually muxed ahead of other streams;
+           if not, subtracting a larger time here is necessary */
+        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
+        /* do not send the heartbeat frame if the subtitle is already ahead */
+        if (pts2 <= ist2->subtitle_heartbeat.last_pts)
+            continue;
+        if (pts2 >= ist2->subtitle_heartbeat.end_pts) {
+            /* if we have hit the end of the currently displayed subpicture,
+               drop it and send a clearing (empty) subtitle frame with
+               updated start/end times */
+            av_buffer_unref(&ist2->subtitle_heartbeat.recent_sub);
+            subtitle_resend_current(ist2, pts2 + 1);
+        }
+        // TODO: decide whether the current subtitle should additionally be
+        //       re-sent when the buffer sources report failed requests
+        //       (av_buffersrc_get_nb_failed_requests()), as the former
+        //       sub2video_heartbeat() did.
+    }
+}
+
+static int decode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                                int *decode_failed)
 {
-    AVSubtitle subtitle;
-    int free_sub = 1;
-    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
-                                          &subtitle, got_output, pkt);
+    AVFrame *decoded_frame;
+    AVCodecContext *avctx = ist->dec_ctx;
+    int i = 0, ret = 0, err = 0;
+    int64_t pts, end_pts;
+    AVSubtitle *subtitle = av_mallocz(sizeof(*subtitle));
 
-    check_decode_result(NULL, got_output, ret);
+    if (!subtitle)
+        return AVERROR(ENOMEM);
+
+    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+
+    decoded_frame = ist->decoded_frame;
+
+    if (!ist->subtitle_heartbeat.header && avctx->subtitle_header)
+        ist->subtitle_heartbeat.header = av_strdup((char *)avctx->subtitle_header);
+
+    ret = avcodec_decode_subtitle2(avctx, subtitle, got_output, pkt);
+    subtitle->header = ist->subtitle_heartbeat.header;
+
+    if (ret != AVERROR_EOF)
+        check_decode_result(NULL, got_output, ret);
 
     if (ret < 0 || !*got_output) {
         *decode_failed = 1;
-        if (!pkt->size)
-            sub2video_flush(ist);
+        if (!pkt->size) {
+            // Flush
+            for (i = 0; i < ist->nb_filters; i++) {
+                ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
+                if (ret != AVERROR_EOF && ret < 0)
+                    av_log(NULL, AV_LOG_WARNING, "Error flushing subtitle frames: %s\n", av_err2str(ret));
+            }
+        }
         return ret;
     }
 
     if (ist->fix_sub_duration) {
         int end = 1;
         if (ist->prev_sub.got_output) {
-            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
+            end = av_rescale(subtitle->pts - ist->prev_sub.subtitle.pts,
                              1000, AV_TIME_BASE);
             if (end < ist->prev_sub.subtitle.end_display_time) {
-                av_log(ist->dec_ctx, AV_LOG_DEBUG,
+                av_log(avctx, AV_LOG_DEBUG,
                        "Subtitle duration reduced from %"PRId32" to %d%s\n",
                        ist->prev_sub.subtitle.end_display_time, end,
                        end <= 0 ? ", dropping it" : "");
@@ -2558,51 +2509,52 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
         }
         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
         FFSWAP(int,        ret,         ist->prev_sub.ret);
-        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
+        FFSWAP(AVSubtitle, *subtitle,    ist->prev_sub.subtitle);
         if (end <= 0)
-            goto out;
+            return end;
     }
 
     if (!*got_output)
         return ret;
 
-    if (ist->sub2video.frame) {
-        sub2video_update(ist, INT64_MIN, &subtitle);
-    } else if (ist->nb_filters) {
-        if (!ist->sub2video.sub_queue)
-            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
-        if (!ist->sub2video.sub_queue)
-            exit_program(1);
-        if (!av_fifo_space(ist->sub2video.sub_queue)) {
-            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
-            if (ret < 0)
-                exit_program(1);
-        }
-        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
-        free_sub = 0;
-    }
+    decoded_frame->format = av_get_subtitle_format_from_codecdesc(avctx->codec_descriptor);
 
-    if (!subtitle.num_rects)
-        goto out;
+    if ((ret = av_frame_get_buffer2(decoded_frame, AVMEDIA_TYPE_SUBTITLE, 0)) < 0)
+        return ret;
 
-    ist->frames_decoded++;
+    av_buffer_unref(&ist->subtitle_heartbeat.recent_sub);
+    ist->subtitle_heartbeat.recent_sub = av_buffer_create((uint8_t*)subtitle, sizeof(*subtitle), avsubtitle_free_ref, NULL, AV_BUFFER_FLAG_READONLY);
 
-    for (i = 0; i < nb_output_streams; i++) {
-        OutputStream *ost = output_streams[i];
+    decoded_frame->buf[0] = av_buffer_ref(ist->subtitle_heartbeat.recent_sub);
+    decoded_frame->data[0] = ist->subtitle_heartbeat.recent_sub->data;
 
-        if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
-            exit_program(1);
-        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
-            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
-            continue;
+    pts     = av_rescale_q(subtitle->pts + subtitle->start_display_time * 1000LL,
+                           AV_TIME_BASE_Q, ist->st->time_base);
+    end_pts = av_rescale_q(subtitle->pts + subtitle->end_display_time   * 1000LL,
+                             AV_TIME_BASE_Q, ist->st->time_base);
 
-        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
+    ist->subtitle_heartbeat.last_pts = decoded_frame->pts = pts;
+    ist->subtitle_heartbeat.end_pts = end_pts;
+
+    if (ist->nb_filters == 0) {
+        for (i = 0; i < nb_output_streams; i++) {
+            OutputStream *ost = output_streams[i];
+
+            if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
+                exit_program(1);
+            if (!check_output_constraints(ist, ost) || !ost->encoding_needed
+                || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
+                continue;
+
+            do_subtitle_out(output_files[ost->file_index], ost, decoded_frame);
+        }
     }
+    else
+        err = send_frame_to_filters(ist, decoded_frame);
 
-out:
-    if (free_sub)
-        avsubtitle_free(&subtitle);
-    return ret;
+    av_frame_unref(ist->filter_frame);
+    av_frame_unref(decoded_frame);
+    return err < 0 ? err : ret;
 }
 
 static int send_filter_eof(InputStream *ist)
@@ -2710,7 +2662,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
         case AVMEDIA_TYPE_SUBTITLE:
             if (repeating)
                 break;
-            ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
+            ret = decode_subtitles(ist, avpkt, &got_output, &decode_failed);
             if (!pkt && ret >= 0)
                 ret = AVERROR_EOF;
             av_packet_unref(avpkt);
@@ -2983,8 +2935,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
                 av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
         }
 
-        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
-
         /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
          * audio, and video decoders such as cuvid or mediacodec */
         ist->dec_ctx->pkt_timebase = ist->st->time_base;
@@ -3582,19 +3532,14 @@ static int init_output_stream(OutputStream *ost, AVFrame *frame,
         }
 
         if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
-            int input_props = 0, output_props = 0;
-            AVCodecDescriptor const *input_descriptor =
-                avcodec_descriptor_get(dec->codec_id);
-            AVCodecDescriptor const *output_descriptor =
-                avcodec_descriptor_get(ost->enc_ctx->codec_id);
-            if (input_descriptor)
-                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
-            if (output_descriptor)
-                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
-            if (input_props && output_props && input_props != output_props) {
-                snprintf(error, error_len,
-                         "Subtitle encoding currently only possible from text to text "
-                         "or bitmap to bitmap");
+            AVCodecDescriptor const *input_descriptor     = avcodec_descriptor_get(dec->codec_id);
+            AVCodecDescriptor const *output_descriptor    = avcodec_descriptor_get(ost->enc_ctx->codec_id);
+            const enum AVSubtitleType in_subtitle_format  = input_descriptor  ? av_get_subtitle_format_from_codecdesc(input_descriptor)  : AV_SUBTITLE_FMT_UNKNOWN;
+            const enum AVSubtitleType out_subtitle_format = output_descriptor ? av_get_subtitle_format_from_codecdesc(output_descriptor) : AV_SUBTITLE_FMT_UNKNOWN;
+
+            if (in_subtitle_format != AV_SUBTITLE_FMT_UNKNOWN && out_subtitle_format != AV_SUBTITLE_FMT_UNKNOWN
+                && in_subtitle_format != out_subtitle_format) {
+                snprintf(error, error_len, "Subtitle encoding is only possible from text to text or bitmap to bitmap");
                 return AVERROR_INVALIDDATA;
             }
         }
@@ -4627,7 +4572,7 @@ static int process_input(int file_index)
                av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
     }
 
-    sub2video_heartbeat(ist, pkt->pts);
+    subtitle_heartbeat(ist, pkt->pts);
 
     process_input_packet(ist, pkt, 0);
 
@@ -4839,6 +4784,7 @@ static int transcode(void)
     /* at the end of stream, we must flush the decoder buffers */
     for (i = 0; i < nb_input_streams; i++) {
         ist = input_streams[i];
+        ist->subtitle_heartbeat.is_active = 0;
         if (!input_files[ist->file_index]->eof_reached) {
             process_input_packet(ist, NULL, 0);
         }
diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h
index 1194bb0cae..d1c6ae9769 100644
--- a/fftools/ffmpeg.h
+++ b/fftools/ffmpeg.h
@@ -351,14 +351,14 @@ typedef struct InputStream {
         AVSubtitle subtitle;
     } prev_sub;
 
-    struct sub2video {
+    struct subtitle_heartbeat {
+        int is_active;
         int64_t last_pts;
         int64_t end_pts;
-        AVFifoBuffer *sub_queue;    ///< queue of AVSubtitle* before filter init
-        AVFrame *frame;
+        AVBufferRef *recent_sub;
         int w, h;
-        unsigned int initialize; ///< marks if sub2video_update should force an initialization
-    } sub2video;
+        char *header;
+    } subtitle_heartbeat;
 
     /* decoded data from this stream goes into all those filters
      * currently video and audio only */
@@ -660,8 +660,6 @@ int filtergraph_is_simple(FilterGraph *fg);
 int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
 int init_complex_filtergraph(FilterGraph *fg);
 
-void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub);
-
 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);
 
 int ffmpeg_parse_options(int argc, char **argv);
diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c
index 6b7b6ca1b3..983b49cb8b 100644
--- a/fftools/ffmpeg_filter.c
+++ b/fftools/ffmpeg_filter.c
@@ -22,6 +22,9 @@
 
 #include "ffmpeg.h"
 
+#include <libavcodec/ass_split.h>
+#include <libavfilter/internal.h>
+
 #include "libavfilter/avfilter.h"
 #include "libavfilter/buffersink.h"
 #include "libavfilter/buffersrc.h"
@@ -221,9 +224,8 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
     enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
     int i;
 
-    // TODO: support other filter types
-    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
-        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
+    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO && type != AVMEDIA_TYPE_SUBTITLE) {
+        av_log(NULL, AV_LOG_FATAL, "Only video, audio and subtitle filters supported "
                "currently.\n");
         exit_program(1);
     }
@@ -244,8 +246,9 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
         for (i = 0; i < s->nb_streams; i++) {
             enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
             if (stream_type != type &&
-                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
-                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
+                // in the following case we auto-insert the graphicsub2video conversion filter
+                // to retain compatibility with the previous sub2video hack
+                !(stream_type == AVMEDIA_TYPE_SUBTITLE && type == AVMEDIA_TYPE_VIDEO))
                 continue;
             if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                 st = s->streams[i];
@@ -416,6 +419,37 @@ static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
     return 0;
 }
 
+static int configure_output_subtitle_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
+{
+    OutputStream *ost = ofilter->ost;
+    AVFilterContext *last_filter = out->filter_ctx;
+    int pad_idx = out->pad_idx;
+    int ret;
+    char name[255];
+
+    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
+    ret = avfilter_graph_create_filter(&ofilter->filter,
+                                       avfilter_get_by_name("sbuffersink"),
+                                       name, NULL, NULL, fg->graph);
+
+    if (ret < 0)
+        return ret;
+
+    // TODO: trimming via insert_trim() (as done for the video and audio output
+    //       filters) is not applied here yet, so -ss / -t are not enforced on
+    //       subtitle output filters.
+
+    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
+        return ret;
+
+    return 0;
+}
+
 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
 {
     char *pix_fmts;
@@ -594,7 +628,8 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
         int i;
 
         for (i=0; i<of->ctx->nb_streams; i++)
-            if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
+            if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
+                of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
                 break;
 
         if (i<of->ctx->nb_streams) {
@@ -628,6 +663,7 @@ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
     switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
     case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
+    case AVMEDIA_TYPE_SUBTITLE: return configure_output_subtitle_filter(fg, ofilter, out);
     default: av_assert0(0); return 0;
     }
 }
@@ -647,51 +683,103 @@ void check_filter_outputs(void)
     }
 }
 
-static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
+static int configure_input_subtitle_filter(FilterGraph *fg, InputFilter *ifilter,
+                                        AVFilterInOut *in)
 {
-    AVFormatContext *avf = input_files[ist->file_index]->ctx;
-    int i, w, h;
+    AVFilterContext *last_filter;
+    const AVFilter *buffer_filt = avfilter_get_by_name("sbuffer");
+    InputStream *ist = ifilter->ist;
+    AVBPrint args;
+    char name[255];
+    int ret, pad_idx = 0;
+    int w, h;
+    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
+
+    if (!par)
+        return AVERROR(ENOMEM);
+    memset(par, 0, sizeof(*par));
+    par->format = AV_PIX_FMT_NONE;
+
+    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+        av_log(NULL, AV_LOG_ERROR, "Cannot connect subtitle filter to audio input\n");
+        ret = AVERROR(EINVAL);
+        goto fail;
+    }
+
+    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+        av_log(NULL, AV_LOG_ERROR, "Cannot connect subtitle filter to video input\n");
+        ret = AVERROR(EINVAL);
+        goto fail;
+    }
+
+    if (!ist->subtitle_heartbeat.header && ist->dec_ctx->subtitle_header)
+        ist->subtitle_heartbeat.header = av_strdup((char *)ist->dec_ctx->subtitle_header);
+
+    ist->subtitle_heartbeat.is_active = 1;
 
-    /* Compute the size of the canvas for the subtitles stream.
-       If the subtitles codecpar has set a size, use it. Otherwise use the
-       maximum dimensions of the video streams in the same file. */
     w = ifilter->width;
     h = ifilter->height;
+
     if (!(w && h)) {
-        for (i = 0; i < avf->nb_streams; i++) {
-            if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
-                w = FFMAX(w, avf->streams[i]->codecpar->width);
-                h = FFMAX(h, avf->streams[i]->codecpar->height);
-            }
-        }
-        if (!(w && h)) {
-            w = FFMAX(w, 720);
-            h = FFMAX(h, 576);
-        }
-        av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
+        w = ist->dec_ctx->width;
+        h = ist->dec_ctx->height;
     }
-    ist->sub2video.w = ifilter->width  = w;
-    ist->sub2video.h = ifilter->height = h;
 
-    ifilter->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
-    ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
+    if (!(w && h) && ist->dec_ctx->subtitle_header) {
+        ASSSplitContext *ass_ctx = ff_ass_split((char *)ist->dec_ctx->subtitle_header);
+        if (ass_ctx) {
+            const ASS *ass = (const ASS *)ass_ctx;
+            w = ass->script_info.play_res_x;
+            h = ass->script_info.play_res_y;
+            ff_ass_split_free(ass_ctx);
+        }
+    }
 
-    /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
-       palettes for all rectangles are identical or compatible */
-    ifilter->format = AV_PIX_FMT_RGB32;
+    ist->subtitle_heartbeat.w = w;
+    ist->subtitle_heartbeat.h = h;
+    av_log(NULL, AV_LOG_INFO, "subtitle input filter: decoding size %dx%d\n",
+           ist->subtitle_heartbeat.w, ist->subtitle_heartbeat.h);
 
-    ist->sub2video.frame = av_frame_alloc();
-    if (!ist->sub2video.frame)
-        return AVERROR(ENOMEM);
-    ist->sub2video.last_pts = INT64_MIN;
-    ist->sub2video.end_pts  = INT64_MIN;
+    ifilter->width = w;
+    ifilter->height = h;
+
+    ist->subtitle_heartbeat.last_pts = INT64_MIN;
+
+    snprintf(name, sizeof(name), "graph %d subtitle input from stream %d:%d", fg->index,
+             ist->file_index, ist->st->index);
+
+
+    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
+    av_bprintf(&args,
+             "subtitle_type=%d:time_base=%d/%d:",
+             ifilter->format,
+             ist->st->time_base.num, ist->st->time_base.den);
+    if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
+                                            args.str, NULL, fg->graph)) < 0)
+        goto fail;
 
-    /* sub2video structure has been (re-)initialized.
-       Mark it as such so that the system will be
-       initialized with the first received heartbeat. */
-    ist->sub2video.initialize = 1;
+    par->hw_frames_ctx = ifilter->hw_frames_ctx;
+    par->format = ifilter->format;
+    par->width = ifilter->width;
+    par->height = ifilter->height;
+
+    ret = av_buffersrc_parameters_set(ifilter->filter, par);
+    if (ret < 0)
+        goto fail;
+    av_freep(&par);
+    last_filter = ifilter->filter;
+
+    if (in->filter_ctx->input_pads[in->pad_idx].type == AVMEDIA_TYPE_VIDEO) {
+        ret = insert_filter(&last_filter, &pad_idx, "graphicsub2video", NULL);
+        if (ret < 0)
+            return ret;
+    }
+
+    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
+        return ret;
 
     return 0;
+fail:
+    av_freep(&par);
+
+    return ret;
 }
 
 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
@@ -709,8 +797,15 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
     char name[255];
     int ret, pad_idx = 0;
     int64_t tsoffset = 0;
-    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
+    AVBufferSrcParameters *par;
 
+    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+        // Automatically insert conversion filter to retain compatibility
+        // with sub2video command lines
+        return configure_input_subtitle_filter(fg, ifilter, in);
+    }
+
+    par = av_buffersrc_parameters_alloc();
     if (!par)
         return AVERROR(ENOMEM);
     memset(par, 0, sizeof(*par));
@@ -725,12 +820,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
     if (!fr.num)
         fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
 
-    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
-        ret = sub2video_prepare(ist, ifilter);
-        if (ret < 0)
-            goto fail;
-    }
-
     sar = ifilter->sample_aspect_ratio;
     if(!sar.den)
         sar = (AVRational){0,1};
@@ -742,7 +831,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
              tb.num, tb.den, sar.num, sar.den);
     if (fr.num && fr.den)
         av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
-    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
+    snprintf(name, sizeof(name), "graph %d video input from stream %d:%d", fg->index,
              ist->file_index, ist->st->index);
 
 
@@ -938,6 +1027,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
     switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
     case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
+    case AVMEDIA_TYPE_SUBTITLE: return configure_input_subtitle_filter(fg, ifilter, in);
     default: av_assert0(0); return 0;
     }
 }
@@ -1105,19 +1195,6 @@ int configure_filtergraph(FilterGraph *fg)
         }
     }
 
-    /* process queued up subtitle packets */
-    for (i = 0; i < fg->nb_inputs; i++) {
-        InputStream *ist = fg->inputs[i]->ist;
-        if (ist->sub2video.sub_queue && ist->sub2video.frame) {
-            while (av_fifo_size(ist->sub2video.sub_queue)) {
-                AVSubtitle tmp;
-                av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
-                sub2video_update(ist, INT64_MIN, &tmp);
-                avsubtitle_free(&tmp);
-            }
-        }
-    }
-
     return 0;
 
 fail:
@@ -1138,6 +1215,7 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
     ifilter->sample_rate         = frame->sample_rate;
     ifilter->channels            = frame->channels;
     ifilter->channel_layout      = frame->channel_layout;
+    ifilter->type                = frame->type;
 
     if (frame->hw_frames_ctx) {
         ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
diff --git a/fftools/ffmpeg_hw.c b/fftools/ffmpeg_hw.c
index 41aaf776d7..f8b49fd5ac 100644
--- a/fftools/ffmpeg_hw.c
+++ b/fftools/ffmpeg_hw.c
@@ -449,7 +449,7 @@ int hw_device_setup_for_encode(OutputStream *ost)
     AVBufferRef *frames_ref = NULL;
     int i;
 
-    if (ost->filter) {
+    if (ost->filter && ost->filter->filter) {
         frames_ref = av_buffersink_get_hw_frames_ctx(ost->filter->filter);
         if (frames_ref &&
             ((AVHWFramesContext*)frames_ref->data)->format ==
diff --git a/fftools/ffmpeg_opt.c b/fftools/ffmpeg_opt.c
index 6ca28cf974..21b5228bb2 100644
--- a/fftools/ffmpeg_opt.c
+++ b/fftools/ffmpeg_opt.c
@@ -2149,8 +2149,9 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
     switch (ofilter->type) {
     case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc, -1); break;
     case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc, -1); break;
+    case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc, -1); break;
     default:
-        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters are supported "
+        av_log(NULL, AV_LOG_FATAL, "Only video, audio and subtitle filters are supported "
                "currently.\n");
         exit_program(1);
     }
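
For completeness, a sketch of the consumer side: frames produced by the
sbuffersink set up in configure_output_subtitle_filter() are expected to be
read with the regular av_buffersink_get_frame() API (this is what
reap_filters() relies on before calling do_subtitle_out()).
read_one_subtitle_frame() below is illustrative only and assumes the
AVSubtitle-in-AVFrame convention described above the diff:

    #include <inttypes.h>
    #include "libavcodec/avcodec.h"        /* AVSubtitle */
    #include "libavfilter/buffersink.h"
    #include "libavutil/frame.h"
    #include "libavutil/log.h"

    /* Sketch only: pull one filtered frame from a subtitle sink and access
     * the wrapped AVSubtitle, mirroring what do_subtitle_out() does. */
    static int read_one_subtitle_frame(AVFilterContext *sink)
    {
        AVFrame *frame = av_frame_alloc();
        int ret;

        if (!frame)
            return AVERROR(ENOMEM);
        ret = av_buffersink_get_frame(sink, frame);
        if (ret >= 0) {
            const AVSubtitle *sub = (const AVSubtitle *)frame->data[0];
            if (sub)
                av_log(NULL, AV_LOG_DEBUG, "subtitle frame pts=%"PRId64", %u rect(s)\n",
                       frame->pts, sub->num_rects);
        }
        av_frame_free(&frame);
        return ret;
    }
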
-- 
2.30.2.windows.1


