[FFmpeg-devel] [PATCH] avfilter: add (a)separate filters

Paul B Mahol onemda@gmail.com
Sun Aug 1 17:21:39 EEST 2021


Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
 doc/filters.texi         |  31 ++++
 libavfilter/Makefile     |   2 +
 libavfilter/allfilters.c |   2 +
 libavfilter/f_separate.c | 346 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 381 insertions(+)
 create mode 100644 libavfilter/f_separate.c

diff --git a/doc/filters.texi b/doc/filters.texi
index 635179edb9..e40322417b 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -26067,6 +26067,37 @@ sendcmd=f=test.cmd,drawtext=fontfile=FreeSerif.ttf:text='',hue
 @end example
 @end itemize
 
+@section separate, aseparate
+
+Separate a single input stream into multiple output streams.
+
+@code{separate} works on video frames, @code{aseparate} on audio samples.
+
+These filters accept the following options:
+
+@table @option
+@item durations
+Durations, measured from the start of the input, at which to separate it.
+Duration points are separated by '|'.
+
+@item frames, samples
+Exact frame/sample counts at which to separate the input video/audio stream.
+Points are separated by '|'.
+@end table
+
+@subsection Examples
+
+@itemize
+
+@item
+Separate the input audio stream into three output audio streams: the first covers
+the input from its start up to the 60th second, the second from the 60th to the
+120th second, and the third from the 120th second to the end of the input:
+@example
+aseparate=durations="60|120"
+@end example
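+
+@item
+As an illustrative sketch of the @option{frames} option, separate the input video
+stream into two output video streams, the first holding the first 250 frames and
+the second the remainder:
+@example
+separate=frames=250
+@end example
+
+The two outputs can then be mapped separately; for example (the file names here
+are placeholders):
+@example
+ffmpeg -i input.mp4 -filter_complex "separate=frames=250[a][b]" \
+       -map "[a]" first.mp4 -map "[b]" rest.mp4
+@end example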
+
+@end itemize
+
 @anchor{setpts}
 @section setpts, asetpts
 
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 772971521c..73c26f4870 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -80,6 +80,7 @@ OBJS-$(CONFIG_AREVERSE_FILTER)               += f_reverse.o
 OBJS-$(CONFIG_ARNNDN_FILTER)                 += af_arnndn.o
 OBJS-$(CONFIG_ASELECT_FILTER)                += f_select.o
 OBJS-$(CONFIG_ASENDCMD_FILTER)               += f_sendcmd.o
+OBJS-$(CONFIG_ASEPARATE_FILTER)              += f_separate.o
 OBJS-$(CONFIG_ASETNSAMPLES_FILTER)           += af_asetnsamples.o
 OBJS-$(CONFIG_ASETPTS_FILTER)                += setpts.o
 OBJS-$(CONFIG_ASETRATE_FILTER)               += af_asetrate.o
@@ -408,6 +409,7 @@ OBJS-$(CONFIG_SCROLL_FILTER)                 += vf_scroll.o
 OBJS-$(CONFIG_SELECT_FILTER)                 += f_select.o
 OBJS-$(CONFIG_SELECTIVECOLOR_FILTER)         += vf_selectivecolor.o
 OBJS-$(CONFIG_SENDCMD_FILTER)                += f_sendcmd.o
+OBJS-$(CONFIG_SEPARATE_FILTER)               += f_separate.o
 OBJS-$(CONFIG_SEPARATEFIELDS_FILTER)         += vf_separatefields.o
 OBJS-$(CONFIG_SETDAR_FILTER)                 += vf_aspect.o
 OBJS-$(CONFIG_SETFIELD_FILTER)               += vf_setparams.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index e0276eb98c..c11c680564 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -73,6 +73,7 @@ extern const AVFilter ff_af_areverse;
 extern const AVFilter ff_af_arnndn;
 extern const AVFilter ff_af_aselect;
 extern const AVFilter ff_af_asendcmd;
+extern const AVFilter ff_af_aseparate;
 extern const AVFilter ff_af_asetnsamples;
 extern const AVFilter ff_af_asetpts;
 extern const AVFilter ff_af_asetrate;
@@ -389,6 +390,7 @@ extern const AVFilter ff_vf_scroll;
 extern const AVFilter ff_vf_select;
 extern const AVFilter ff_vf_selectivecolor;
 extern const AVFilter ff_vf_sendcmd;
+extern const AVFilter ff_vf_separate;
 extern const AVFilter ff_vf_separatefields;
 extern const AVFilter ff_vf_setdar;
 extern const AVFilter ff_vf_setfield;
diff --git a/libavfilter/f_separate.c b/libavfilter/f_separate.c
new file mode 100644
index 0000000000..46f8871c8a
--- /dev/null
+++ b/libavfilter/f_separate.c
@@ -0,0 +1,346 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "libavutil/avstring.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/common.h"
+#include "libavutil/log.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/samplefmt.h"
+
+#include "audio.h"
+#include "avfilter.h"
+#include "filters.h"
+#include "internal.h"
+
+typedef struct SeparateContext {
+    const AVClass *class;
+
+    char *durations_str;
+    char *points_str;
+
+    int current_point;
+    int nb_points;
+
+    int64_t *points;
+
+    int64_t current_frame;
+    int64_t current_sample;
+} SeparateContext;
+
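+/* Count the '|'-separated items in item_str and store the result in *nb_items;
+ * *nb_items is left untouched when item_str is NULL. */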
+static void count_points(char *item_str, int *nb_items)
+{
+    char *p;
+
+    if (!item_str)
+        return;
+
+    *nb_items = 1;
+    for (p = item_str; *p; p++) {
+        if (*p == '|')
+            (*nb_items)++;
+    }
+}
+
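+/* Parse nb_points '|'-separated duration specifications (av_parse_time() syntax)
+ * into points, expressed in AV_TIME_BASE units. */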
+static int parse_durations(AVFilterContext *ctx, char *item_str, int nb_points, int64_t *points)
+{
+    char *arg, *p = item_str;
+    char *saveptr = NULL;
+    int ret;
+
+    for (int i = 0; i < nb_points; i++) {
+        if (!(arg = av_strtok(p, "|", &saveptr)))
+            return AVERROR(EINVAL);
+
+        p = NULL;
+
+        ret = av_parse_time(&points[i], arg, 1);
+        if (ret < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Invalid durations supplied: %s\n", arg);
+            return AVERROR(EINVAL);
+        }
+    }
+
+    return 0;
+}
+
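+/* Parse nb_points '|'-separated integer frame/sample indices into points. */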
+static int parse_points(AVFilterContext *ctx, char *item_str, int nb_points, int64_t *points)
+{
+    char *arg, *p = item_str;
+    char *saveptr = NULL;
+
+    for (int i = 0; i < nb_points; i++) {
+        if (!(arg = av_strtok(p, "|", &saveptr)))
+            return AVERROR(EINVAL);
+
+        p = NULL;
+
+        if (sscanf(arg, "%"PRId64, &points[i]) != 1) {
+            av_log(ctx, AV_LOG_ERROR, "Invalid points supplied: %s\n", arg);
+            return AVERROR(EINVAL);
+        }
+    }
+
+    return 0;
+}
+
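+/* Count and parse the split points, append an INT64_MAX sentinel so the last
+ * output runs until EOF, and create one output pad per resulting segment. */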
+static av_cold int init(AVFilterContext *ctx, int type)
+{
+    SeparateContext *s = ctx->priv;
+    int ret;
+
+    if (s->durations_str) {
+        count_points(s->durations_str, &s->nb_points);
+        s->nb_points++;
+    } else {
+        count_points(s->points_str, &s->nb_points);
+        s->nb_points++;
+    }
+
+    s->points = av_calloc(s->nb_points, sizeof(*s->points));
+    if (!s->points)
+        return AVERROR(ENOMEM);
+
+    if (s->durations_str) {
+        ret = parse_durations(ctx, s->durations_str, s->nb_points - 1, s->points);
+        if (ret < 0)
+            return ret;
+    } else {
+        ret = parse_points(ctx, s->points_str, s->nb_points - 1, s->points);
+        if (ret < 0)
+            return ret;
+    }
+
+    s->points[s->nb_points - 1] = INT64_MAX;
+
+    for (int i = 0; i < s->nb_points; i++) {
+        AVFilterPad pad = { 0 };
+
+        pad.type = type;
+        pad.name = av_asprintf("output%d", i);
+        if (!pad.name)
+            return AVERROR(ENOMEM);
+
+        if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
+            av_freep(&pad.name);
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
+static int config_input(AVFilterLink *inlink)
+{
+    AVFilterContext *ctx = inlink->dst;
+    SeparateContext *s = ctx->priv;
+    AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
+                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };
+
+    /* Duration points are parsed in AV_TIME_BASE units and are rescaled here to
+     * the input time base (video) or to a sample count (audio).  Frame/sample
+     * points are already in their final units and are used as-is. */
+    if (s->durations_str) {
+        for (int i = 0; i < s->nb_points - 1; i++)
+            s->points[i] = av_rescale_q(s->points[i], AV_TIME_BASE_Q, tb);
+    }
+
+    return 0;
+}
+
+static av_cold void uninit(AVFilterContext *ctx)
+{
+    SeparateContext *s = ctx->priv;
+
+    av_freep(&s->points);
+
+    for (int i = 0; i < ctx->nb_outputs; i++)
+        av_freep(&ctx->output_pads[i].name);
+}
+
+#define OFFSET(x) offsetof(SeparateContext, x)
+#define COMMON_OPTS \
+    { "durations", "durations of input at which to separate input", OFFSET(durations_str),  AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS }, \
+
+#if CONFIG_SEPARATE_FILTER
+
+static av_cold int video_init(AVFilterContext *ctx)
+{
+    return init(ctx, AVMEDIA_TYPE_VIDEO);
+}
+
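+/* Route each consumed frame to the current output; once the configured frame
+ * count is reached, close the current output with EOF and advance to the next
+ * one.  Downstream status on the remaining outputs is forwarded back to the
+ * input. */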
+static int video_activate(AVFilterContext *ctx)
+{
+    SeparateContext *s = ctx->priv;
+    AVFrame *frame = NULL;
+    int ret, status;
+    int64_t pts;
+
+    for (int i = s->current_point; i < s->nb_points; i++) {
+        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[i], ctx);
+    }
+
+    if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &frame)) > 0) {
+        if (s->current_frame >= s->points[s->current_point]) {
+            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, frame->pts);
+            s->current_point++;
+        }
+
+        if (s->current_point >= s->nb_points) {
+            av_frame_free(&frame);
+            return AVERROR(EINVAL);
+        }
+
+        s->current_frame++;
+
+        ret = ff_filter_frame(ctx->outputs[s->current_point], frame);
+    }
+
+    if (ret < 0) {
+        return ret;
+    } else if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
+        for (int i = s->current_point; i < s->nb_points; i++)
+            ff_outlink_set_status(ctx->outputs[i], status, pts);
+        return 0;
+    } else {
+        for (int i = s->current_point; i < s->nb_points; i++) {
+            if (ff_outlink_frame_wanted(ctx->outputs[i]))
+                ff_inlink_request_frame(ctx->inputs[0]);
+        }
+        return 0;
+    }
+}
+
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption separate_options[] = {
+    { "frames", "frames at which to do separation", OFFSET(points_str), AV_OPT_TYPE_STRING,  { .str = "25" }, 0, 0, FLAGS },
+    COMMON_OPTS
+    { NULL }
+};
+#undef FLAGS
+
+AVFILTER_DEFINE_CLASS(separate);
+
+static const AVFilterPad separate_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = config_input,
+    },
+    { NULL }
+};
+
+const AVFilter ff_vf_separate = {
+    .name        = "separate",
+    .description = NULL_IF_CONFIG_SMALL("Separate video stream."),
+    .init        = video_init,
+    .uninit      = uninit,
+    .priv_size   = sizeof(SeparateContext),
+    .priv_class  = &separate_class,
+    .activate    = video_activate,
+    .inputs      = separate_inputs,
+    .outputs     = NULL,
+    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+#endif // CONFIG_SEPARATE_FILTER
+
+#if CONFIG_ASEPARATE_FILTER
+
+static av_cold int audio_init(AVFilterContext *ctx)
+{
+    return init(ctx, AVMEDIA_TYPE_AUDIO);
+}
+
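+/* Same routing logic as video_activate(), except that at most enough samples to
+ * reach the next split point are consumed at a time, so no output frame ever
+ * straddles two outputs. */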
+static int audio_activate(AVFilterContext *ctx)
+{
+    SeparateContext *s = ctx->priv;
+    AVFrame *frame = NULL;
+    int ret, status;
+    int64_t pts;
+
+    for (int i = s->current_point; i < s->nb_points; i++) {
+        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[i], ctx);
+    }
+
+    if ((ret = ff_inlink_consume_samples(ctx->inputs[0], 1,
+                                         FFMIN(s->points[s->current_point] - s->current_sample, INT_MAX),
+                                         &frame)) > 0) {
+        s->current_sample += frame->nb_samples;
+
+        if (s->current_sample >= s->points[s->current_point]) {
+            ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, frame->pts);
+            s->current_point++;
+        }
+
+        if (s->current_point >= s->nb_points) {
+            av_frame_free(&frame);
+            return AVERROR(EINVAL);
+        }
+
+        ret = ff_filter_frame(ctx->outputs[s->current_point], frame);
+    }
+
+    if (ret < 0) {
+        return ret;
+    } else if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
+        for (int i = s->current_point; i < s->nb_points; i++)
+            ff_outlink_set_status(ctx->outputs[i], status, pts);
+        return 0;
+    } else {
+        for (int i = s->current_point; i < s->nb_points; i++) {
+            if (ff_outlink_frame_wanted(ctx->outputs[i]))
+                ff_inlink_request_frame(ctx->inputs[0]);
+        }
+        return 0;
+    }
+}
+
+#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption aseparate_options[] = {
+    { "samples", "samples at which to do separation", OFFSET(points_str), AV_OPT_TYPE_STRING,  { .str = "44100" }, 0, 0, FLAGS },
+    COMMON_OPTS
+    { NULL }
+};
+#undef FLAGS
+
+AVFILTER_DEFINE_CLASS(aseparate);
+
+static const AVFilterPad aseparate_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_AUDIO,
+        .config_props = config_input,
+    },
+    { NULL }
+};
+
+const AVFilter ff_af_aseparate = {
+    .name        = "aseparate",
+    .description = NULL_IF_CONFIG_SMALL("Separate audio stream."),
+    .init        = audio_init,
+    .uninit      = uninit,
+    .priv_size   = sizeof(SeparateContext),
+    .priv_class  = &aseparate_class,
+    .activate    = audio_activate,
+    .inputs      = aseparate_inputs,
+    .outputs     = NULL,
+    .flags       = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
+};
+#endif // CONFIG_ASEPARATE_FILTER
-- 
2.17.1


