[FFmpeg-cvslog] Merge commit 'f5950b8fd61ec85e0ad8790bea56b37ceea19436'
James Almer
git at videolan.org
Thu Oct 26 22:47:26 EEST 2017
ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Thu Oct 26 16:46:11 2017 -0300| [99ba85a0e24bad519a633a1a113d838897bee0b1] | committer: James Almer
Merge commit 'f5950b8fd61ec85e0ad8790bea56b37ceea19436'
* commit 'f5950b8fd61ec85e0ad8790bea56b37ceea19436':
lavfi: Drop unused and empty header file
Merged-by: James Almer <jamrial at gmail.com>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=99ba85a0e24bad519a633a1a113d838897bee0b1
---
doc/examples/filtering_audio.c | 1 -
doc/examples/filtering_video.c | 1 -
doc/examples/transcoding.c | 1 -
libavdevice/lavfi.c | 1 -
libavfilter/Makefile | 1 -
libavfilter/avfiltergraph.h | 28 ----------------------------
6 files changed, 33 deletions(-)
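
For code tracking this change: the removed header had become a stub that only pulled in avfilter.h (see the deleted file below), so adapting a downstream source file is roughly a one-line include swap. A minimal illustrative sketch, not part of the commit:

    /* before: the now-removed header */
    #include <libavfilter/avfiltergraph.h>

    /* after: avfilter.h already declares the AVFilterGraph API */
    #include <libavfilter/avfilter.h>

    AVFilterGraph *graph = avfilter_graph_alloc();  /* usage is unchanged */
    /* ... build and configure the graph ... */
    avfilter_graph_free(&graph);
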
diff --git a/doc/examples/filtering_audio.c b/doc/examples/filtering_audio.c
index 9fc4f1cb7e..d58b9b75ea 100644
--- a/doc/examples/filtering_audio.c
+++ b/doc/examples/filtering_audio.c
@@ -32,7 +32,6 @@
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
-#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
diff --git a/doc/examples/filtering_video.c b/doc/examples/filtering_video.c
index 4e09c6fba4..fedb2e1c99 100644
--- a/doc/examples/filtering_video.c
+++ b/doc/examples/filtering_video.c
@@ -32,7 +32,6 @@
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
-#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
diff --git a/doc/examples/transcoding.c b/doc/examples/transcoding.c
index fb15a2148d..e1b3311081 100644
--- a/doc/examples/transcoding.c
+++ b/doc/examples/transcoding.c
@@ -30,7 +30,6 @@
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
-#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
diff --git a/libavdevice/lavfi.c b/libavdevice/lavfi.c
index 986e579813..5273717e6b 100644
--- a/libavdevice/lavfi.c
+++ b/libavdevice/lavfi.c
@@ -38,7 +38,6 @@
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavfilter/avfilter.h"
-#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavformat/avio_internal.h"
#include "libavformat/internal.h"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index d2f0495f37..9873532d07 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -2,7 +2,6 @@ NAME = avfilter
DESC = FFmpeg audio/video filtering library
HEADERS = avfilter.h \
- avfiltergraph.h \
buffersink.h \
buffersrc.h \
version.h \
diff --git a/libavfilter/avfiltergraph.h b/libavfilter/avfiltergraph.h
deleted file mode 100644
index b31d581ca0..0000000000
--- a/libavfilter/avfiltergraph.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Filter graphs
- * copyright (c) 2007 Bobby Bingham
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVFILTER_AVFILTERGRAPH_H
-#define AVFILTER_AVFILTERGRAPH_H
-
-#include "avfilter.h"
-#include "libavutil/log.h"
-
-#endif /* AVFILTER_AVFILTERGRAPH_H */
======================================================================
diff --cc doc/examples/filtering_audio.c
index 9fc4f1cb7e,0000000000..d58b9b75ea
mode 100644,000000..100644
--- a/doc/examples/filtering_audio.c
+++ b/doc/examples/filtering_audio.c
@@@ -1,297 -1,0 +1,296 @@@
+/*
+ * Copyright (c) 2010 Nicolas George
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2012 Clément Bœsch
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * API example for audio decoding and filtering
+ * @example filtering_audio.c
+ */
+
+#include <unistd.h>
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
- #include <libavfilter/avfiltergraph.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
+
+static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
+static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
+
+static AVFormatContext *fmt_ctx;
+static AVCodecContext *dec_ctx;
+AVFilterContext *buffersink_ctx;
+AVFilterContext *buffersrc_ctx;
+AVFilterGraph *filter_graph;
+static int audio_stream_index = -1;
+
+static int open_input_file(const char *filename)
+{
+ int ret;
+ AVCodec *dec;
+
+ if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
+ return ret;
+ }
+
+ /* select the audio stream */
+ ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
+ return ret;
+ }
+ audio_stream_index = ret;
+
+ /* create decoding context */
+ dec_ctx = avcodec_alloc_context3(dec);
+ if (!dec_ctx)
+ return AVERROR(ENOMEM);
+ avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
+ av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
+
+ /* init the audio decoder */
+ if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_filters(const char *filters_descr)
+{
+ char args[512];
+ int ret = 0;
+ AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
+ AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
+ static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
+ static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
+ static const int out_sample_rates[] = { 8000, -1 };
+ const AVFilterLink *outlink;
+ AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
+
+ filter_graph = avfilter_graph_alloc();
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ /* buffer audio source: the decoded frames from the decoder will be inserted here. */
+ if (!dec_ctx->channel_layout)
+ dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
+ snprintf(args, sizeof(args),
+ "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
+ time_base.num, time_base.den, dec_ctx->sample_rate,
+ av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
+ goto end;
+ }
+
+ /* buffer audio sink: to terminate the filter chain. */
+ ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
+ goto end;
+ }
+
+ /*
+ * Set the endpoints for the filter graph. The filter_graph will
+ * be linked to the graph described by filters_descr.
+ */
+
+ /*
+ * The buffer source output must be connected to the input pad of
+ * the first filter described by filters_descr; since the first
+ * filter input label is not specified, it is set to "in" by
+ * default.
+ */
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = buffersrc_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ /*
+ * The buffer sink input must be connected to the output pad of
+ * the last filter described by filters_descr; since the last
+ * filter output label is not specified, it is set to "out" by
+ * default.
+ */
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = buffersink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
+ &inputs, &outputs, NULL)) < 0)
+ goto end;
+
+ if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
+ goto end;
+
+ /* Print summary of the sink buffer
+ * Note: args buffer is reused to store channel layout string */
+ outlink = buffersink_ctx->inputs[0];
+ av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
+ av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
+ (int)outlink->sample_rate,
+ (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
+ args);
+
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
+}
+
+static void print_frame(const AVFrame *frame)
+{
+ const int n = frame->nb_samples * av_get_channel_layout_nb_channels(frame->channel_layout);
+ const uint16_t *p = (uint16_t*)frame->data[0];
+ const uint16_t *p_end = p + n;
+
+ while (p < p_end) {
+ fputc(*p & 0xff, stdout);
+ fputc(*p>>8 & 0xff, stdout);
+ p++;
+ }
+ fflush(stdout);
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ AVPacket packet;
+ AVFrame *frame = av_frame_alloc();
+ AVFrame *filt_frame = av_frame_alloc();
+
+ if (!frame || !filt_frame) {
+ perror("Could not allocate frame");
+ exit(1);
+ }
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
+ exit(1);
+ }
+
+ av_register_all();
+ avfilter_register_all();
+
+ if ((ret = open_input_file(argv[1])) < 0)
+ goto end;
+ if ((ret = init_filters(filter_descr)) < 0)
+ goto end;
+
+ /* read all packets */
+ while (1) {
+ if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
+ break;
+
+ if (packet.stream_index == audio_stream_index) {
+ ret = avcodec_send_packet(dec_ctx, &packet);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
+ break;
+ }
+
+ while (ret >= 0) {
+ ret = avcodec_receive_frame(dec_ctx, frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ break;
+ } else if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
+ goto end;
+ }
+
+ if (ret >= 0) {
+ /* push the audio data from decoded frame into the filtergraph */
+ if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
+ break;
+ }
+
+ /* pull filtered audio from the filtergraph */
+ while (1) {
+ ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ break;
+ if (ret < 0)
+ goto end;
+ print_frame(filt_frame);
+ av_frame_unref(filt_frame);
+ }
+ av_frame_unref(frame);
+ }
+ }
+ }
+ av_packet_unref(&packet);
+ }
+end:
+ avfilter_graph_free(&filter_graph);
+ avcodec_free_context(&dec_ctx);
+ avformat_close_input(&fmt_ctx);
+ av_frame_free(&frame);
+ av_frame_free(&filt_frame);
+
+ if (ret < 0 && ret != AVERROR_EOF) {
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
+ exit(1);
+ }
+
+ exit(0);
+}
diff --cc doc/examples/filtering_video.c
index 4e09c6fba4,0000000000..fedb2e1c99
mode 100644,000000..100644
--- a/doc/examples/filtering_video.c
+++ b/doc/examples/filtering_video.c
@@@ -1,293 -1,0 +1,292 @@@
+/*
+ * Copyright (c) 2010 Nicolas George
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * API example for decoding and filtering
+ * @example filtering_video.c
+ */
+
+#define _XOPEN_SOURCE 600 /* for usleep */
+#include <unistd.h>
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
- #include <libavfilter/avfiltergraph.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
+
+const char *filter_descr = "scale=78:24,transpose=cclock";
+/* other way:
+ scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be input output pads respectively
+ */
+
+static AVFormatContext *fmt_ctx;
+static AVCodecContext *dec_ctx;
+AVFilterContext *buffersink_ctx;
+AVFilterContext *buffersrc_ctx;
+AVFilterGraph *filter_graph;
+static int video_stream_index = -1;
+static int64_t last_pts = AV_NOPTS_VALUE;
+
+static int open_input_file(const char *filename)
+{
+ int ret;
+ AVCodec *dec;
+
+ if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
+ return ret;
+ }
+
+ /* select the video stream */
+ ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
+ return ret;
+ }
+ video_stream_index = ret;
+
+ /* create decoding context */
+ dec_ctx = avcodec_alloc_context3(dec);
+ if (!dec_ctx)
+ return AVERROR(ENOMEM);
+ avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
+ av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
+
+ /* init the video decoder */
+ if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_filters(const char *filters_descr)
+{
+ char args[512];
+ int ret = 0;
+ AVFilter *buffersrc = avfilter_get_by_name("buffer");
+ AVFilter *buffersink = avfilter_get_by_name("buffersink");
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
+ AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
+ enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
+
+ filter_graph = avfilter_graph_alloc();
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ /* buffer video source: the decoded frames from the decoder will be inserted here. */
+ snprintf(args, sizeof(args),
+ "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
+ dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
+ time_base.num, time_base.den,
+ dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
+
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
+ goto end;
+ }
+
+ /* buffer video sink: to terminate the filter chain. */
+ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
+ AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
+ goto end;
+ }
+
+ /*
+ * Set the endpoints for the filter graph. The filter_graph will
+ * be linked to the graph described by filters_descr.
+ */
+
+ /*
+ * The buffer source output must be connected to the input pad of
+ * the first filter described by filters_descr; since the first
+ * filter input label is not specified, it is set to "in" by
+ * default.
+ */
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = buffersrc_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ /*
+ * The buffer sink input must be connected to the output pad of
+ * the last filter described by filters_descr; since the last
+ * filter output label is not specified, it is set to "out" by
+ * default.
+ */
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = buffersink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
+ &inputs, &outputs, NULL)) < 0)
+ goto end;
+
+ if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
+ goto end;
+
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
+}
+
+static void display_frame(const AVFrame *frame, AVRational time_base)
+{
+ int x, y;
+ uint8_t *p0, *p;
+ int64_t delay;
+
+ if (frame->pts != AV_NOPTS_VALUE) {
+ if (last_pts != AV_NOPTS_VALUE) {
+ /* sleep roughly the right amount of time;
+ * usleep is in microseconds, just like AV_TIME_BASE. */
+ delay = av_rescale_q(frame->pts - last_pts,
+ time_base, AV_TIME_BASE_Q);
+ if (delay > 0 && delay < 1000000)
+ usleep(delay);
+ }
+ last_pts = frame->pts;
+ }
+
+ /* Trivial ASCII grayscale display. */
+ p0 = frame->data[0];
+ puts("\033c");
+ for (y = 0; y < frame->height; y++) {
+ p = p0;
+ for (x = 0; x < frame->width; x++)
+ putchar(" .-+#"[*(p++) / 52]);
+ putchar('\n');
+ p0 += frame->linesize[0];
+ }
+ fflush(stdout);
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ AVPacket packet;
+ AVFrame *frame = av_frame_alloc();
+ AVFrame *filt_frame = av_frame_alloc();
+
+ if (!frame || !filt_frame) {
+ perror("Could not allocate frame");
+ exit(1);
+ }
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s file\n", argv[0]);
+ exit(1);
+ }
+
+ av_register_all();
+ avfilter_register_all();
+
+ if ((ret = open_input_file(argv[1])) < 0)
+ goto end;
+ if ((ret = init_filters(filter_descr)) < 0)
+ goto end;
+
+ /* read all packets */
+ while (1) {
+ if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
+ break;
+
+ if (packet.stream_index == video_stream_index) {
+ ret = avcodec_send_packet(dec_ctx, &packet);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
+ break;
+ }
+
+ while (ret >= 0) {
+ ret = avcodec_receive_frame(dec_ctx, frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ break;
+ } else if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
+ goto end;
+ }
+
+ if (ret >= 0) {
+ frame->pts = frame->best_effort_timestamp;
+
+ /* push the decoded frame into the filtergraph */
+ if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
+ break;
+ }
+
+ /* pull filtered frames from the filtergraph */
+ while (1) {
+ ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ break;
+ if (ret < 0)
+ goto end;
+ display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
+ av_frame_unref(filt_frame);
+ }
+ av_frame_unref(frame);
+ }
+ }
+ }
+ av_packet_unref(&packet);
+ }
+end:
+ avfilter_graph_free(&filter_graph);
+ avcodec_free_context(&dec_ctx);
+ avformat_close_input(&fmt_ctx);
+ av_frame_free(&frame);
+ av_frame_free(&filt_frame);
+
+ if (ret < 0 && ret != AVERROR_EOF) {
+ fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
+ exit(1);
+ }
+
+ exit(0);
+}
diff --cc doc/examples/transcoding.c
index fb15a2148d,0000000000..e1b3311081
mode 100644,000000..100644
--- a/doc/examples/transcoding.c
+++ b/doc/examples/transcoding.c
@@@ -1,623 -1,0 +1,622 @@@
+/*
+ * Copyright (c) 2010 Nicolas George
+ * Copyright (c) 2011 Stefano Sabatini
+ * Copyright (c) 2014 Andrey Utkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * API example for demuxing, decoding, filtering, encoding and muxing
+ * @example transcoding.c
+ */
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
- #include <libavfilter/avfiltergraph.h>
+#include <libavfilter/buffersink.h>
+#include <libavfilter/buffersrc.h>
+#include <libavutil/opt.h>
+#include <libavutil/pixdesc.h>
+
+static AVFormatContext *ifmt_ctx;
+static AVFormatContext *ofmt_ctx;
+typedef struct FilteringContext {
+ AVFilterContext *buffersink_ctx;
+ AVFilterContext *buffersrc_ctx;
+ AVFilterGraph *filter_graph;
+} FilteringContext;
+static FilteringContext *filter_ctx;
+
+typedef struct StreamContext {
+ AVCodecContext *dec_ctx;
+ AVCodecContext *enc_ctx;
+} StreamContext;
+static StreamContext *stream_ctx;
+
+static int open_input_file(const char *filename)
+{
+ int ret;
+ unsigned int i;
+
+ ifmt_ctx = NULL;
+ if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
+ return ret;
+ }
+
+ if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
+ return ret;
+ }
+
+ stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
+ if (!stream_ctx)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ AVStream *stream = ifmt_ctx->streams[i];
+ AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
+ AVCodecContext *codec_ctx;
+ if (!dec) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
+ return AVERROR_DECODER_NOT_FOUND;
+ }
+ codec_ctx = avcodec_alloc_context3(dec);
+ if (!codec_ctx) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
+ return AVERROR(ENOMEM);
+ }
+ ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
+ "for stream #%u\n", i);
+ return ret;
+ }
+ /* Reencode video & audio and remux subtitles etc. */
+ if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
+ || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
+ codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
+ /* Open decoder */
+ ret = avcodec_open2(codec_ctx, dec, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
+ return ret;
+ }
+ }
+ stream_ctx[i].dec_ctx = codec_ctx;
+ }
+
+ av_dump_format(ifmt_ctx, 0, filename, 0);
+ return 0;
+}
+
+static int open_output_file(const char *filename)
+{
+ AVStream *out_stream;
+ AVStream *in_stream;
+ AVCodecContext *dec_ctx, *enc_ctx;
+ AVCodec *encoder;
+ int ret;
+ unsigned int i;
+
+ ofmt_ctx = NULL;
+ avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
+ if (!ofmt_ctx) {
+ av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
+ return AVERROR_UNKNOWN;
+ }
+
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ out_stream = avformat_new_stream(ofmt_ctx, NULL);
+ if (!out_stream) {
+ av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
+ return AVERROR_UNKNOWN;
+ }
+
+ in_stream = ifmt_ctx->streams[i];
+ dec_ctx = stream_ctx[i].dec_ctx;
+
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
+ || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ /* in this example, we choose transcoding to same codec */
+ encoder = avcodec_find_encoder(dec_ctx->codec_id);
+ if (!encoder) {
+ av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
+ return AVERROR_INVALIDDATA;
+ }
+ enc_ctx = avcodec_alloc_context3(encoder);
+ if (!enc_ctx) {
+ av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
+ return AVERROR(ENOMEM);
+ }
+
+ /* In this example, we transcode to same properties (picture size,
+ * sample rate etc.). These properties can be changed for output
+ * streams easily using filters */
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ enc_ctx->height = dec_ctx->height;
+ enc_ctx->width = dec_ctx->width;
+ enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
+ /* take first format from list of supported formats */
+ if (encoder->pix_fmts)
+ enc_ctx->pix_fmt = encoder->pix_fmts[0];
+ else
+ enc_ctx->pix_fmt = dec_ctx->pix_fmt;
+ /* video time_base can be set to whatever is handy and supported by encoder */
+ enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
+ } else {
+ enc_ctx->sample_rate = dec_ctx->sample_rate;
+ enc_ctx->channel_layout = dec_ctx->channel_layout;
+ enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
+ /* take first format from list of supported formats */
+ enc_ctx->sample_fmt = encoder->sample_fmts[0];
+ enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
+ }
+
+ /* Third parameter can be used to pass settings to encoder */
+ ret = avcodec_open2(enc_ctx, encoder, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
+ return ret;
+ }
+ ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
+ return ret;
+ }
+ if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
+ enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+
+ out_stream->time_base = enc_ctx->time_base;
+ stream_ctx[i].enc_ctx = enc_ctx;
+ } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
+ av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
+ return AVERROR_INVALIDDATA;
+ } else {
+ /* if this stream must be remuxed */
+ ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
+ return ret;
+ }
+ out_stream->time_base = in_stream->time_base;
+ }
+
+ }
+ av_dump_format(ofmt_ctx, 0, filename, 1);
+
+ if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
+ ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
+ return ret;
+ }
+ }
+
+ /* init muxer, write output file header */
+ ret = avformat_write_header(ofmt_ctx, NULL);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
+ AVCodecContext *enc_ctx, const char *filter_spec)
+{
+ char args[512];
+ int ret = 0;
+ AVFilter *buffersrc = NULL;
+ AVFilter *buffersink = NULL;
+ AVFilterContext *buffersrc_ctx = NULL;
+ AVFilterContext *buffersink_ctx = NULL;
+ AVFilterInOut *outputs = avfilter_inout_alloc();
+ AVFilterInOut *inputs = avfilter_inout_alloc();
+ AVFilterGraph *filter_graph = avfilter_graph_alloc();
+
+ if (!outputs || !inputs || !filter_graph) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ buffersrc = avfilter_get_by_name("buffer");
+ buffersink = avfilter_get_by_name("buffersink");
+ if (!buffersrc || !buffersink) {
+ av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ snprintf(args, sizeof(args),
+ "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
+ dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
+ dec_ctx->time_base.num, dec_ctx->time_base.den,
+ dec_ctx->sample_aspect_ratio.num,
+ dec_ctx->sample_aspect_ratio.den);
+
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
+ goto end;
+ }
+
+ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
+ (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
+ goto end;
+ }
+ } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ buffersrc = avfilter_get_by_name("abuffer");
+ buffersink = avfilter_get_by_name("abuffersink");
+ if (!buffersrc || !buffersink) {
+ av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ if (!dec_ctx->channel_layout)
+ dec_ctx->channel_layout =
+ av_get_default_channel_layout(dec_ctx->channels);
+ snprintf(args, sizeof(args),
+ "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
+ dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
+ av_get_sample_fmt_name(dec_ctx->sample_fmt),
+ dec_ctx->channel_layout);
+ ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
+ args, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
+ goto end;
+ }
+
+ ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
+ NULL, NULL, filter_graph);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
+ (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
+ (uint8_t*)&enc_ctx->channel_layout,
+ sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
+ goto end;
+ }
+
+ ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
+ (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
+ goto end;
+ }
+ } else {
+ ret = AVERROR_UNKNOWN;
+ goto end;
+ }
+
+ /* Endpoints for the filter graph. */
+ outputs->name = av_strdup("in");
+ outputs->filter_ctx = buffersrc_ctx;
+ outputs->pad_idx = 0;
+ outputs->next = NULL;
+
+ inputs->name = av_strdup("out");
+ inputs->filter_ctx = buffersink_ctx;
+ inputs->pad_idx = 0;
+ inputs->next = NULL;
+
+ if (!outputs->name || !inputs->name) {
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
+ &inputs, &outputs, NULL)) < 0)
+ goto end;
+
+ if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
+ goto end;
+
+ /* Fill FilteringContext */
+ fctx->buffersrc_ctx = buffersrc_ctx;
+ fctx->buffersink_ctx = buffersink_ctx;
+ fctx->filter_graph = filter_graph;
+
+end:
+ avfilter_inout_free(&inputs);
+ avfilter_inout_free(&outputs);
+
+ return ret;
+}
+
+static int init_filters(void)
+{
+ const char *filter_spec;
+ unsigned int i;
+ int ret;
+ filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
+ if (!filter_ctx)
+ return AVERROR(ENOMEM);
+
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ filter_ctx[i].buffersrc_ctx = NULL;
+ filter_ctx[i].buffersink_ctx = NULL;
+ filter_ctx[i].filter_graph = NULL;
+ if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
+ || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
+ continue;
+
+
+ if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
+ filter_spec = "null"; /* passthrough (dummy) filter for video */
+ else
+ filter_spec = "anull"; /* passthrough (dummy) filter for audio */
+ ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
+ stream_ctx[i].enc_ctx, filter_spec);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
+ int ret;
+ int got_frame_local;
+ AVPacket enc_pkt;
+ int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
+ (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
+ AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
+
+ if (!got_frame)
+ got_frame = &got_frame_local;
+
+ av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
+ /* encode filtered frame */
+ enc_pkt.data = NULL;
+ enc_pkt.size = 0;
+ av_init_packet(&enc_pkt);
+ ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
+ filt_frame, got_frame);
+ av_frame_free(&filt_frame);
+ if (ret < 0)
+ return ret;
+ if (!(*got_frame))
+ return 0;
+
+ /* prepare packet for muxing */
+ enc_pkt.stream_index = stream_index;
+ av_packet_rescale_ts(&enc_pkt,
+ stream_ctx[stream_index].enc_ctx->time_base,
+ ofmt_ctx->streams[stream_index]->time_base);
+
+ av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
+ /* mux encoded frame */
+ ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
+ return ret;
+}
+
+static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
+{
+ int ret;
+ AVFrame *filt_frame;
+
+ av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
+ /* push the decoded frame into the filtergraph */
+ ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
+ frame, 0);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
+ return ret;
+ }
+
+ /* pull filtered frames from the filtergraph */
+ while (1) {
+ filt_frame = av_frame_alloc();
+ if (!filt_frame) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
+ ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
+ filt_frame);
+ if (ret < 0) {
+ /* if no more frames for output - returns AVERROR(EAGAIN)
+ * if flushed and no more frames for output - returns AVERROR_EOF
+ * rewrite retcode to 0 to show it as normal procedure completion
+ */
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ ret = 0;
+ av_frame_free(&filt_frame);
+ break;
+ }
+
+ filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
+ ret = encode_write_frame(filt_frame, stream_index, NULL);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int flush_encoder(unsigned int stream_index)
+{
+ int ret;
+ int got_frame;
+
+ if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
+ AV_CODEC_CAP_DELAY))
+ return 0;
+
+ while (1) {
+ av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
+ ret = encode_write_frame(NULL, stream_index, &got_frame);
+ if (ret < 0)
+ break;
+ if (!got_frame)
+ return 0;
+ }
+ return ret;
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+ AVPacket packet = { .data = NULL, .size = 0 };
+ AVFrame *frame = NULL;
+ enum AVMediaType type;
+ unsigned int stream_index;
+ unsigned int i;
+ int got_frame;
+ int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
+
+ if (argc != 3) {
+ av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
+ return 1;
+ }
+
+ av_register_all();
+ avfilter_register_all();
+
+ if ((ret = open_input_file(argv[1])) < 0)
+ goto end;
+ if ((ret = open_output_file(argv[2])) < 0)
+ goto end;
+ if ((ret = init_filters()) < 0)
+ goto end;
+
+ /* read all packets */
+ while (1) {
+ if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
+ break;
+ stream_index = packet.stream_index;
+ type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
+ av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
+ stream_index);
+
+ if (filter_ctx[stream_index].filter_graph) {
+ av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
+ frame = av_frame_alloc();
+ if (!frame) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_packet_rescale_ts(&packet,
+ ifmt_ctx->streams[stream_index]->time_base,
+ stream_ctx[stream_index].dec_ctx->time_base);
+ dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
+ avcodec_decode_audio4;
+ ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
+ &got_frame, &packet);
+ if (ret < 0) {
+ av_frame_free(&frame);
+ av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
+ break;
+ }
+
+ if (got_frame) {
+ frame->pts = frame->best_effort_timestamp;
+ ret = filter_encode_write_frame(frame, stream_index);
+ av_frame_free(&frame);
+ if (ret < 0)
+ goto end;
+ } else {
+ av_frame_free(&frame);
+ }
+ } else {
+ /* remux this frame without reencoding */
+ av_packet_rescale_ts(&packet,
+ ifmt_ctx->streams[stream_index]->time_base,
+ ofmt_ctx->streams[stream_index]->time_base);
+
+ ret = av_interleaved_write_frame(ofmt_ctx, &packet);
+ if (ret < 0)
+ goto end;
+ }
+ av_packet_unref(&packet);
+ }
+
+ /* flush filters and encoders */
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ /* flush filter */
+ if (!filter_ctx[i].filter_graph)
+ continue;
+ ret = filter_encode_write_frame(NULL, i);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
+ goto end;
+ }
+
+ /* flush encoder */
+ ret = flush_encoder(i);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
+ goto end;
+ }
+ }
+
+ av_write_trailer(ofmt_ctx);
+end:
+ av_packet_unref(&packet);
+ av_frame_free(&frame);
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ avcodec_free_context(&stream_ctx[i].dec_ctx);
+ if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
+ avcodec_free_context(&stream_ctx[i].enc_ctx);
+ if (filter_ctx && filter_ctx[i].filter_graph)
+ avfilter_graph_free(&filter_ctx[i].filter_graph);
+ }
+ av_free(filter_ctx);
+ av_free(stream_ctx);
+ avformat_close_input(&ifmt_ctx);
+ if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
+ avio_closep(&ofmt_ctx->pb);
+ avformat_free_context(ofmt_ctx);
+
+ if (ret < 0)
+ av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
+
+ return ret ? 1 : 0;
+}
diff --cc libavdevice/lavfi.c
index 986e579813,0000000000..5273717e6b
mode 100644,000000..100644
--- a/libavdevice/lavfi.c
+++ b/libavdevice/lavfi.c
@@@ -1,516 -1,0 +1,515 @@@
+/*
+ * Copyright (c) 2011 Stefano Sabatini
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * libavfilter virtual input device
+ */
+
+/* #define DEBUG */
+
+#include <float.h> /* DBL_MIN, DBL_MAX */
+
+#include "libavutil/bprint.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/file.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/log.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/parseutils.h"
+#include "libavutil/pixdesc.h"
+#include "libavfilter/avfilter.h"
- #include "libavfilter/avfiltergraph.h"
+#include "libavfilter/buffersink.h"
+#include "libavformat/avio_internal.h"
+#include "libavformat/internal.h"
+#include "avdevice.h"
+
+typedef struct {
+ AVClass *class; ///< class for private options
+ char *graph_str;
+ char *graph_filename;
+ char *dump_graph;
+ AVFilterGraph *graph;
+ AVFilterContext **sinks;
+ int *sink_stream_map;
+ int *sink_eof;
+ int *stream_sink_map;
+ int *sink_stream_subcc_map;
+ AVFrame *decoded_frame;
+ int nb_sinks;
+ AVPacket subcc_packet;
+} LavfiContext;
+
+static int *create_all_formats(int n)
+{
+ int i, j, *fmts, count = 0;
+
+ for (i = 0; i < n; i++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ count++;
+ }
+
+ if (!(fmts = av_malloc((count+1) * sizeof(int))))
+ return NULL;
+ for (j = 0, i = 0; i < n; i++) {
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
+ if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
+ fmts[j++] = i;
+ }
+ fmts[j] = -1;
+ return fmts;
+}
+
+av_cold static int lavfi_read_close(AVFormatContext *avctx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+
+ av_freep(&lavfi->sink_stream_map);
+ av_freep(&lavfi->sink_eof);
+ av_freep(&lavfi->stream_sink_map);
+ av_freep(&lavfi->sink_stream_subcc_map);
+ av_freep(&lavfi->sinks);
+ avfilter_graph_free(&lavfi->graph);
+ av_frame_free(&lavfi->decoded_frame);
+
+ return 0;
+}
+
+static int create_subcc_streams(AVFormatContext *avctx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ AVStream *st;
+ int stream_idx, sink_idx;
+
+ for (stream_idx = 0; stream_idx < lavfi->nb_sinks; stream_idx++) {
+ sink_idx = lavfi->stream_sink_map[stream_idx];
+ if (lavfi->sink_stream_subcc_map[sink_idx]) {
+ lavfi->sink_stream_subcc_map[sink_idx] = avctx->nb_streams;
+ if (!(st = avformat_new_stream(avctx, NULL)))
+ return AVERROR(ENOMEM);
+ st->codecpar->codec_id = AV_CODEC_ID_EIA_608;
+ st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
+ } else {
+ lavfi->sink_stream_subcc_map[sink_idx] = -1;
+ }
+ }
+ return 0;
+}
+
+av_cold static int lavfi_read_header(AVFormatContext *avctx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
+ const AVFilter *buffersink, *abuffersink;
+ int *pix_fmts = create_all_formats(AV_PIX_FMT_NB);
+ enum AVMediaType type;
+ int ret = 0, i, n;
+
+#define FAIL(ERR) { ret = ERR; goto end; }
+
+ if (!pix_fmts)
+ FAIL(AVERROR(ENOMEM));
+
+ avfilter_register_all();
+
+ buffersink = avfilter_get_by_name("buffersink");
+ abuffersink = avfilter_get_by_name("abuffersink");
+
+ if (lavfi->graph_filename && lavfi->graph_str) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Only one of the graph or graph_file options must be specified\n");
+ FAIL(AVERROR(EINVAL));
+ }
+
+ if (lavfi->graph_filename) {
+ AVBPrint graph_file_pb;
+ AVIOContext *avio = NULL;
+ AVDictionary *options = NULL;
+ if (avctx->protocol_whitelist && (ret = av_dict_set(&options, "protocol_whitelist", avctx->protocol_whitelist, 0)) < 0)
+ goto end;
+ ret = avio_open2(&avio, lavfi->graph_filename, AVIO_FLAG_READ, &avctx->interrupt_callback, &options);
+ av_dict_set(&options, "protocol_whitelist", NULL, 0);
+ if (ret < 0)
+ goto end;
+ av_bprint_init(&graph_file_pb, 0, AV_BPRINT_SIZE_UNLIMITED);
+ ret = avio_read_to_bprint(avio, &graph_file_pb, INT_MAX);
+ avio_closep(&avio);
+ av_bprint_chars(&graph_file_pb, '\0', 1);
+ if (!ret && !av_bprint_is_complete(&graph_file_pb))
+ ret = AVERROR(ENOMEM);
+ if (ret) {
+ av_bprint_finalize(&graph_file_pb, NULL);
+ goto end;
+ }
+ if ((ret = av_bprint_finalize(&graph_file_pb, &lavfi->graph_str)))
+ goto end;
+ }
+
+ if (!lavfi->graph_str)
+ lavfi->graph_str = av_strdup(avctx->filename);
+
+ /* parse the graph, create a stream for each open output */
+ if (!(lavfi->graph = avfilter_graph_alloc()))
+ FAIL(AVERROR(ENOMEM));
+
+ if ((ret = avfilter_graph_parse_ptr(lavfi->graph, lavfi->graph_str,
+ &input_links, &output_links, avctx)) < 0)
+ goto end;
+
+ if (input_links) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Open inputs in the filtergraph are not acceptable\n");
+ FAIL(AVERROR(EINVAL));
+ }
+
+ /* count the outputs */
+ for (n = 0, inout = output_links; inout; n++, inout = inout->next);
+ lavfi->nb_sinks = n;
+
+ if (!(lavfi->sink_stream_map = av_malloc(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+ if (!(lavfi->sink_eof = av_mallocz(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+ if (!(lavfi->stream_sink_map = av_malloc(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+ if (!(lavfi->sink_stream_subcc_map = av_malloc(sizeof(int) * n)))
+ FAIL(AVERROR(ENOMEM));
+
+ for (i = 0; i < n; i++)
+ lavfi->stream_sink_map[i] = -1;
+
+ /* parse the output link names - they need to be of the form out0, out1, ...
+ * create a mapping between them and the streams */
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ int stream_idx = 0, suffix = 0, use_subcc = 0;
+ sscanf(inout->name, "out%n%d%n", &suffix, &stream_idx, &suffix);
+ if (!suffix) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid outpad name '%s'\n", inout->name);
+ FAIL(AVERROR(EINVAL));
+ }
+ if (inout->name[suffix]) {
+ if (!strcmp(inout->name + suffix, "+subcc")) {
+ use_subcc = 1;
+ } else {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid outpad suffix '%s'\n", inout->name);
+ FAIL(AVERROR(EINVAL));
+ }
+ }
+
+ if ((unsigned)stream_idx >= n) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Invalid index was specified in output '%s', "
+ "must be a non-negative value < %d\n",
+ inout->name, n);
+ FAIL(AVERROR(EINVAL));
+ }
+
+ if (lavfi->stream_sink_map[stream_idx] != -1) {
+ av_log(avctx, AV_LOG_ERROR,
+ "An output with stream index %d was already specified\n",
+ stream_idx);
+ FAIL(AVERROR(EINVAL));
+ }
+ lavfi->sink_stream_map[i] = stream_idx;
+ lavfi->stream_sink_map[stream_idx] = i;
+ lavfi->sink_stream_subcc_map[i] = !!use_subcc;
+ }
+
+ /* for each open output create a corresponding stream */
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ AVStream *st;
+ if (!(st = avformat_new_stream(avctx, NULL)))
+ FAIL(AVERROR(ENOMEM));
+ st->id = i;
+ }
+
+ /* create a sink for each output and connect them to the graph */
+ lavfi->sinks = av_malloc_array(lavfi->nb_sinks, sizeof(AVFilterContext *));
+ if (!lavfi->sinks)
+ FAIL(AVERROR(ENOMEM));
+
+ for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
+ AVFilterContext *sink;
+
+ type = avfilter_pad_get_type(inout->filter_ctx->output_pads, inout->pad_idx);
+
+ if (type == AVMEDIA_TYPE_VIDEO && ! buffersink ||
+ type == AVMEDIA_TYPE_AUDIO && ! abuffersink) {
+ av_log(avctx, AV_LOG_ERROR, "Missing required buffersink filter, aborting.\n");
+ FAIL(AVERROR_FILTER_NOT_FOUND);
+ }
+
+ if (type == AVMEDIA_TYPE_VIDEO) {
+ ret = avfilter_graph_create_filter(&sink, buffersink,
+ inout->name, NULL,
+ NULL, lavfi->graph);
+ if (ret >= 0)
+ ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ } else if (type == AVMEDIA_TYPE_AUDIO) {
+ enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_U8,
+ AV_SAMPLE_FMT_S16,
+ AV_SAMPLE_FMT_S32,
+ AV_SAMPLE_FMT_FLT,
+ AV_SAMPLE_FMT_DBL, -1 };
+
+ ret = avfilter_graph_create_filter(&sink, abuffersink,
+ inout->name, NULL,
+ NULL, lavfi->graph);
+ if (ret >= 0)
+ ret = av_opt_set_int_list(sink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ ret = av_opt_set_int(sink, "all_channel_counts", 1,
+ AV_OPT_SEARCH_CHILDREN);
+ if (ret < 0)
+ goto end;
+ } else {
+ av_log(avctx, AV_LOG_ERROR,
+ "Output '%s' is not a video or audio output, not yet supported\n", inout->name);
+ FAIL(AVERROR(EINVAL));
+ }
+
+ lavfi->sinks[i] = sink;
+ if ((ret = avfilter_link(inout->filter_ctx, inout->pad_idx, sink, 0)) < 0)
+ goto end;
+ }
+
+ /* configure the graph */
+ if ((ret = avfilter_graph_config(lavfi->graph, avctx)) < 0)
+ goto end;
+
+ if (lavfi->dump_graph) {
+ char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
+ fputs(dump, stderr);
+ fflush(stderr);
+ av_free(dump);
+ }
+
+ /* fill each stream with the information in the corresponding sink */
+ for (i = 0; i < lavfi->nb_sinks; i++) {
+ AVFilterContext *sink = lavfi->sinks[lavfi->stream_sink_map[i]];
+ AVRational time_base = av_buffersink_get_time_base(sink);
+ AVStream *st = avctx->streams[i];
+ st->codecpar->codec_type = av_buffersink_get_type(sink);
+ avpriv_set_pts_info(st, 64, time_base.num, time_base.den);
+ if (av_buffersink_get_type(sink) == AVMEDIA_TYPE_VIDEO) {
+ st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
+ st->codecpar->format = av_buffersink_get_format(sink);
+ st->codecpar->width = av_buffersink_get_w(sink);
+ st->codecpar->height = av_buffersink_get_h(sink);
+ st ->sample_aspect_ratio =
+ st->codecpar->sample_aspect_ratio = av_buffersink_get_sample_aspect_ratio(sink);
+ avctx->probesize = FFMAX(avctx->probesize,
+ av_buffersink_get_w(sink) * av_buffersink_get_h(sink) *
+ av_get_padded_bits_per_pixel(av_pix_fmt_desc_get(av_buffersink_get_format(sink))) *
+ 30);
+ } else if (av_buffersink_get_type(sink) == AVMEDIA_TYPE_AUDIO) {
+ st->codecpar->codec_id = av_get_pcm_codec(av_buffersink_get_format(sink), -1);
+ st->codecpar->channels = av_buffersink_get_channels(sink);
+ st->codecpar->format = av_buffersink_get_format(sink);
+ st->codecpar->sample_rate = av_buffersink_get_sample_rate(sink);
+ st->codecpar->channel_layout = av_buffersink_get_channel_layout(sink);
+ if (st->codecpar->codec_id == AV_CODEC_ID_NONE)
+ av_log(avctx, AV_LOG_ERROR,
+ "Could not find PCM codec for sample format %s.\n",
+ av_get_sample_fmt_name(av_buffersink_get_format(sink)));
+ }
+ }
+
+ if ((ret = create_subcc_streams(avctx)) < 0)
+ goto end;
+
+ if (!(lavfi->decoded_frame = av_frame_alloc()))
+ FAIL(AVERROR(ENOMEM));
+
+end:
+ av_free(pix_fmts);
+ avfilter_inout_free(&input_links);
+ avfilter_inout_free(&output_links);
+ if (ret < 0)
+ lavfi_read_close(avctx);
+ return ret;
+}
+
+static int create_subcc_packet(AVFormatContext *avctx, AVFrame *frame,
+ int sink_idx)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ AVFrameSideData *sd;
+ int stream_idx, i, ret;
+
+ if ((stream_idx = lavfi->sink_stream_subcc_map[sink_idx]) < 0)
+ return 0;
+ for (i = 0; i < frame->nb_side_data; i++)
+ if (frame->side_data[i]->type == AV_FRAME_DATA_A53_CC)
+ break;
+ if (i >= frame->nb_side_data)
+ return 0;
+ sd = frame->side_data[i];
+ if ((ret = av_new_packet(&lavfi->subcc_packet, sd->size)) < 0)
+ return ret;
+ memcpy(lavfi->subcc_packet.data, sd->data, sd->size);
+ lavfi->subcc_packet.stream_index = stream_idx;
+ lavfi->subcc_packet.pts = frame->pts;
+ lavfi->subcc_packet.pos = frame->pkt_pos;
+ return 0;
+}
+
+static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+ LavfiContext *lavfi = avctx->priv_data;
+ double min_pts = DBL_MAX;
+ int stream_idx, min_pts_sink_idx = 0;
+ AVFrame *frame = lavfi->decoded_frame;
+ AVDictionary *frame_metadata;
+ int ret, i;
+ int size = 0;
+
+ if (lavfi->subcc_packet.size) {
+ *pkt = lavfi->subcc_packet;
+ av_init_packet(&lavfi->subcc_packet);
+ lavfi->subcc_packet.size = 0;
+ lavfi->subcc_packet.data = NULL;
+ return pkt->size;
+ }
+
+ /* iterate through all the graph sinks. Select the sink with the
+ * minimum PTS */
+ for (i = 0; i < lavfi->nb_sinks; i++) {
+ AVRational tb = av_buffersink_get_time_base(lavfi->sinks[i]);
+ double d;
+ int ret;
+
+ if (lavfi->sink_eof[i])
+ continue;
+
+ ret = av_buffersink_get_frame_flags(lavfi->sinks[i], frame,
+ AV_BUFFERSINK_FLAG_PEEK);
+ if (ret == AVERROR_EOF) {
+ ff_dlog(avctx, "EOF sink_idx:%d\n", i);
+ lavfi->sink_eof[i] = 1;
+ continue;
+ } else if (ret < 0)
+ return ret;
+ d = av_rescale_q_rnd(frame->pts, tb, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ ff_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
+ av_frame_unref(frame);
+
+ if (d < min_pts) {
+ min_pts = d;
+ min_pts_sink_idx = i;
+ }
+ }
+ if (min_pts == DBL_MAX)
+ return AVERROR_EOF;
+
+ ff_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx);
+
+ av_buffersink_get_frame_flags(lavfi->sinks[min_pts_sink_idx], frame, 0);
+ stream_idx = lavfi->sink_stream_map[min_pts_sink_idx];
+
+ if (frame->width /* FIXME best way of testing a video */) {
+ size = av_image_get_buffer_size(frame->format, frame->width, frame->height, 1);
+ if ((ret = av_new_packet(pkt, size)) < 0)
+ return ret;
+
+ av_image_copy_to_buffer(pkt->data, size, (const uint8_t **)frame->data, frame->linesize,
+ frame->format, frame->width, frame->height, 1);
+ } else if (frame->channels /* FIXME test audio */) {
+ size = frame->nb_samples * av_get_bytes_per_sample(frame->format) *
+ frame->channels;
+ if ((ret = av_new_packet(pkt, size)) < 0)
+ return ret;
+ memcpy(pkt->data, frame->data[0], size);
+ }
+
+ frame_metadata = frame->metadata;
+ if (frame_metadata) {
+ uint8_t *metadata;
+ AVDictionaryEntry *e = NULL;
+ AVBPrint meta_buf;
+
+ av_bprint_init(&meta_buf, 0, AV_BPRINT_SIZE_UNLIMITED);
+ while ((e = av_dict_get(frame_metadata, "", e, AV_DICT_IGNORE_SUFFIX))) {
+ av_bprintf(&meta_buf, "%s", e->key);
+ av_bprint_chars(&meta_buf, '\0', 1);
+ av_bprintf(&meta_buf, "%s", e->value);
+ av_bprint_chars(&meta_buf, '\0', 1);
+ }
+ if (!av_bprint_is_complete(&meta_buf) ||
+ !(metadata = av_packet_new_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA,
+ meta_buf.len))) {
+ av_bprint_finalize(&meta_buf, NULL);
+ return AVERROR(ENOMEM);
+ }
+ memcpy(metadata, meta_buf.str, meta_buf.len);
+ av_bprint_finalize(&meta_buf, NULL);
+ }
+
+ if ((ret = create_subcc_packet(avctx, frame, min_pts_sink_idx)) < 0) {
+ av_frame_unref(frame);
+ av_packet_unref(pkt);
+ return ret;
+ }
+
+ pkt->stream_index = stream_idx;
+ pkt->pts = frame->pts;
+ pkt->pos = frame->pkt_pos;
+ pkt->size = size;
+ av_frame_unref(frame);
+ return size;
+}
+
+#define OFFSET(x) offsetof(LavfiContext, x)
+
+#define DEC AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+ { "graph", "set libavfilter graph", OFFSET(graph_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { "graph_file","set libavfilter graph filename", OFFSET(graph_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC},
+ { "dumpgraph", "dump graph to stderr", OFFSET(dump_graph), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
+ { NULL },
+};
+
+static const AVClass lavfi_class = {
+ .class_name = "lavfi indev",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+ .category = AV_CLASS_CATEGORY_DEVICE_INPUT,
+};
+
+AVInputFormat ff_lavfi_demuxer = {
+ .name = "lavfi",
+ .long_name = NULL_IF_CONFIG_SMALL("Libavfilter virtual input device"),
+ .priv_data_size = sizeof(LavfiContext),
+ .read_header = lavfi_read_header,
+ .read_packet = lavfi_read_packet,
+ .read_close = lavfi_read_close,
+ .flags = AVFMT_NOFILE,
+ .priv_class = &lavfi_class,
+};
diff --cc libavfilter/Makefile
index d2f0495f37,348ad92116..9873532d07
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@@ -1,8 -1,7 +1,7 @@@
NAME = avfilter
-DESC = Libav video filtering library
+DESC = FFmpeg audio/video filtering library
HEADERS = avfilter.h \
- avfiltergraph.h \
buffersink.h \
buffersrc.h \
version.h \