[FFmpeg-cvslog] Merge commit '328cd2b599bc2d0d38f3c12606fa2a66eeec016e'
James Almer
git at videolan.org
Fri Apr 7 06:48:33 EEST 2017
ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Fri Apr 7 00:42:38 2017 -0300| [bd9057e74bcddd8893596e02d52e938e63c8cd6a] | committer: James Almer
Merge commit '328cd2b599bc2d0d38f3c12606fa2a66eeec016e'
* commit '328cd2b599bc2d0d38f3c12606fa2a66eeec016e':
lavc: move encoding-related code from utils.c to a new file
Merged-by: James Almer <jamrial at gmail.com>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=bd9057e74bcddd8893596e02d52e938e63c8cd6a
---
libavcodec/Makefile | 1 +
libavcodec/encode.c | 454 ++++++++++++++++++++++++++++++++++++++++++++++++++++
libavcodec/utils.c | 424 ------------------------------------------------
3 files changed, 455 insertions(+), 424 deletions(-)
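[Editorial note, not part of the commit] The code moved into encode.c is the encoder half of libavcodec's send/receive API (avcodec_send_frame(), avcodec_receive_packet() and the do_encode() emulation over the old encode2-based entry points). For orientation only, here is a minimal sketch of how a caller drives that API; encode_one(), enc and the frame variable are illustrative assumptions, not code from this commit.

#include "libavcodec/avcodec.h"

/* Illustrative sketch only: feed one frame (or NULL to drain) into an
 * already-opened encoder context and pull out any resulting packets. */
static int encode_one(AVCodecContext *enc, const AVFrame *frame)
{
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;   /* let libavcodec allocate the output buffer */
    pkt.size = 0;

    /* A NULL frame puts the encoder into draining mode, matching the
     * draining flag handled in avcodec_send_frame() below. */
    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;              /* needs more input, or fully drained */
        if (ret < 0)
            return ret;            /* genuine error */

        /* consume pkt here (write it out, mux it, ...) */
        av_packet_unref(&pkt);
    }
    return 0;
}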
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index a2f200e..e33f49d 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -33,6 +33,7 @@ OBJS = allcodecs.o \
d3d11va.o \
dirac.o \
dv_profile.o \
+ encode.o \
imgconvert.o \
jni.o \
mathtables.o \
diff --git a/libavcodec/encode.c b/libavcodec/encode.c
new file mode 100644
index 0000000..6e9d487
--- /dev/null
+++ b/libavcodec/encode.c
@@ -0,0 +1,454 @@
+/*
+ * generic encoding-related code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/attributes.h"
+#include "libavutil/avassert.h"
+#include "libavutil/frame.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/internal.h"
+#include "libavutil/samplefmt.h"
+
+#include "avcodec.h"
+#include "frame_thread_encoder.h"
+#include "internal.h"
+
+int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
+{
+ if (avpkt->size < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
+ return AVERROR(EINVAL);
+ }
+ if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
+ size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
+ return AVERROR(EINVAL);
+ }
+
+ if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
+ av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
+ if (!avpkt->data || avpkt->size < size) {
+ av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
+ avpkt->data = avctx->internal->byte_buffer;
+ avpkt->size = avctx->internal->byte_buffer_size;
+ }
+ }
+
+ if (avpkt->data) {
+ AVBufferRef *buf = avpkt->buf;
+
+ if (avpkt->size < size) {
+ av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
+ return AVERROR(EINVAL);
+ }
+
+ av_init_packet(avpkt);
+ avpkt->buf = buf;
+ avpkt->size = size;
+ return 0;
+ } else {
+ int ret = av_new_packet(avpkt, size);
+ if (ret < 0)
+ av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
+ return ret;
+ }
+}
+
+int ff_alloc_packet(AVPacket *avpkt, int size)
+{
+ return ff_alloc_packet2(NULL, avpkt, size, 0);
+}
+
+/**
+ * Pad last frame with silence.
+ */
+static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
+{
+ AVFrame *frame = NULL;
+ int ret;
+
+ if (!(frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+
+ frame->format = src->format;
+ frame->channel_layout = src->channel_layout;
+ av_frame_set_channels(frame, av_frame_get_channels(src));
+ frame->nb_samples = s->frame_size;
+ ret = av_frame_get_buffer(frame, 32);
+ if (ret < 0)
+ goto fail;
+
+ ret = av_frame_copy_props(frame, src);
+ if (ret < 0)
+ goto fail;
+
+ if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
+ src->nb_samples, s->channels, s->sample_fmt)) < 0)
+ goto fail;
+ if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
+ frame->nb_samples - src->nb_samples,
+ s->channels, s->sample_fmt)) < 0)
+ goto fail;
+
+ *dst = frame;
+
+ return 0;
+
+fail:
+ av_frame_free(&frame);
+ return ret;
+}
+
+int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
+ AVPacket *avpkt,
+ const AVFrame *frame,
+ int *got_packet_ptr)
+{
+ AVFrame *extended_frame = NULL;
+ AVFrame *padded_frame = NULL;
+ int ret;
+ AVPacket user_pkt = *avpkt;
+ int needs_realloc = !user_pkt.data;
+
+ *got_packet_ptr = 0;
+
+ if (!avctx->codec->encode2) {
+ av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
+ return AVERROR(ENOSYS);
+ }
+
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
+ av_packet_unref(avpkt);
+ av_init_packet(avpkt);
+ return 0;
+ }
+
+ /* ensure that extended_data is properly set */
+ if (frame && !frame->extended_data) {
+ if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
+ avctx->channels > AV_NUM_DATA_POINTERS) {
+ av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
+ "with more than %d channels, but extended_data is not set.\n",
+ AV_NUM_DATA_POINTERS);
+ return AVERROR(EINVAL);
+ }
+ av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
+
+ extended_frame = av_frame_alloc();
+ if (!extended_frame)
+ return AVERROR(ENOMEM);
+
+ memcpy(extended_frame, frame, sizeof(AVFrame));
+ extended_frame->extended_data = extended_frame->data;
+ frame = extended_frame;
+ }
+
+ /* extract audio service type metadata */
+ if (frame) {
+ AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
+ if (sd && sd->size >= sizeof(enum AVAudioServiceType))
+ avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
+ }
+
+ /* check for valid frame size */
+ if (frame) {
+ if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
+ if (frame->nb_samples > avctx->frame_size) {
+ av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
+ if (frame->nb_samples < avctx->frame_size &&
+ !avctx->internal->last_audio_frame) {
+ ret = pad_last_frame(avctx, &padded_frame, frame);
+ if (ret < 0)
+ goto end;
+
+ frame = padded_frame;
+ avctx->internal->last_audio_frame = 1;
+ }
+
+ if (frame->nb_samples != avctx->frame_size) {
+ av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ }
+ }
+
+ av_assert0(avctx->codec->encode2);
+
+ ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
+ if (!ret) {
+ if (*got_packet_ptr) {
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
+ if (avpkt->pts == AV_NOPTS_VALUE)
+ avpkt->pts = frame->pts;
+ if (!avpkt->duration)
+ avpkt->duration = ff_samples_to_time_base(avctx,
+ frame->nb_samples);
+ }
+ avpkt->dts = avpkt->pts;
+ } else {
+ avpkt->size = 0;
+ }
+ }
+ if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
+ needs_realloc = 0;
+ if (user_pkt.data) {
+ if (user_pkt.size >= avpkt->size) {
+ memcpy(user_pkt.data, avpkt->data, avpkt->size);
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
+ avpkt->size = user_pkt.size;
+ ret = -1;
+ }
+ avpkt->buf = user_pkt.buf;
+ avpkt->data = user_pkt.data;
+ } else {
+ if (av_dup_packet(avpkt) < 0) {
+ ret = AVERROR(ENOMEM);
+ }
+ }
+ }
+
+ if (!ret) {
+ if (needs_realloc && avpkt->data) {
+ ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (ret >= 0)
+ avpkt->data = avpkt->buf->data;
+ }
+
+ avctx->frame_number++;
+ }
+
+ if (ret < 0 || !*got_packet_ptr) {
+ av_packet_unref(avpkt);
+ av_init_packet(avpkt);
+ goto end;
+ }
+
+ /* NOTE: if we add any audio encoders which output non-keyframe packets,
+ * this needs to be moved to the encoders, but for now we can do it
+ * here to simplify things */
+ avpkt->flags |= AV_PKT_FLAG_KEY;
+
+end:
+ av_frame_free(&padded_frame);
+ av_free(extended_frame);
+
+#if FF_API_AUDIOENC_DELAY
+ avctx->delay = avctx->initial_padding;
+#endif
+
+ return ret;
+}
+
+int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
+ AVPacket *avpkt,
+ const AVFrame *frame,
+ int *got_packet_ptr)
+{
+ int ret;
+ AVPacket user_pkt = *avpkt;
+ int needs_realloc = !user_pkt.data;
+
+ *got_packet_ptr = 0;
+
+ if (!avctx->codec->encode2) {
+ av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
+ return AVERROR(ENOSYS);
+ }
+
+ if(CONFIG_FRAME_THREAD_ENCODER &&
+ avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))
+ return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
+
+ if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
+ avctx->stats_out[0] = '\0';
+
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
+ av_packet_unref(avpkt);
+ av_init_packet(avpkt);
+ avpkt->size = 0;
+ return 0;
+ }
+
+ if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
+ return AVERROR(EINVAL);
+
+ if (frame && frame->format == AV_PIX_FMT_NONE)
+ av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
+ if (frame && (frame->width == 0 || frame->height == 0))
+ av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
+
+ av_assert0(avctx->codec->encode2);
+
+ ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
+ av_assert0(ret <= 0);
+
+ emms_c();
+
+ if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
+ needs_realloc = 0;
+ if (user_pkt.data) {
+ if (user_pkt.size >= avpkt->size) {
+ memcpy(user_pkt.data, avpkt->data, avpkt->size);
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
+ avpkt->size = user_pkt.size;
+ ret = -1;
+ }
+ avpkt->buf = user_pkt.buf;
+ avpkt->data = user_pkt.data;
+ } else {
+ if (av_dup_packet(avpkt) < 0) {
+ ret = AVERROR(ENOMEM);
+ }
+ }
+ }
+
+ if (!ret) {
+ if (!*got_packet_ptr)
+ avpkt->size = 0;
+ else if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
+ avpkt->pts = avpkt->dts = frame->pts;
+
+ if (needs_realloc && avpkt->data) {
+ ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (ret >= 0)
+ avpkt->data = avpkt->buf->data;
+ }
+
+ avctx->frame_number++;
+ }
+
+ if (ret < 0 || !*got_packet_ptr)
+ av_packet_unref(avpkt);
+
+ return ret;
+}
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub)
+{
+ int ret;
+ if (sub->start_display_time) {
+ av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
+ return -1;
+ }
+
+ ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
+ avctx->frame_number++;
+ return ret;
+}
+
+static int do_encode(AVCodecContext *avctx, const AVFrame *frame, int *got_packet)
+{
+ int ret;
+ *got_packet = 0;
+
+ av_packet_unref(avctx->internal->buffer_pkt);
+ avctx->internal->buffer_pkt_valid = 0;
+
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ ret = avcodec_encode_video2(avctx, avctx->internal->buffer_pkt,
+ frame, got_packet);
+ } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ ret = avcodec_encode_audio2(avctx, avctx->internal->buffer_pkt,
+ frame, got_packet);
+ } else {
+ ret = AVERROR(EINVAL);
+ }
+
+ if (ret >= 0 && *got_packet) {
+ // Encoders must always return ref-counted buffers.
+ // Side-data only packets have no data and can be not ref-counted.
+ av_assert0(!avctx->internal->buffer_pkt->data || avctx->internal->buffer_pkt->buf);
+ avctx->internal->buffer_pkt_valid = 1;
+ ret = 0;
+ } else {
+ av_packet_unref(avctx->internal->buffer_pkt);
+ }
+
+ return ret;
+}
+
+int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
+{
+ if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avctx->internal->draining)
+ return AVERROR_EOF;
+
+ if (!frame) {
+ avctx->internal->draining = 1;
+
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
+ return 0;
+ }
+
+ if (avctx->codec->send_frame)
+ return avctx->codec->send_frame(avctx, frame);
+
+ // Emulation via old API. Do it here instead of avcodec_receive_packet, because:
+ // 1. if the AVFrame is not refcounted, the copying will be much more
+ // expensive than copying the packet data
+ // 2. assume few users use non-refcounted AVPackets, so usually no copy is
+ // needed
+
+ if (avctx->internal->buffer_pkt_valid)
+ return AVERROR(EAGAIN);
+
+ return do_encode(avctx, frame, &(int){0});
+}
+
+int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ av_packet_unref(avpkt);
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avctx->codec->receive_packet) {
+ if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
+ return AVERROR_EOF;
+ return avctx->codec->receive_packet(avctx, avpkt);
+ }
+
+ // Emulation via old API.
+
+ if (!avctx->internal->buffer_pkt_valid) {
+ int got_packet;
+ int ret;
+ if (!avctx->internal->draining)
+ return AVERROR(EAGAIN);
+ ret = do_encode(avctx, NULL, &got_packet);
+ if (ret < 0)
+ return ret;
+ if (ret >= 0 && !got_packet)
+ return AVERROR_EOF;
+ }
+
+ av_packet_move_ref(avpkt, avctx->internal->buffer_pkt);
+ avctx->internal->buffer_pkt_valid = 0;
+ return 0;
+}
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index ff95cea..d726ff7 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -1734,337 +1734,6 @@ FF_ENABLE_DEPRECATION_WARNINGS
goto end;
}
-int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
-{
- if (avpkt->size < 0) {
- av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
- return AVERROR(EINVAL);
- }
- if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
- av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
- size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
- return AVERROR(EINVAL);
- }
-
- if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
- av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
- if (!avpkt->data || avpkt->size < size) {
- av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
- avpkt->data = avctx->internal->byte_buffer;
- avpkt->size = avctx->internal->byte_buffer_size;
- }
- }
-
- if (avpkt->data) {
- AVBufferRef *buf = avpkt->buf;
-
- if (avpkt->size < size) {
- av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
- return AVERROR(EINVAL);
- }
-
- av_init_packet(avpkt);
- avpkt->buf = buf;
- avpkt->size = size;
- return 0;
- } else {
- int ret = av_new_packet(avpkt, size);
- if (ret < 0)
- av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
- return ret;
- }
-}
-
-int ff_alloc_packet(AVPacket *avpkt, int size)
-{
- return ff_alloc_packet2(NULL, avpkt, size, 0);
-}
-
-/**
- * Pad last frame with silence.
- */
-static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
-{
- AVFrame *frame = NULL;
- int ret;
-
- if (!(frame = av_frame_alloc()))
- return AVERROR(ENOMEM);
-
- frame->format = src->format;
- frame->channel_layout = src->channel_layout;
- av_frame_set_channels(frame, av_frame_get_channels(src));
- frame->nb_samples = s->frame_size;
- ret = av_frame_get_buffer(frame, 32);
- if (ret < 0)
- goto fail;
-
- ret = av_frame_copy_props(frame, src);
- if (ret < 0)
- goto fail;
-
- if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
- src->nb_samples, s->channels, s->sample_fmt)) < 0)
- goto fail;
- if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
- frame->nb_samples - src->nb_samples,
- s->channels, s->sample_fmt)) < 0)
- goto fail;
-
- *dst = frame;
-
- return 0;
-
-fail:
- av_frame_free(&frame);
- return ret;
-}
-
-int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
- AVPacket *avpkt,
- const AVFrame *frame,
- int *got_packet_ptr)
-{
- AVFrame *extended_frame = NULL;
- AVFrame *padded_frame = NULL;
- int ret;
- AVPacket user_pkt = *avpkt;
- int needs_realloc = !user_pkt.data;
-
- *got_packet_ptr = 0;
-
- if (!avctx->codec->encode2) {
- av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- return 0;
- }
-
- /* ensure that extended_data is properly set */
- if (frame && !frame->extended_data) {
- if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
- avctx->channels > AV_NUM_DATA_POINTERS) {
- av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
- "with more than %d channels, but extended_data is not set.\n",
- AV_NUM_DATA_POINTERS);
- return AVERROR(EINVAL);
- }
- av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
-
- extended_frame = av_frame_alloc();
- if (!extended_frame)
- return AVERROR(ENOMEM);
-
- memcpy(extended_frame, frame, sizeof(AVFrame));
- extended_frame->extended_data = extended_frame->data;
- frame = extended_frame;
- }
-
- /* extract audio service type metadata */
- if (frame) {
- AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
- if (sd && sd->size >= sizeof(enum AVAudioServiceType))
- avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
- }
-
- /* check for valid frame size */
- if (frame) {
- if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
- if (frame->nb_samples > avctx->frame_size) {
- av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
- ret = AVERROR(EINVAL);
- goto end;
- }
- } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
- if (frame->nb_samples < avctx->frame_size &&
- !avctx->internal->last_audio_frame) {
- ret = pad_last_frame(avctx, &padded_frame, frame);
- if (ret < 0)
- goto end;
-
- frame = padded_frame;
- avctx->internal->last_audio_frame = 1;
- }
-
- if (frame->nb_samples != avctx->frame_size) {
- av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
- ret = AVERROR(EINVAL);
- goto end;
- }
- }
- }
-
- av_assert0(avctx->codec->encode2);
-
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
- if (!ret) {
- if (*got_packet_ptr) {
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
- if (avpkt->pts == AV_NOPTS_VALUE)
- avpkt->pts = frame->pts;
- if (!avpkt->duration)
- avpkt->duration = ff_samples_to_time_base(avctx,
- frame->nb_samples);
- }
- avpkt->dts = avpkt->pts;
- } else {
- avpkt->size = 0;
- }
- }
- if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
- needs_realloc = 0;
- if (user_pkt.data) {
- if (user_pkt.size >= avpkt->size) {
- memcpy(user_pkt.data, avpkt->data, avpkt->size);
- } else {
- av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
- avpkt->size = user_pkt.size;
- ret = -1;
- }
- avpkt->buf = user_pkt.buf;
- avpkt->data = user_pkt.data;
- } else {
- if (av_dup_packet(avpkt) < 0) {
- ret = AVERROR(ENOMEM);
- }
- }
- }
-
- if (!ret) {
- if (needs_realloc && avpkt->data) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (ret >= 0)
- avpkt->data = avpkt->buf->data;
- }
-
- avctx->frame_number++;
- }
-
- if (ret < 0 || !*got_packet_ptr) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- goto end;
- }
-
- /* NOTE: if we add any audio encoders which output non-keyframe packets,
- * this needs to be moved to the encoders, but for now we can do it
- * here to simplify things */
- avpkt->flags |= AV_PKT_FLAG_KEY;
-
-end:
- av_frame_free(&padded_frame);
- av_free(extended_frame);
-
-#if FF_API_AUDIOENC_DELAY
- avctx->delay = avctx->initial_padding;
-#endif
-
- return ret;
-}
-
-int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
- AVPacket *avpkt,
- const AVFrame *frame,
- int *got_packet_ptr)
-{
- int ret;
- AVPacket user_pkt = *avpkt;
- int needs_realloc = !user_pkt.data;
-
- *got_packet_ptr = 0;
-
- if (!avctx->codec->encode2) {
- av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if(CONFIG_FRAME_THREAD_ENCODER &&
- avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))
- return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
-
- if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
- avctx->stats_out[0] = '\0';
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- avpkt->size = 0;
- return 0;
- }
-
- if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
- return AVERROR(EINVAL);
-
- if (frame && frame->format == AV_PIX_FMT_NONE)
- av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
- if (frame && (frame->width == 0 || frame->height == 0))
- av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
-
- av_assert0(avctx->codec->encode2);
-
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
- av_assert0(ret <= 0);
-
- emms_c();
-
- if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
- needs_realloc = 0;
- if (user_pkt.data) {
- if (user_pkt.size >= avpkt->size) {
- memcpy(user_pkt.data, avpkt->data, avpkt->size);
- } else {
- av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
- avpkt->size = user_pkt.size;
- ret = -1;
- }
- avpkt->buf = user_pkt.buf;
- avpkt->data = user_pkt.data;
- } else {
- if (av_dup_packet(avpkt) < 0) {
- ret = AVERROR(ENOMEM);
- }
- }
- }
-
- if (!ret) {
- if (!*got_packet_ptr)
- avpkt->size = 0;
- else if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- avpkt->pts = avpkt->dts = frame->pts;
-
- if (needs_realloc && avpkt->data) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (ret >= 0)
- avpkt->data = avpkt->buf->data;
- }
-
- avctx->frame_number++;
- }
-
- if (ret < 0 || !*got_packet_ptr)
- av_packet_unref(avpkt);
-
- return ret;
-}
-
-int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
- const AVSubtitle *sub)
-{
- int ret;
- if (sub->start_display_time) {
- av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
- return -1;
- }
-
- ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
- avctx->frame_number++;
- return ret;
-}
-
/**
* Attempt to guess proper monotonic timestamps for decoded video frames
* which might have incorrect times. Input timestamps may wrap around, in
@@ -2968,99 +2637,6 @@ int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *fr
return 0;
}
-static int do_encode(AVCodecContext *avctx, const AVFrame *frame, int *got_packet)
-{
- int ret;
- *got_packet = 0;
-
- av_packet_unref(avctx->internal->buffer_pkt);
- avctx->internal->buffer_pkt_valid = 0;
-
- if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
- ret = avcodec_encode_video2(avctx, avctx->internal->buffer_pkt,
- frame, got_packet);
- } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
- ret = avcodec_encode_audio2(avctx, avctx->internal->buffer_pkt,
- frame, got_packet);
- } else {
- ret = AVERROR(EINVAL);
- }
-
- if (ret >= 0 && *got_packet) {
- // Encoders must always return ref-counted buffers.
- // Side-data only packets have no data and can be not ref-counted.
- av_assert0(!avctx->internal->buffer_pkt->data || avctx->internal->buffer_pkt->buf);
- avctx->internal->buffer_pkt_valid = 1;
- ret = 0;
- } else {
- av_packet_unref(avctx->internal->buffer_pkt);
- }
-
- return ret;
-}
-
-int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
-{
- if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
- return AVERROR(EINVAL);
-
- if (avctx->internal->draining)
- return AVERROR_EOF;
-
- if (!frame) {
- avctx->internal->draining = 1;
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- return 0;
- }
-
- if (avctx->codec->send_frame)
- return avctx->codec->send_frame(avctx, frame);
-
- // Emulation via old API. Do it here instead of avcodec_receive_packet, because:
- // 1. if the AVFrame is not refcounted, the copying will be much more
- // expensive than copying the packet data
- // 2. assume few users use non-refcounted AVPackets, so usually no copy is
- // needed
-
- if (avctx->internal->buffer_pkt_valid)
- return AVERROR(EAGAIN);
-
- return do_encode(avctx, frame, &(int){0});
-}
-
-int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
-{
- av_packet_unref(avpkt);
-
- if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
- return AVERROR(EINVAL);
-
- if (avctx->codec->receive_packet) {
- if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- return AVERROR_EOF;
- return avctx->codec->receive_packet(avctx, avpkt);
- }
-
- // Emulation via old API.
-
- if (!avctx->internal->buffer_pkt_valid) {
- int got_packet;
- int ret;
- if (!avctx->internal->draining)
- return AVERROR(EAGAIN);
- ret = do_encode(avctx, NULL, &got_packet);
- if (ret < 0)
- return ret;
- if (ret >= 0 && !got_packet)
- return AVERROR_EOF;
- }
-
- av_packet_move_ref(avpkt, avctx->internal->buffer_pkt);
- avctx->internal->buffer_pkt_valid = 0;
- return 0;
-}
-
av_cold int avcodec_close(AVCodecContext *avctx)
{
int i;
======================================================================
diff --cc libavcodec/Makefile
index a2f200e,fe72315..e33f49d
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@@ -33,13 -25,12 +33,14 @@@ OBJS = allcodecs.
d3d11va.o \
dirac.o \
dv_profile.o \
+ encode.o \
imgconvert.o \
- log2_tab.o \
+ jni.o \
mathtables.o \
+ mediacodec.o \
mpeg12framerate.o \
options.o \
+ mjpegenc_huffman.o \
parser.o \
profiles.o \
qsv_api.o \
diff --cc libavcodec/encode.c
index 0000000,fc27a07..6e9d487
mode 000000,100644..100644
--- a/libavcodec/encode.c
+++ b/libavcodec/encode.c
@@@ -1,0 -1,360 +1,454 @@@
+ /*
+ * generic encoding-related code
+ *
- * This file is part of Libav.
++ * This file is part of FFmpeg.
+ *
- * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
- * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ #include "libavutil/attributes.h"
+ #include "libavutil/avassert.h"
+ #include "libavutil/frame.h"
+ #include "libavutil/imgutils.h"
+ #include "libavutil/internal.h"
+ #include "libavutil/samplefmt.h"
+
+ #include "avcodec.h"
++#include "frame_thread_encoder.h"
+ #include "internal.h"
+
-int ff_alloc_packet(AVPacket *avpkt, int size)
++int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
+ {
- if (size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
++ if (avpkt->size < 0) {
++ av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
++ return AVERROR(EINVAL);
++ }
++ if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
++ av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
++ size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
+ return AVERROR(EINVAL);
++ }
++
++ if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
++ av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
++ if (!avpkt->data || avpkt->size < size) {
++ av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
++ avpkt->data = avctx->internal->byte_buffer;
++ avpkt->size = avctx->internal->byte_buffer_size;
++ }
++ }
+
+ if (avpkt->data) {
+ AVBufferRef *buf = avpkt->buf;
+
- if (avpkt->size < size)
++ if (avpkt->size < size) {
++ av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
+ return AVERROR(EINVAL);
++ }
+
+ av_init_packet(avpkt);
+ avpkt->buf = buf;
+ avpkt->size = size;
+ return 0;
+ } else {
- return av_new_packet(avpkt, size);
++ int ret = av_new_packet(avpkt, size);
++ if (ret < 0)
++ av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
++ return ret;
+ }
+ }
+
++int ff_alloc_packet(AVPacket *avpkt, int size)
++{
++ return ff_alloc_packet2(NULL, avpkt, size, 0);
++}
++
+ /**
+ * Pad last frame with silence.
+ */
+ static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
+ {
+ AVFrame *frame = NULL;
+ int ret;
+
+ if (!(frame = av_frame_alloc()))
+ return AVERROR(ENOMEM);
+
+ frame->format = src->format;
+ frame->channel_layout = src->channel_layout;
++ av_frame_set_channels(frame, av_frame_get_channels(src));
+ frame->nb_samples = s->frame_size;
+ ret = av_frame_get_buffer(frame, 32);
+ if (ret < 0)
+ goto fail;
+
+ ret = av_frame_copy_props(frame, src);
+ if (ret < 0)
+ goto fail;
+
+ if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
+ src->nb_samples, s->channels, s->sample_fmt)) < 0)
+ goto fail;
+ if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
+ frame->nb_samples - src->nb_samples,
+ s->channels, s->sample_fmt)) < 0)
+ goto fail;
+
+ *dst = frame;
+
+ return 0;
+
+ fail:
+ av_frame_free(&frame);
+ return ret;
+ }
+
+ int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
+ AVPacket *avpkt,
+ const AVFrame *frame,
+ int *got_packet_ptr)
+ {
- AVFrame tmp;
++ AVFrame *extended_frame = NULL;
+ AVFrame *padded_frame = NULL;
+ int ret;
- int user_packet = !!avpkt->data;
++ AVPacket user_pkt = *avpkt;
++ int needs_realloc = !user_pkt.data;
+
+ *got_packet_ptr = 0;
+
+ if (!avctx->codec->encode2) {
+ av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
+ return AVERROR(ENOSYS);
+ }
+
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
+ av_packet_unref(avpkt);
+ av_init_packet(avpkt);
+ return 0;
+ }
+
+ /* ensure that extended_data is properly set */
+ if (frame && !frame->extended_data) {
+ if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
+ avctx->channels > AV_NUM_DATA_POINTERS) {
+ av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
+ "with more than %d channels, but extended_data is not set.\n",
+ AV_NUM_DATA_POINTERS);
+ return AVERROR(EINVAL);
+ }
+ av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
+
- tmp = *frame;
- tmp.extended_data = tmp.data;
- frame = &tmp;
++ extended_frame = av_frame_alloc();
++ if (!extended_frame)
++ return AVERROR(ENOMEM);
++
++ memcpy(extended_frame, frame, sizeof(AVFrame));
++ extended_frame->extended_data = extended_frame->data;
++ frame = extended_frame;
+ }
+
+ /* extract audio service type metadata */
+ if (frame) {
+ AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
+ if (sd && sd->size >= sizeof(enum AVAudioServiceType))
+ avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
+ }
+
+ /* check for valid frame size */
+ if (frame) {
+ if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
- if (frame->nb_samples > avctx->frame_size)
- return AVERROR(EINVAL);
++ if (frame->nb_samples > avctx->frame_size) {
++ av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
++ ret = AVERROR(EINVAL);
++ goto end;
++ }
+ } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
+ if (frame->nb_samples < avctx->frame_size &&
+ !avctx->internal->last_audio_frame) {
+ ret = pad_last_frame(avctx, &padded_frame, frame);
+ if (ret < 0)
- return ret;
++ goto end;
+
+ frame = padded_frame;
+ avctx->internal->last_audio_frame = 1;
+ }
+
+ if (frame->nb_samples != avctx->frame_size) {
++ av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
+ ret = AVERROR(EINVAL);
+ goto end;
+ }
+ }
+ }
+
++ av_assert0(avctx->codec->encode2);
++
+ ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
+ if (!ret) {
+ if (*got_packet_ptr) {
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
+ if (avpkt->pts == AV_NOPTS_VALUE)
+ avpkt->pts = frame->pts;
+ if (!avpkt->duration)
+ avpkt->duration = ff_samples_to_time_base(avctx,
+ frame->nb_samples);
+ }
+ avpkt->dts = avpkt->pts;
+ } else {
+ avpkt->size = 0;
+ }
++ }
++ if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
++ needs_realloc = 0;
++ if (user_pkt.data) {
++ if (user_pkt.size >= avpkt->size) {
++ memcpy(user_pkt.data, avpkt->data, avpkt->size);
++ } else {
++ av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
++ avpkt->size = user_pkt.size;
++ ret = -1;
++ }
++ avpkt->buf = user_pkt.buf;
++ avpkt->data = user_pkt.data;
++ } else {
++ if (av_dup_packet(avpkt) < 0) {
++ ret = AVERROR(ENOMEM);
++ }
++ }
++ }
+
- if (!user_packet && avpkt->size) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size);
++ if (!ret) {
++ if (needs_realloc && avpkt->data) {
++ ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (ret >= 0)
+ avpkt->data = avpkt->buf->data;
+ }
+
+ avctx->frame_number++;
+ }
+
+ if (ret < 0 || !*got_packet_ptr) {
+ av_packet_unref(avpkt);
+ av_init_packet(avpkt);
+ goto end;
+ }
+
+ /* NOTE: if we add any audio encoders which output non-keyframe packets,
+ * this needs to be moved to the encoders, but for now we can do it
+ * here to simplify things */
+ avpkt->flags |= AV_PKT_FLAG_KEY;
+
+ end:
+ av_frame_free(&padded_frame);
++ av_free(extended_frame);
+
+ #if FF_API_AUDIOENC_DELAY
+ avctx->delay = avctx->initial_padding;
+ #endif
+
+ return ret;
+ }
+
+ int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
+ AVPacket *avpkt,
+ const AVFrame *frame,
+ int *got_packet_ptr)
+ {
+ int ret;
- int user_packet = !!avpkt->data;
++ AVPacket user_pkt = *avpkt;
++ int needs_realloc = !user_pkt.data;
+
+ *got_packet_ptr = 0;
+
+ if (!avctx->codec->encode2) {
+ av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
+ return AVERROR(ENOSYS);
+ }
+
++ if(CONFIG_FRAME_THREAD_ENCODER &&
++ avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))
++ return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
++
++ if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
++ avctx->stats_out[0] = '\0';
++
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
+ av_packet_unref(avpkt);
+ av_init_packet(avpkt);
+ avpkt->size = 0;
+ return 0;
+ }
+
- if (av_image_check_size(avctx->width, avctx->height, 0, avctx))
++ if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
+ return AVERROR(EINVAL);
+
++ if (frame && frame->format == AV_PIX_FMT_NONE)
++ av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
++ if (frame && (frame->width == 0 || frame->height == 0))
++ av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
++
+ av_assert0(avctx->codec->encode2);
+
+ ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
++ av_assert0(ret <= 0);
++
++ emms_c();
++
++ if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
++ needs_realloc = 0;
++ if (user_pkt.data) {
++ if (user_pkt.size >= avpkt->size) {
++ memcpy(user_pkt.data, avpkt->data, avpkt->size);
++ } else {
++ av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
++ avpkt->size = user_pkt.size;
++ ret = -1;
++ }
++ avpkt->buf = user_pkt.buf;
++ avpkt->data = user_pkt.data;
++ } else {
++ if (av_dup_packet(avpkt) < 0) {
++ ret = AVERROR(ENOMEM);
++ }
++ }
++ }
++
+ if (!ret) {
+ if (!*got_packet_ptr)
+ avpkt->size = 0;
+ else if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
+ avpkt->pts = avpkt->dts = frame->pts;
+
- if (!user_packet && avpkt->size) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size);
++ if (needs_realloc && avpkt->data) {
++ ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
+ if (ret >= 0)
+ avpkt->data = avpkt->buf->data;
+ }
+
+ avctx->frame_number++;
+ }
+
+ if (ret < 0 || !*got_packet_ptr)
+ av_packet_unref(avpkt);
+
- emms_c();
+ return ret;
+ }
+
+ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub)
+ {
+ int ret;
+ if (sub->start_display_time) {
+ av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
+ return -1;
+ }
- if (sub->num_rects == 0 || !sub->rects)
- return -1;
++
+ ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
+ avctx->frame_number++;
+ return ret;
+ }
+
+ static int do_encode(AVCodecContext *avctx, const AVFrame *frame, int *got_packet)
+ {
+ int ret;
+ *got_packet = 0;
+
+ av_packet_unref(avctx->internal->buffer_pkt);
+ avctx->internal->buffer_pkt_valid = 0;
+
+ if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+ ret = avcodec_encode_video2(avctx, avctx->internal->buffer_pkt,
+ frame, got_packet);
+ } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+ ret = avcodec_encode_audio2(avctx, avctx->internal->buffer_pkt,
+ frame, got_packet);
+ } else {
+ ret = AVERROR(EINVAL);
+ }
+
+ if (ret >= 0 && *got_packet) {
+ // Encoders must always return ref-counted buffers.
+ // Side-data only packets have no data and can be not ref-counted.
+ av_assert0(!avctx->internal->buffer_pkt->data || avctx->internal->buffer_pkt->buf);
+ avctx->internal->buffer_pkt_valid = 1;
+ ret = 0;
+ } else {
+ av_packet_unref(avctx->internal->buffer_pkt);
+ }
+
+ return ret;
+ }
+
+ int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
+ {
+ if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avctx->internal->draining)
+ return AVERROR_EOF;
+
+ if (!frame) {
+ avctx->internal->draining = 1;
+
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
+ return 0;
+ }
+
+ if (avctx->codec->send_frame)
+ return avctx->codec->send_frame(avctx, frame);
+
+ // Emulation via old API. Do it here instead of avcodec_receive_packet, because:
+ // 1. if the AVFrame is not refcounted, the copying will be much more
+ // expensive than copying the packet data
+ // 2. assume few users use non-refcounted AVPackets, so usually no copy is
+ // needed
+
+ if (avctx->internal->buffer_pkt_valid)
+ return AVERROR(EAGAIN);
+
+ return do_encode(avctx, frame, &(int){0});
+ }
+
+ int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+ {
+ av_packet_unref(avpkt);
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avctx->codec->receive_packet) {
+ if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
+ return AVERROR_EOF;
+ return avctx->codec->receive_packet(avctx, avpkt);
+ }
+
+ // Emulation via old API.
+
+ if (!avctx->internal->buffer_pkt_valid) {
+ int got_packet;
+ int ret;
+ if (!avctx->internal->draining)
+ return AVERROR(EAGAIN);
+ ret = do_encode(avctx, NULL, &got_packet);
+ if (ret < 0)
+ return ret;
+ if (ret >= 0 && !got_packet)
+ return AVERROR_EOF;
+ }
+
+ av_packet_move_ref(avpkt, avctx->internal->buffer_pkt);
+ avctx->internal->buffer_pkt_valid = 0;
+ return 0;
+ }
diff --cc libavcodec/utils.c
index ff95cea,d8ba1d5..d726ff7
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@@ -1734,374 -1210,7 +1734,43 @@@ FF_ENABLE_DEPRECATION_WARNING
goto end;
}
- int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
- {
- if (avpkt->size < 0) {
- av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
- return AVERROR(EINVAL);
- }
- if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
- av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
- size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
- return AVERROR(EINVAL);
- }
-
- if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
- av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
- if (!avpkt->data || avpkt->size < size) {
- av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
- avpkt->data = avctx->internal->byte_buffer;
- avpkt->size = avctx->internal->byte_buffer_size;
- }
- }
-
- if (avpkt->data) {
- AVBufferRef *buf = avpkt->buf;
-
- if (avpkt->size < size) {
- av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
- return AVERROR(EINVAL);
- }
-
- av_init_packet(avpkt);
- avpkt->buf = buf;
- avpkt->size = size;
- return 0;
- } else {
- int ret = av_new_packet(avpkt, size);
- if (ret < 0)
- av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
- return ret;
- }
- }
-
- int ff_alloc_packet(AVPacket *avpkt, int size)
- {
- return ff_alloc_packet2(NULL, avpkt, size, 0);
- }
-
- /**
- * Pad last frame with silence.
- */
- static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
- {
- AVFrame *frame = NULL;
- int ret;
-
- if (!(frame = av_frame_alloc()))
- return AVERROR(ENOMEM);
-
- frame->format = src->format;
- frame->channel_layout = src->channel_layout;
- av_frame_set_channels(frame, av_frame_get_channels(src));
- frame->nb_samples = s->frame_size;
- ret = av_frame_get_buffer(frame, 32);
- if (ret < 0)
- goto fail;
-
- ret = av_frame_copy_props(frame, src);
- if (ret < 0)
- goto fail;
-
- if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
- src->nb_samples, s->channels, s->sample_fmt)) < 0)
- goto fail;
- if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
- frame->nb_samples - src->nb_samples,
- s->channels, s->sample_fmt)) < 0)
- goto fail;
-
- *dst = frame;
-
- return 0;
-
- fail:
- av_frame_free(&frame);
- return ret;
- }
-
- int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
- AVPacket *avpkt,
- const AVFrame *frame,
- int *got_packet_ptr)
- {
- AVFrame *extended_frame = NULL;
- AVFrame *padded_frame = NULL;
- int ret;
- AVPacket user_pkt = *avpkt;
- int needs_realloc = !user_pkt.data;
-
- *got_packet_ptr = 0;
-
- if (!avctx->codec->encode2) {
- av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- return 0;
- }
-
- /* ensure that extended_data is properly set */
- if (frame && !frame->extended_data) {
- if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
- avctx->channels > AV_NUM_DATA_POINTERS) {
- av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
- "with more than %d channels, but extended_data is not set.\n",
- AV_NUM_DATA_POINTERS);
- return AVERROR(EINVAL);
- }
- av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
-
- extended_frame = av_frame_alloc();
- if (!extended_frame)
- return AVERROR(ENOMEM);
-
- memcpy(extended_frame, frame, sizeof(AVFrame));
- extended_frame->extended_data = extended_frame->data;
- frame = extended_frame;
- }
-
- /* extract audio service type metadata */
- if (frame) {
- AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
- if (sd && sd->size >= sizeof(enum AVAudioServiceType))
- avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
- }
-
- /* check for valid frame size */
- if (frame) {
- if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
- if (frame->nb_samples > avctx->frame_size) {
- av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
- ret = AVERROR(EINVAL);
- goto end;
- }
- } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
- if (frame->nb_samples < avctx->frame_size &&
- !avctx->internal->last_audio_frame) {
- ret = pad_last_frame(avctx, &padded_frame, frame);
- if (ret < 0)
- goto end;
-
- frame = padded_frame;
- avctx->internal->last_audio_frame = 1;
- }
-
- if (frame->nb_samples != avctx->frame_size) {
- av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
- ret = AVERROR(EINVAL);
- goto end;
- }
- }
- }
-
- av_assert0(avctx->codec->encode2);
-
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
- if (!ret) {
- if (*got_packet_ptr) {
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
- if (avpkt->pts == AV_NOPTS_VALUE)
- avpkt->pts = frame->pts;
- if (!avpkt->duration)
- avpkt->duration = ff_samples_to_time_base(avctx,
- frame->nb_samples);
- }
- avpkt->dts = avpkt->pts;
- } else {
- avpkt->size = 0;
- }
- }
- if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
- needs_realloc = 0;
- if (user_pkt.data) {
- if (user_pkt.size >= avpkt->size) {
- memcpy(user_pkt.data, avpkt->data, avpkt->size);
- } else {
- av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
- avpkt->size = user_pkt.size;
- ret = -1;
- }
- avpkt->buf = user_pkt.buf;
- avpkt->data = user_pkt.data;
- } else {
- if (av_dup_packet(avpkt) < 0) {
- ret = AVERROR(ENOMEM);
- }
- }
- }
-
- if (!ret) {
- if (needs_realloc && avpkt->data) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (ret >= 0)
- avpkt->data = avpkt->buf->data;
- }
-
- avctx->frame_number++;
- }
-
- if (ret < 0 || !*got_packet_ptr) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- goto end;
- }
-
- /* NOTE: if we add any audio encoders which output non-keyframe packets,
- * this needs to be moved to the encoders, but for now we can do it
- * here to simplify things */
- avpkt->flags |= AV_PKT_FLAG_KEY;
-
- end:
- av_frame_free(&padded_frame);
- av_free(extended_frame);
-
- #if FF_API_AUDIOENC_DELAY
- avctx->delay = avctx->initial_padding;
- #endif
-
- return ret;
- }
-
- int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
- AVPacket *avpkt,
- const AVFrame *frame,
- int *got_packet_ptr)
- {
- int ret;
- AVPacket user_pkt = *avpkt;
- int needs_realloc = !user_pkt.data;
-
- *got_packet_ptr = 0;
-
- if (!avctx->codec->encode2) {
- av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if(CONFIG_FRAME_THREAD_ENCODER &&
- avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))
- return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
-
- if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
- avctx->stats_out[0] = '\0';
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
- av_packet_unref(avpkt);
- av_init_packet(avpkt);
- avpkt->size = 0;
- return 0;
- }
-
- if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
- return AVERROR(EINVAL);
-
- if (frame && frame->format == AV_PIX_FMT_NONE)
- av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
- if (frame && (frame->width == 0 || frame->height == 0))
- av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
-
- av_assert0(avctx->codec->encode2);
-
- ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
- av_assert0(ret <= 0);
-
- emms_c();
-
- if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
- needs_realloc = 0;
- if (user_pkt.data) {
- if (user_pkt.size >= avpkt->size) {
- memcpy(user_pkt.data, avpkt->data, avpkt->size);
- } else {
- av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
- avpkt->size = user_pkt.size;
- ret = -1;
- }
- avpkt->buf = user_pkt.buf;
- avpkt->data = user_pkt.data;
- } else {
- if (av_dup_packet(avpkt) < 0) {
- ret = AVERROR(ENOMEM);
- }
- }
- }
-
- if (!ret) {
- if (!*got_packet_ptr)
- avpkt->size = 0;
- else if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- avpkt->pts = avpkt->dts = frame->pts;
-
- if (needs_realloc && avpkt->data) {
- ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
- if (ret >= 0)
- avpkt->data = avpkt->buf->data;
- }
-
- avctx->frame_number++;
- }
-
- if (ret < 0 || !*got_packet_ptr)
- av_packet_unref(avpkt);
-
- return ret;
- }
-
- int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
- const AVSubtitle *sub)
- {
- int ret;
- if (sub->start_display_time) {
- av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
- return -1;
- }
-
- ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
- avctx->frame_number++;
- return ret;
- }
-
-static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt)
+/**
+ * Attempt to guess proper monotonic timestamps for decoded video frames
+ * which might have incorrect times. Input timestamps may wrap around, in
+ * which case the output will as well.
+ *
+ * @param pts the pts field of the decoded AVPacket, as passed through
+ * AVFrame.pts
+ * @param dts the dts field of the decoded AVPacket
+ * @return one of the input values, may be AV_NOPTS_VALUE
+ */
+static int64_t guess_correct_pts(AVCodecContext *ctx,
+ int64_t reordered_pts, int64_t dts)
+{
+ int64_t pts = AV_NOPTS_VALUE;
+
+ if (dts != AV_NOPTS_VALUE) {
+ ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
+ ctx->pts_correction_last_dts = dts;
+ } else if (reordered_pts != AV_NOPTS_VALUE)
+ ctx->pts_correction_last_dts = reordered_pts;
+
+ if (reordered_pts != AV_NOPTS_VALUE) {
+ ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
+ ctx->pts_correction_last_pts = reordered_pts;
+ } else if(dts != AV_NOPTS_VALUE)
+ ctx->pts_correction_last_pts = dts;
+
+ if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
+ && reordered_pts != AV_NOPTS_VALUE)
+ pts = reordered_pts;
+ else
+ pts = dts;
+
+ return pts;
+}
+
+static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
{
int size = 0, ret;
const uint8_t *data;