[FFmpeg-cvslog] Merge commit '061a0c14bb5767bca72e3a7227ca400de439ba09'
James Almer
git at videolan.org
Sun Apr 23 02:32:41 EEST 2017
ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Sat Apr 22 20:08:42 2017 -0300| [bddb2343b6e594e312dadb5d21b408702929ae04] | committer: James Almer
Merge commit '061a0c14bb5767bca72e3a7227ca400de439ba09'
* commit '061a0c14bb5767bca72e3a7227ca400de439ba09':
decode: restructure the core decoding code
CUVID decoder adapted by wm4.
Merged-by: James Almer <jamrial at gmail.com>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=bddb2343b6e594e312dadb5d21b408702929ae04
---
libavcodec/avcodec.h | 18 +-
libavcodec/cuvid.c | 25 +-
libavcodec/decode.c | 650 ++++++++++++++++++++++++--------------------------
libavcodec/decode.h | 35 +++
libavcodec/internal.h | 18 ++
libavcodec/utils.c | 22 +-
6 files changed, 424 insertions(+), 344 deletions(-)
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index fc928a1804..95c65a6f78 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -3764,20 +3764,22 @@ typedef struct AVCodec {
int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
int (*close)(AVCodecContext *);
/**
- * Decode/encode API with decoupled packet/frame dataflow. The API is the
+ * Encode API with decoupled packet/frame dataflow. The API is the
* same as the avcodec_ prefixed APIs (avcodec_send_frame() etc.), except
* that:
* - never called if the codec is closed or the wrong type,
- * - AVPacket parameter change side data is applied right before calling
- * AVCodec->send_packet,
- * - if AV_CODEC_CAP_DELAY is not set, drain packets or frames are never sent,
- * - only one drain packet is ever passed down (until the next flush()),
- * - a drain AVPacket is always NULL (no need to check for avpkt->size).
+ * - if AV_CODEC_CAP_DELAY is not set, drain frames are never sent,
+ * - only one drain frame is ever passed down,
*/
int (*send_frame)(AVCodecContext *avctx, const AVFrame *frame);
- int (*send_packet)(AVCodecContext *avctx, const AVPacket *avpkt);
- int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame);
int (*receive_packet)(AVCodecContext *avctx, AVPacket *avpkt);
+
+ /**
+ * Decode API with decoupled packet/frame dataflow. This function is called
+ * to get one output frame. It should call ff_decode_get_packet() to obtain
+ * input data.
+ */
+ int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame);
/**
* Flush buffers.
* Will be called when seeking
diff --git a/libavcodec/cuvid.c b/libavcodec/cuvid.c
index 916d7e9987..4d96cf0204 100644
--- a/libavcodec/cuvid.c
+++ b/libavcodec/cuvid.c
@@ -31,6 +31,7 @@
#include "libavutil/pixdesc.h"
#include "avcodec.h"
+#include "decode.h"
#include "internal.h"
typedef struct CuvidContext
@@ -357,6 +358,13 @@ static int CUDAAPI cuvid_handle_picture_display(void *opaque, CUVIDPARSERDISPINF
return 1;
}
+static int cuvid_is_buffer_full(AVCodecContext *avctx)
+{
+ CuvidContext *ctx = avctx->priv_data;
+
+ return (av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame)) + 2 > ctx->nb_surfaces;
+}
+
static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
CuvidContext *ctx = avctx->priv_data;
@@ -373,7 +381,7 @@ static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
if (is_flush && avpkt && avpkt->size)
return AVERROR_EOF;
- if ((av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame)) + 2 > ctx->nb_surfaces && avpkt && avpkt->size)
+ if (cuvid_is_buffer_full(avctx) && avpkt && avpkt->size)
return AVERROR(EAGAIN);
if (ctx->bsf && avpkt && avpkt->size) {
@@ -464,6 +472,20 @@ static int cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame)
return ret;
}
+ if (!cuvid_is_buffer_full(avctx)) {
+ AVPacket pkt = {0};
+ ret = ff_decode_get_packet(avctx, &pkt);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ ret = cuvid_decode_packet(avctx, &pkt);
+ av_packet_unref(&pkt);
+ // cuvid_is_buffer_full() should avoid this.
+ if (ret == AVERROR(EAGAIN))
+ ret = AVERROR_EXTERNAL;
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ }
+
ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
if (ret < 0)
return ret;
@@ -1026,7 +1048,6 @@ static const AVOption options[] = {
.init = cuvid_decode_init, \
.close = cuvid_decode_end, \
.decode = cuvid_decode_frame, \
- .send_packet = cuvid_decode_packet, \
.receive_frame = cuvid_output_frame, \
.flush = cuvid_flush, \
.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
diff --git a/libavcodec/decode.c b/libavcodec/decode.c
index 5c8b4cbf56..bc0ab7a5ca 100644
--- a/libavcodec/decode.c
+++ b/libavcodec/decode.c
@@ -28,6 +28,7 @@
#endif
#include "libavutil/avassert.h"
+#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/common.h"
#include "libavutil/frame.h"
@@ -37,6 +38,7 @@
#include "avcodec.h"
#include "bytestream.h"
+#include "decode.h"
#include "internal.h"
#include "thread.h"
@@ -177,6 +179,36 @@ static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
return 0;
}
+int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (avci->draining)
+ return AVERROR_EOF;
+
+ if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data_elems)
+ return AVERROR(EAGAIN);
+
+ av_packet_move_ref(pkt, avci->buffer_pkt);
+
+ ret = extract_packet_props(avctx->internal, pkt);
+ if (ret < 0)
+ goto finish;
+
+ ret = apply_param_change(avctx, pkt);
+ if (ret < 0)
+ goto finish;
+
+ if (avctx->codec->receive_frame)
+ avci->compat_decode_consumed += pkt->size;
+
+ return 0;
+finish:
+ av_packet_unref(pkt);
+ return ret;
+}
+
/**
* Attempt to guess proper monotonic timestamps for decoded video frames
* which might have incorrect times. Input timestamps may wrap around, in
@@ -213,345 +245,98 @@ static int64_t guess_correct_pts(AVCodecContext *ctx,
return pts;
}
-static int do_decode(AVCodecContext *avctx, AVPacket *pkt)
+/*
+ * The core of the receive_frame_wrapper for the decoders implementing
+ * the simple API. Certain decoders might consume partial packets without
+ * returning any output, so this function needs to be called in a loop until it
+ * returns EAGAIN.
+ **/
+static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
{
- int got_frame = 0;
+ AVCodecInternal *avci = avctx->internal;
+ DecodeSimpleContext *ds = &avci->ds;
+ AVPacket *pkt = ds->in_pkt;
+ // copy to ensure we do not change pkt
+ AVPacket tmp;
+ int got_frame, did_split;
int ret;
- av_assert0(!avctx->internal->buffer_frame->buf[0]);
-
- if (!pkt)
- pkt = avctx->internal->buffer_pkt;
-
- // This is the lesser evil. The field is for compatibility with legacy users
- // of the legacy API, and users using the new API should not be forced to
- // even know about this field.
- avctx->refcounted_frames = 1;
+ if (!pkt->data && !avci->draining) {
+ av_packet_unref(pkt);
+ ret = ff_decode_get_packet(avctx, pkt);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ }
// Some codecs (at least wma lossless) will crash when feeding drain packets
// after EOF was signaled.
- if (avctx->internal->draining_done)
+ if (avci->draining_done)
return AVERROR_EOF;
- if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
- ret = avcodec_decode_video2(avctx, avctx->internal->buffer_frame,
- &got_frame, pkt);
- if (ret >= 0 && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
- ret = pkt->size;
- } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
- ret = avcodec_decode_audio4(avctx, avctx->internal->buffer_frame,
- &got_frame, pkt);
- } else {
- ret = AVERROR(EINVAL);
- }
-
- if (ret == AVERROR(EAGAIN))
- ret = pkt->size;
-
- if (avctx->internal->draining && !got_frame)
- avctx->internal->draining_done = 1;
-
- if (ret < 0)
- return ret;
-
- if (ret >= pkt->size) {
- av_packet_unref(avctx->internal->buffer_pkt);
- } else {
- int consumed = ret;
-
- if (pkt != avctx->internal->buffer_pkt) {
- av_packet_unref(avctx->internal->buffer_pkt);
- if ((ret = av_packet_ref(avctx->internal->buffer_pkt, pkt)) < 0)
- return ret;
- }
-
- avctx->internal->buffer_pkt->data += consumed;
- avctx->internal->buffer_pkt->size -= consumed;
- avctx->internal->buffer_pkt->pts = AV_NOPTS_VALUE;
- avctx->internal->buffer_pkt->dts = AV_NOPTS_VALUE;
- }
-
- if (got_frame)
- av_assert0(avctx->internal->buffer_frame->buf[0]);
-
- return 0;
-}
-
-int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
-{
- int ret;
-
- if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
- return AVERROR(EINVAL);
-
- if (avctx->internal->draining)
+ if (!pkt->data &&
+ !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
+ avctx->active_thread_type & FF_THREAD_FRAME))
return AVERROR_EOF;
- if (avpkt && !avpkt->size && avpkt->data)
- return AVERROR(EINVAL);
-
- if (!avpkt || !avpkt->size) {
- avctx->internal->draining = 1;
- avpkt = NULL;
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- return 0;
- }
-
- if (avctx->codec->send_packet) {
- if (avpkt) {
- AVPacket tmp = *avpkt;
+ tmp = *pkt;
#if FF_API_MERGE_SD
FF_DISABLE_DEPRECATION_WARNINGS
- int did_split = av_packet_split_side_data(&tmp);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
- ret = apply_param_change(avctx, &tmp);
- if (ret >= 0)
- ret = avctx->codec->send_packet(avctx, &tmp);
-#if FF_API_MERGE_SD
- if (did_split)
- av_packet_free_side_data(&tmp);
-#endif
- return ret;
- } else {
- return avctx->codec->send_packet(avctx, NULL);
- }
- }
-
- // Emulation via old API. Assume avpkt is likely not refcounted, while
- // decoder output is always refcounted, and avoid copying.
-
- if (avctx->internal->buffer_pkt->size || avctx->internal->buffer_frame->buf[0])
- return AVERROR(EAGAIN);
-
- // The goal is decoding the first frame of the packet without using memcpy,
- // because the common case is having only 1 frame per packet (especially
- // with video, but audio too). In other cases, it can't be avoided, unless
- // the user is feeding refcounted packets.
- return do_decode(avctx, (AVPacket *)avpkt);
-}
-
-int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
-{
- int ret;
-
- av_frame_unref(frame);
-
- if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
- return AVERROR(EINVAL);
-
- if (avctx->codec->receive_frame) {
- if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- return AVERROR_EOF;
- ret = avctx->codec->receive_frame(avctx, frame);
- if (ret >= 0) {
- if (av_frame_get_best_effort_timestamp(frame) == AV_NOPTS_VALUE) {
- av_frame_set_best_effort_timestamp(frame,
- guess_correct_pts(avctx, frame->pts, frame->pkt_dts));
- }
- }
- return ret;
- }
-
- // Emulation via old API.
-
- if (!avctx->internal->buffer_frame->buf[0]) {
- if (!avctx->internal->buffer_pkt->size && !avctx->internal->draining)
- return AVERROR(EAGAIN);
-
- while (1) {
- if ((ret = do_decode(avctx, avctx->internal->buffer_pkt)) < 0) {
- av_packet_unref(avctx->internal->buffer_pkt);
- return ret;
- }
- // Some audio decoders may consume partial data without returning
- // a frame (fate-wmapro-2ch). There is no way to make the caller
- // call avcodec_receive_frame() again without returning a frame,
- // so try to decode more in these cases.
- if (avctx->internal->buffer_frame->buf[0] ||
- !avctx->internal->buffer_pkt->size)
- break;
- }
- }
-
- if (!avctx->internal->buffer_frame->buf[0])
- return avctx->internal->draining ? AVERROR_EOF : AVERROR(EAGAIN);
-
- av_frame_move_ref(frame, avctx->internal->buffer_frame);
- return 0;
-}
-
-int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
- int *got_picture_ptr,
- const AVPacket *avpkt)
-{
- AVCodecInternal *avci = avctx->internal;
- int ret;
- // copy to ensure we do not change avpkt
- AVPacket tmp = *avpkt;
+ did_split = av_packet_split_side_data(&tmp);
- if (!avctx->codec)
- return AVERROR(EINVAL);
- if (avctx->codec->type != AVMEDIA_TYPE_VIDEO) {
- av_log(avctx, AV_LOG_ERROR, "Invalid media type for video\n");
- return AVERROR(EINVAL);
- }
+ if (did_split) {
+ ret = extract_packet_props(avctx->internal, &tmp);
+ if (ret < 0)
+ return ret;
- if (!avctx->codec->decode) {
- av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
- return AVERROR(ENOSYS);
+ ret = apply_param_change(avctx, &tmp);
+ if (ret < 0)
+ return ret;
}
-
- *got_picture_ptr = 0;
- if ((avctx->coded_width || avctx->coded_height) && av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
- return AVERROR(EINVAL);
-
- ret = extract_packet_props(avci, avpkt);
- if (ret < 0)
- return ret;
- ret = apply_param_change(avctx, avpkt);
- if (ret < 0)
- return ret;
-
- av_frame_unref(picture);
-
- if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size ||
- (avctx->active_thread_type & FF_THREAD_FRAME)) {
-#if FF_API_MERGE_SD
-FF_DISABLE_DEPRECATION_WARNINGS
- int did_split = av_packet_split_side_data(&tmp);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
- ret = apply_param_change(avctx, &tmp);
- if (ret < 0)
- goto fail;
- ret = extract_packet_props(avci, &tmp);
- if (ret < 0)
- return ret;
- if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
- ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
- &tmp);
- else {
- ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
- &tmp);
- if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
- picture->pkt_dts = avpkt->dts;
+ got_frame = 0;
- if(!avctx->has_b_frames){
- av_frame_set_pkt_pos(picture, avpkt->pos);
- }
+ if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
+ ret = ff_thread_decode_frame(avctx, frame, &got_frame, &tmp);
+ } else {
+ ret = avctx->codec->decode(avctx, frame, &got_frame, &tmp);
+
+ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
+ frame->pkt_dts = pkt->dts;
+ if(!avctx->has_b_frames)
+ av_frame_set_pkt_pos(frame, pkt->pos);
//FIXME these should be under if(!avctx->has_b_frames)
/* get_buffer is supposed to set frame parameters */
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
- if (!picture->sample_aspect_ratio.num) picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
- if (!picture->width) picture->width = avctx->width;
- if (!picture->height) picture->height = avctx->height;
- if (picture->format == AV_PIX_FMT_NONE) picture->format = avctx->pix_fmt;
+ if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
+ if (!frame->width) frame->width = avctx->width;
+ if (!frame->height) frame->height = avctx->height;
+ if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
}
+ } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ frame->pkt_dts = pkt->dts;
}
-
-fail:
- emms_c(); //needed to avoid an emms_c() call before every return;
-
-#if FF_API_MERGE_SD
- if (did_split) {
- av_packet_free_side_data(&tmp);
- if(ret == tmp.size)
- ret = avpkt->size;
- }
-#endif
- if (picture->flags & AV_FRAME_FLAG_DISCARD) {
- *got_picture_ptr = 0;
- }
- if (*got_picture_ptr) {
- if (!avctx->refcounted_frames) {
- int err = unrefcount_frame(avci, picture);
- if (err < 0)
- return err;
- }
-
- avctx->frame_number++;
- av_frame_set_best_effort_timestamp(picture,
- guess_correct_pts(avctx,
- picture->pts,
- picture->pkt_dts));
- } else
- av_frame_unref(picture);
- } else
- ret = 0;
-
- /* many decoders assign whole AVFrames, thus overwriting extended_data;
- * make sure it's set correctly */
- av_assert0(!picture->extended_data || picture->extended_data == picture->data);
-
-#if FF_API_AVCTX_TIMEBASE
- if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
- avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
-#endif
-
- return ret;
-}
-
-int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
- AVFrame *frame,
- int *got_frame_ptr,
- const AVPacket *avpkt)
-{
- AVCodecInternal *avci = avctx->internal;
- int ret = 0;
-
- *got_frame_ptr = 0;
-
- if (!avctx->codec)
- return AVERROR(EINVAL);
-
- if (!avctx->codec->decode) {
- av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if (!avpkt->data && avpkt->size) {
- av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
- return AVERROR(EINVAL);
- }
- if (avctx->codec->type != AVMEDIA_TYPE_AUDIO) {
- av_log(avctx, AV_LOG_ERROR, "Invalid media type for audio\n");
- return AVERROR(EINVAL);
}
+ emms_c();
- av_frame_unref(frame);
-
- if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
+ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
+ if (frame->flags & AV_FRAME_FLAG_DISCARD)
+ got_frame = 0;
+ if (got_frame)
+ av_frame_set_best_effort_timestamp(frame,
+ guess_correct_pts(avctx,
+ frame->pts,
+ frame->pkt_dts));
+ } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
uint8_t *side;
int side_size;
uint32_t discard_padding = 0;
uint8_t skip_reason = 0;
uint8_t discard_reason = 0;
- // copy to ensure we do not change avpkt
- AVPacket tmp = *avpkt;
-#if FF_API_MERGE_SD
-FF_DISABLE_DEPRECATION_WARNINGS
- int did_split = av_packet_split_side_data(&tmp);
-FF_ENABLE_DEPRECATION_WARNINGS
-#endif
- ret = apply_param_change(avctx, &tmp);
- if (ret < 0)
- goto fail;
- ret = extract_packet_props(avci, &tmp);
- if (ret < 0)
- return ret;
- if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
- ret = ff_thread_decode_frame(avctx, frame, got_frame_ptr, &tmp);
- else {
- ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp);
- av_assert0(ret <= tmp.size);
- frame->pkt_dts = avpkt->dts;
- }
- if (ret >= 0 && *got_frame_ptr) {
- avctx->frame_number++;
+ if (ret >= 0 && got_frame) {
av_frame_set_best_effort_timestamp(frame,
guess_correct_pts(avctx,
frame->pts,
@@ -566,7 +351,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
frame->sample_rate = avctx->sample_rate;
}
- side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
+ side= av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
if(side && side_size>=10) {
avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
discard_padding = AV_RL32(side + 4);
@@ -576,16 +361,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
discard_reason = AV_RL8(side + 9);
}
- if ((frame->flags & AV_FRAME_FLAG_DISCARD) && *got_frame_ptr &&
+ if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
- *got_frame_ptr = 0;
+ got_frame = 0;
}
- if (avctx->internal->skip_samples > 0 && *got_frame_ptr &&
+ if (avctx->internal->skip_samples > 0 && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if(frame->nb_samples <= avctx->internal->skip_samples){
- *got_frame_ptr = 0;
+ got_frame = 0;
avctx->internal->skip_samples -= frame->nb_samples;
av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
avctx->internal->skip_samples);
@@ -618,10 +403,10 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
- if (discard_padding > 0 && discard_padding <= frame->nb_samples && *got_frame_ptr &&
+ if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
!(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
if (discard_padding == frame->nb_samples) {
- *got_frame_ptr = 0;
+ got_frame = 0;
} else {
if(avctx->pkt_timebase.num && avctx->sample_rate) {
int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
@@ -637,7 +422,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
- if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && *got_frame_ptr) {
+ if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
if (fside) {
AV_WL32(fside->data, avctx->internal->skip_samples);
@@ -647,36 +432,232 @@ FF_ENABLE_DEPRECATION_WARNINGS
avctx->internal->skip_samples = 0;
}
}
-fail:
+ }
#if FF_API_MERGE_SD
- if (did_split) {
- av_packet_free_side_data(&tmp);
- if(ret == tmp.size)
- ret = avpkt->size;
- }
+ if (did_split) {
+ av_packet_free_side_data(&tmp);
+ if(ret == tmp.size)
+ ret = pkt->size;
+ }
#endif
- if (ret >= 0 && *got_frame_ptr) {
+ if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
+ !avci->showed_multi_packet_warning &&
+ ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
+ av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
+ avci->showed_multi_packet_warning = 1;
+ }
+
+ if (!got_frame)
+ av_frame_unref(frame);
+
+ if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
+ ret = pkt->size;
+
+#if FF_API_AVCTX_TIMEBASE
+ if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
+ avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
+#endif
+
+ if (avctx->internal->draining && !got_frame)
+ avci->draining_done = 1;
+
+ avci->compat_decode_consumed += ret;
+
+ if (ret >= pkt->size || ret < 0) {
+ av_packet_unref(pkt);
+ } else {
+ int consumed = ret;
+
+ pkt->data += consumed;
+ pkt->size -= consumed;
+ pkt->pts = AV_NOPTS_VALUE;
+ pkt->dts = AV_NOPTS_VALUE;
+ avci->last_pkt_props->pts = AV_NOPTS_VALUE;
+ avci->last_pkt_props->dts = AV_NOPTS_VALUE;
+ }
+
+ if (got_frame)
+ av_assert0(frame->buf[0]);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ int ret;
+
+ while (!frame->buf[0]) {
+ ret = decode_simple_internal(avctx, frame);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ av_assert0(!frame->buf[0]);
+
+ if (avctx->codec->receive_frame)
+ ret = avctx->codec->receive_frame(avctx, frame);
+ else
+ ret = decode_simple_receive_frame(avctx, frame);
+
+ if (ret == AVERROR_EOF)
+ avci->draining_done = 1;
+
+ return ret;
+}
+
+int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avctx->internal->draining)
+ return AVERROR_EOF;
+
+ if (avci->buffer_pkt->data || avci->buffer_pkt->side_data_elems)
+ return AVERROR(EAGAIN);
+
+ if (avpkt && !avpkt->size && avpkt->data)
+ return AVERROR(EINVAL);
+
+ if (!avpkt || !avpkt->size) {
+ avctx->internal->draining = 1;
+ } else {
+ ret = av_packet_ref(avci->buffer_pkt, avpkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!avci->buffer_frame->buf[0]) {
+ ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
+ if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ return ret;
+ }
+
+ return 0;
+}
+
+int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ av_frame_unref(frame);
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avci->buffer_frame->buf[0]) {
+ av_frame_move_ref(frame, avci->buffer_frame);
+ } else {
+ ret = decode_receive_frame_internal(avctx, frame);
+ if (ret < 0)
+ return ret;
+ }
+
+ avctx->frame_number++;
+
+ return 0;
+}
+
+static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame, const AVPacket *pkt)
+{
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ av_assert0(avci->compat_decode_consumed == 0);
+
+ *got_frame = 0;
+ avci->compat_decode = 1;
+
+ if (avci->compat_decode_partial_size > 0 &&
+ avci->compat_decode_partial_size != pkt->size) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Got unexpected packet size after a partial decode\n");
+ ret = AVERROR(EINVAL);
+ goto finish;
+ }
+
+ if (!avci->compat_decode_partial_size) {
+ ret = avcodec_send_packet(avctx, pkt);
+ if (ret == AVERROR_EOF)
+ ret = 0;
+ else if (ret == AVERROR(EAGAIN)) {
+ /* we fully drain all the output in each decode call, so this should not
+ * ever happen */
+ ret = AVERROR_BUG;
+ goto finish;
+ } else if (ret < 0)
+ goto finish;
+ }
+
+ while (ret >= 0) {
+ ret = avcodec_receive_frame(avctx, frame);
+ if (ret < 0) {
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ ret = 0;
+ goto finish;
+ }
+
+ if (frame != avci->compat_decode_frame) {
if (!avctx->refcounted_frames) {
- int err = unrefcount_frame(avci, frame);
- if (err < 0)
- return err;
+ ret = unrefcount_frame(avci, frame);
+ if (ret < 0)
+ goto finish;
}
- } else
- av_frame_unref(frame);
- }
- av_assert0(ret <= avpkt->size);
+ *got_frame = 1;
+ frame = avci->compat_decode_frame;
+ } else {
+ if (!avci->compat_decode_warned) {
+ av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
+ "API cannot return all the frames for this decoder. "
+ "Some frames will be dropped. Update your code to the "
+ "new decoding API to fix this.\n");
+ avci->compat_decode_warned = 1;
+ }
+ }
- if (!avci->showed_multi_packet_warning &&
- ret >= 0 && ret != avpkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
- av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
- avci->showed_multi_packet_warning = 1;
+ if (avci->draining || avci->compat_decode_consumed < pkt->size)
+ break;
}
+finish:
+ if (ret == 0)
+ ret = FFMIN(avci->compat_decode_consumed, pkt->size);
+ avci->compat_decode_consumed = 0;
+ avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
+
return ret;
}
+int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
+ const AVPacket *avpkt)
+{
+ return compat_decode(avctx, picture, got_picture_ptr, avpkt);
+}
+
+int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
+ AVFrame *frame,
+ int *got_frame_ptr,
+ const AVPacket *avpkt)
+{
+ return compat_decode(avctx, frame, got_frame_ptr, avpkt);
+}
+
static void get_subtitle_defaults(AVSubtitle *sub)
{
memset(sub, 0, sizeof(*sub));
@@ -1554,9 +1535,12 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
avctx->internal->draining = 0;
avctx->internal->draining_done = 0;
av_frame_unref(avctx->internal->buffer_frame);
+ av_frame_unref(avctx->internal->compat_decode_frame);
av_packet_unref(avctx->internal->buffer_pkt);
avctx->internal->buffer_pkt_valid = 0;
+ av_packet_unref(avctx->internal->ds.in_pkt);
+
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
ff_thread_flush(avctx);
else if (avctx->codec->flush)
diff --git a/libavcodec/decode.h b/libavcodec/decode.h
new file mode 100644
index 0000000000..20a46b692a
--- /dev/null
+++ b/libavcodec/decode.h
@@ -0,0 +1,35 @@
+/*
+ * generic decoding-related code
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DECODE_H
+#define AVCODEC_DECODE_H
+
+/**
+ * Called by decoders to get the next packet for decoding.
+ *
+ * @param pkt An empty packet to be filled with data.
+ * @return 0 if a new reference has been successfully written to pkt
+ * AVERROR(EAGAIN) if no data is currently available
+ * AVERROR_EOF if an end of stream has been reached, so no more data
+ * will be available
+ */
+int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt);
+
+#endif /* AVCODEC_DECODE_H */
diff --git a/libavcodec/internal.h b/libavcodec/internal.h
index 90a887332e..2fd27d8431 100644
--- a/libavcodec/internal.h
+++ b/libavcodec/internal.h
@@ -101,6 +101,11 @@ typedef struct FramePool {
int samples;
} FramePool;
+typedef struct DecodeSimpleContext {
+ AVPacket *in_pkt;
+ AVFrame *out_frame;
+} DecodeSimpleContext;
+
typedef struct AVCodecInternal {
/**
* Whether the parent AVCodecContext is a copy of the context which had
@@ -137,6 +142,8 @@ typedef struct AVCodecInternal {
void *thread_ctx;
+ DecodeSimpleContext ds;
+
/**
* Properties (timestamps+side data) extracted from the last packet passed
* for decoding.
@@ -173,6 +180,17 @@ typedef struct AVCodecInternal {
int buffer_pkt_valid; // encoding: packet without data can be valid
AVFrame *buffer_frame;
int draining_done;
+ /* set to 1 when the caller is using the old decoding API */
+ int compat_decode;
+ int compat_decode_warned;
+ /* this variable is set by the decoder internals to signal to the old
+ * API compat wrappers the amount of data consumed from the last packet */
+ size_t compat_decode_consumed;
+ /* when a partial packet has been consumed, this stores the remaining size
+ * of the packet (that should be submitted in the next decode call) */
+ size_t compat_decode_partial_size;
+ AVFrame *compat_decode_frame;
+
int showed_multi_packet_warning;
int skip_samples_multiplier;
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 6a68971d68..e50d640976 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -172,7 +172,7 @@ int av_codec_is_encoder(const AVCodec *codec)
int av_codec_is_decoder(const AVCodec *codec)
{
- return codec && (codec->decode || codec->send_packet);
+ return codec && (codec->decode || codec->receive_frame);
}
av_cold void avcodec_register(AVCodec *codec)
@@ -672,6 +672,12 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
goto free_and_end;
}
+ avctx->internal->compat_decode_frame = av_frame_alloc();
+ if (!avctx->internal->compat_decode_frame) {
+ ret = AVERROR(ENOMEM);
+ goto free_and_end;
+ }
+
avctx->internal->buffer_frame = av_frame_alloc();
if (!avctx->internal->buffer_frame) {
ret = AVERROR(ENOMEM);
@@ -684,6 +690,12 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
goto free_and_end;
}
+ avctx->internal->ds.in_pkt = av_packet_alloc();
+ if (!avctx->internal->ds.in_pkt) {
+ ret = AVERROR(ENOMEM);
+ goto free_and_end;
+ }
+
avctx->internal->last_pkt_props = av_packet_alloc();
if (!avctx->internal->last_pkt_props) {
ret = AVERROR(ENOMEM);
@@ -1114,9 +1126,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
av_freep(&avctx->priv_data);
if (avctx->internal) {
av_frame_free(&avctx->internal->to_free);
+ av_frame_free(&avctx->internal->compat_decode_frame);
av_frame_free(&avctx->internal->buffer_frame);
av_packet_free(&avctx->internal->buffer_pkt);
av_packet_free(&avctx->internal->last_pkt_props);
+
+ av_packet_free(&avctx->internal->ds.in_pkt);
+
av_freep(&avctx->internal->pool);
}
av_freep(&avctx->internal);
@@ -1163,9 +1179,13 @@ av_cold int avcodec_close(AVCodecContext *avctx)
avctx->internal->byte_buffer_size = 0;
av_freep(&avctx->internal->byte_buffer);
av_frame_free(&avctx->internal->to_free);
+ av_frame_free(&avctx->internal->compat_decode_frame);
av_frame_free(&avctx->internal->buffer_frame);
av_packet_free(&avctx->internal->buffer_pkt);
av_packet_free(&avctx->internal->last_pkt_props);
+
+ av_packet_free(&avctx->internal->ds.in_pkt);
+
for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
av_buffer_pool_uninit(&pool->pools[i]);
av_freep(&avctx->internal->pool);
======================================================================
diff --cc libavcodec/cuvid.c
index 916d7e9987,0000000000..4d96cf0204
mode 100644,000000..100644
--- a/libavcodec/cuvid.c
+++ b/libavcodec/cuvid.c
@@@ -1,1074 -1,0 +1,1095 @@@
+/*
+ * Nvidia CUVID decoder
+ * Copyright (c) 2016 Timo Rothenpieler <timo at rothenpieler.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "compat/cuda/dynlink_loader.h"
+
+#include "libavutil/buffer.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_cuda_internal.h"
+#include "libavutil/fifo.h"
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+
+#include "avcodec.h"
++#include "decode.h"
+#include "internal.h"
+
+typedef struct CuvidContext
+{
+ AVClass *avclass;
+
+ CUvideodecoder cudecoder;
+ CUvideoparser cuparser;
+
+ char *cu_gpu;
+ int nb_surfaces;
+ int drop_second_field;
+ char *crop_expr;
+ char *resize_expr;
+
+ struct {
+ int left;
+ int top;
+ int right;
+ int bottom;
+ } crop;
+
+ struct {
+ int width;
+ int height;
+ } resize;
+
+ AVBufferRef *hwdevice;
+ AVBufferRef *hwframe;
+
+ AVBSFContext *bsf;
+
+ AVFifoBuffer *frame_queue;
+
+ int deint_mode;
+ int deint_mode_current;
+ int64_t prev_pts;
+
+ int internal_error;
+ int decoder_flushing;
+
+ cudaVideoCodec codec_type;
+ cudaVideoChromaFormat chroma_format;
+
+ CUVIDPARSERPARAMS cuparseinfo;
+ CUVIDEOFORMATEX cuparse_ext;
+
+ CudaFunctions *cudl;
+ CuvidFunctions *cvdl;
+} CuvidContext;
+
+typedef struct CuvidParsedFrame
+{
+ CUVIDPARSERDISPINFO dispinfo;
+ int second_field;
+ int is_deinterlacing;
+} CuvidParsedFrame;
+
+static int check_cu(AVCodecContext *avctx, CUresult err, const char *func)
+{
+ CuvidContext *ctx = avctx->priv_data;
+ const char *err_name;
+ const char *err_string;
+
+ av_log(avctx, AV_LOG_TRACE, "Calling %s\n", func);
+
+ if (err == CUDA_SUCCESS)
+ return 0;
+
+ ctx->cudl->cuGetErrorName(err, &err_name);
+ ctx->cudl->cuGetErrorString(err, &err_string);
+
+ av_log(avctx, AV_LOG_ERROR, "%s failed", func);
+ if (err_name && err_string)
+ av_log(avctx, AV_LOG_ERROR, " -> %s: %s", err_name, err_string);
+ av_log(avctx, AV_LOG_ERROR, "\n");
+
+ return AVERROR_EXTERNAL;
+}
+
+#define CHECK_CU(x) check_cu(avctx, (x), #x)
+
+static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
+{
+ AVCodecContext *avctx = opaque;
+ CuvidContext *ctx = avctx->priv_data;
+ AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
+ CUVIDDECODECREATEINFO cuinfo;
+ int surface_fmt;
+
+ int old_width = avctx->width;
+ int old_height = avctx->height;
+
+ enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
+ AV_PIX_FMT_NONE, // Will be updated below
+ AV_PIX_FMT_NONE };
+
+ av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);
+
+ memset(&cuinfo, 0, sizeof(cuinfo));
+
+ ctx->internal_error = 0;
+
+ avctx->coded_width = cuinfo.ulWidth = format->coded_width;
+ avctx->coded_height = cuinfo.ulHeight = format->coded_height;
+
+ // apply cropping
+ cuinfo.display_area.left = format->display_area.left + ctx->crop.left;
+ cuinfo.display_area.top = format->display_area.top + ctx->crop.top;
+ cuinfo.display_area.right = format->display_area.right - ctx->crop.right;
+ cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;
+
+ // width and height need to be set before calling ff_get_format
+ if (ctx->resize_expr) {
+ avctx->width = ctx->resize.width;
+ avctx->height = ctx->resize.height;
+ } else {
+ avctx->width = cuinfo.display_area.right - cuinfo.display_area.left;
+ avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
+ }
+
+ // target width/height need to be multiples of two
+ cuinfo.ulTargetWidth = avctx->width = (avctx->width + 1) & ~1;
+ cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;
+
+ // aspect ratio conversion, 1:1, depends on scaled resolution
+ cuinfo.target_rect.left = 0;
+ cuinfo.target_rect.top = 0;
+ cuinfo.target_rect.right = cuinfo.ulTargetWidth;
+ cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;
+
+ switch (format->bit_depth_luma_minus8) {
+ case 0: // 8-bit
+ pix_fmts[1] = AV_PIX_FMT_NV12;
+ break;
+ case 2: // 10-bit
+ pix_fmts[1] = AV_PIX_FMT_P010;
+ break;
+ case 4: // 12-bit
+ pix_fmts[1] = AV_PIX_FMT_P016;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "unsupported bit depth: %d\n",
+ format->bit_depth_luma_minus8 + 8);
+ ctx->internal_error = AVERROR(EINVAL);
+ return 0;
+ }
+ surface_fmt = ff_get_format(avctx, pix_fmts);
+ if (surface_fmt < 0) {
+ av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", surface_fmt);
+ ctx->internal_error = AVERROR(EINVAL);
+ return 0;
+ }
+
+ av_log(avctx, AV_LOG_VERBOSE, "Formats: Original: %s | HW: %s | SW: %s\n",
+ av_get_pix_fmt_name(avctx->pix_fmt),
+ av_get_pix_fmt_name(surface_fmt),
+ av_get_pix_fmt_name(avctx->sw_pix_fmt));
+
+ avctx->pix_fmt = surface_fmt;
+
+ // Update our hwframe ctx, as the get_format callback might have refreshed it!
+ if (avctx->hw_frames_ctx) {
+ av_buffer_unref(&ctx->hwframe);
+
+ ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
+ if (!ctx->hwframe) {
+ ctx->internal_error = AVERROR(ENOMEM);
+ return 0;
+ }
+
+ hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
+ }
+
+ ff_set_sar(avctx, av_div_q(
+ (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
+ (AVRational){ avctx->width, avctx->height }));
+
+ ctx->deint_mode_current = format->progressive_sequence
+ ? cudaVideoDeinterlaceMode_Weave
+ : ctx->deint_mode;
+
+ if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
+ avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
+ else
+ avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;
+
+ if (format->video_signal_description.video_full_range_flag)
+ avctx->color_range = AVCOL_RANGE_JPEG;
+ else
+ avctx->color_range = AVCOL_RANGE_MPEG;
+
+ avctx->color_primaries = format->video_signal_description.color_primaries;
+ avctx->color_trc = format->video_signal_description.transfer_characteristics;
+ avctx->colorspace = format->video_signal_description.matrix_coefficients;
+
+ if (format->bitrate)
+ avctx->bit_rate = format->bitrate;
+
+ if (format->frame_rate.numerator && format->frame_rate.denominator) {
+ avctx->framerate.num = format->frame_rate.numerator;
+ avctx->framerate.den = format->frame_rate.denominator;
+ }
+
+ if (ctx->cudecoder
+ && avctx->coded_width == format->coded_width
+ && avctx->coded_height == format->coded_height
+ && avctx->width == old_width
+ && avctx->height == old_height
+ && ctx->chroma_format == format->chroma_format
+ && ctx->codec_type == format->codec)
+ return 1;
+
+ if (ctx->cudecoder) {
+ av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
+ ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder));
+ if (ctx->internal_error < 0)
+ return 0;
+ ctx->cudecoder = NULL;
+ }
+
+ if (hwframe_ctx->pool && (
+ hwframe_ctx->width < avctx->width ||
+ hwframe_ctx->height < avctx->height ||
+ hwframe_ctx->format != AV_PIX_FMT_CUDA ||
+ hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
+ av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
+ av_log(avctx, AV_LOG_DEBUG, "width: %d <-> %d\n", hwframe_ctx->width, avctx->width);
+ av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
+ av_log(avctx, AV_LOG_DEBUG, "format: %s <-> cuda\n", av_get_pix_fmt_name(hwframe_ctx->format));
+ av_log(avctx, AV_LOG_DEBUG, "sw_format: %s <-> %s\n",
+ av_get_pix_fmt_name(hwframe_ctx->sw_format), av_get_pix_fmt_name(avctx->sw_pix_fmt));
+ ctx->internal_error = AVERROR(EINVAL);
+ return 0;
+ }
+
+ if (format->chroma_format != cudaVideoChromaFormat_420) {
+ av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
+ ctx->internal_error = AVERROR(EINVAL);
+ return 0;
+ }
+
+ ctx->chroma_format = format->chroma_format;
+
+ cuinfo.CodecType = ctx->codec_type = format->codec;
+ cuinfo.ChromaFormat = format->chroma_format;
+
+ switch (avctx->sw_pix_fmt) {
+ case AV_PIX_FMT_NV12:
+ cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
+ break;
+ case AV_PIX_FMT_P010:
+ case AV_PIX_FMT_P016:
+ cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
+ break;
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Output formats other than NV12, P010 or P016 are not supported\n");
+ ctx->internal_error = AVERROR(EINVAL);
+ return 0;
+ }
+
+ cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
+ cuinfo.ulNumOutputSurfaces = 1;
+ cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
+ cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
+ cuinfo.DeinterlaceMode = ctx->deint_mode_current;
+
+ if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
+ avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});
+
+ ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
+ if (ctx->internal_error < 0)
+ return 0;
+
+ if (!hwframe_ctx->pool) {
+ hwframe_ctx->format = AV_PIX_FMT_CUDA;
+ hwframe_ctx->sw_format = avctx->sw_pix_fmt;
+ hwframe_ctx->width = avctx->width;
+ hwframe_ctx->height = avctx->height;
+
+ if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int CUDAAPI cuvid_handle_picture_decode(void *opaque, CUVIDPICPARAMS* picparams)
+{
+ AVCodecContext *avctx = opaque;
+ CuvidContext *ctx = avctx->priv_data;
+
+ av_log(avctx, AV_LOG_TRACE, "pfnDecodePicture\n");
+
+ ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDecodePicture(ctx->cudecoder, picparams));
+ if (ctx->internal_error < 0)
+ return 0;
+
+ return 1;
+}
+
+static int CUDAAPI cuvid_handle_picture_display(void *opaque, CUVIDPARSERDISPINFO* dispinfo)
+{
+ AVCodecContext *avctx = opaque;
+ CuvidContext *ctx = avctx->priv_data;
+ CuvidParsedFrame parsed_frame = { { 0 } };
+
+ parsed_frame.dispinfo = *dispinfo;
+ ctx->internal_error = 0;
+
+ if (ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave) {
+ av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
+ } else {
+ parsed_frame.is_deinterlacing = 1;
+ av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
+ if (!ctx->drop_second_field) {
+ parsed_frame.second_field = 1;
+ av_fifo_generic_write(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
+ }
+ }
+
+ return 1;
+}
+
++static int cuvid_is_buffer_full(AVCodecContext *avctx)
++{
++ CuvidContext *ctx = avctx->priv_data;
++
++ return (av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame)) + 2 > ctx->nb_surfaces;
++}
++
+static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
+{
+ CuvidContext *ctx = avctx->priv_data;
+ AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
+ AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
+ CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
+ CUVIDSOURCEDATAPACKET cupkt;
+ AVPacket filter_packet = { 0 };
+ AVPacket filtered_packet = { 0 };
+ int ret = 0, eret = 0, is_flush = ctx->decoder_flushing;
+
+ av_log(avctx, AV_LOG_TRACE, "cuvid_decode_packet\n");
+
+ if (is_flush && avpkt && avpkt->size)
+ return AVERROR_EOF;
+
- if ((av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame)) + 2 > ctx->nb_surfaces && avpkt && avpkt->size)
++ if (cuvid_is_buffer_full(avctx) && avpkt && avpkt->size)
+ return AVERROR(EAGAIN);
+
+ if (ctx->bsf && avpkt && avpkt->size) {
+ if ((ret = av_packet_ref(&filter_packet, avpkt)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "av_packet_ref failed\n");
+ return ret;
+ }
+
+ if ((ret = av_bsf_send_packet(ctx->bsf, &filter_packet)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "av_bsf_send_packet failed\n");
+ av_packet_unref(&filter_packet);
+ return ret;
+ }
+
+ if ((ret = av_bsf_receive_packet(ctx->bsf, &filtered_packet)) < 0) {
+ av_log(avctx, AV_LOG_ERROR, "av_bsf_receive_packet failed\n");
+ return ret;
+ }
+
+ avpkt = &filtered_packet;
+ }
+
+ ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
+ if (ret < 0) {
+ av_packet_unref(&filtered_packet);
+ return ret;
+ }
+
+ memset(&cupkt, 0, sizeof(cupkt));
+
+ if (avpkt && avpkt->size) {
+ cupkt.payload_size = avpkt->size;
+ cupkt.payload = avpkt->data;
+
+ if (avpkt->pts != AV_NOPTS_VALUE) {
+ cupkt.flags = CUVID_PKT_TIMESTAMP;
+ if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
+ cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000});
+ else
+ cupkt.timestamp = avpkt->pts;
+ }
+ } else {
+ cupkt.flags = CUVID_PKT_ENDOFSTREAM;
+ ctx->decoder_flushing = 1;
+ }
+
+ ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &cupkt));
+
+ av_packet_unref(&filtered_packet);
+
+ if (ret < 0)
+ goto error;
+
+ // cuvidParseVideoData doesn't return an error just because stuff failed...
+ if (ctx->internal_error) {
+ av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n");
+ ret = ctx->internal_error;
+ goto error;
+ }
+
+error:
+ eret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
+
+ if (eret < 0)
+ return eret;
+ else if (ret < 0)
+ return ret;
+ else if (is_flush)
+ return AVERROR_EOF;
+ else
+ return 0;
+}
+
+static int cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ CuvidContext *ctx = avctx->priv_data;
+ AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
+ AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
+ CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
+ CUdeviceptr mapped_frame = 0;
+ int ret = 0, eret = 0;
+
+ av_log(avctx, AV_LOG_TRACE, "cuvid_output_frame\n");
+
+ if (ctx->decoder_flushing) {
+ ret = cuvid_decode_packet(avctx, NULL);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ }
+
++ if (!cuvid_is_buffer_full(avctx)) {
++ AVPacket pkt = {0};
++ ret = ff_decode_get_packet(avctx, &pkt);
++ if (ret < 0 && ret != AVERROR_EOF)
++ return ret;
++ ret = cuvid_decode_packet(avctx, &pkt);
++ av_packet_unref(&pkt);
++ // cuvid_is_buffer_full() should avoid this.
++ if (ret == AVERROR(EAGAIN))
++ ret = AVERROR_EXTERNAL;
++ if (ret < 0 && ret != AVERROR_EOF)
++ return ret;
++ }
++
+ ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
+ if (ret < 0)
+ return ret;
+
+ if (av_fifo_size(ctx->frame_queue)) {
+ CuvidParsedFrame parsed_frame;
+ CUVIDPROCPARAMS params;
+ unsigned int pitch = 0;
+ int offset = 0;
+ int i;
+
+ av_fifo_generic_read(ctx->frame_queue, &parsed_frame, sizeof(CuvidParsedFrame), NULL);
+
+ memset(&params, 0, sizeof(params));
+ params.progressive_frame = parsed_frame.dispinfo.progressive_frame;
+ params.second_field = parsed_frame.second_field;
+ params.top_field_first = parsed_frame.dispinfo.top_field_first;
+
+ ret = CHECK_CU(ctx->cvdl->cuvidMapVideoFrame(ctx->cudecoder, parsed_frame.dispinfo.picture_index, &mapped_frame, &pitch, &params));
+ if (ret < 0)
+ goto error;
+
+ if (avctx->pix_fmt == AV_PIX_FMT_CUDA) {
+ ret = av_hwframe_get_buffer(ctx->hwframe, frame, 0);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "av_hwframe_get_buffer failed\n");
+ goto error;
+ }
+
+ ret = ff_decode_frame_props(avctx, frame);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "ff_decode_frame_props failed\n");
+ goto error;
+ }
+
+ for (i = 0; i < 2; i++) {
+ CUDA_MEMCPY2D cpy = {
+ .srcMemoryType = CU_MEMORYTYPE_DEVICE,
+ .dstMemoryType = CU_MEMORYTYPE_DEVICE,
+ .srcDevice = mapped_frame,
+ .dstDevice = (CUdeviceptr)frame->data[i],
+ .srcPitch = pitch,
+ .dstPitch = frame->linesize[i],
+ .srcY = offset,
+ .WidthInBytes = FFMIN(pitch, frame->linesize[i]),
+ .Height = avctx->height >> (i ? 1 : 0),
+ };
+
+ ret = CHECK_CU(ctx->cudl->cuMemcpy2D(&cpy));
+ if (ret < 0)
+ goto error;
+
+ offset += avctx->height;
+ }
+ } else if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
+ avctx->pix_fmt == AV_PIX_FMT_P010 ||
+ avctx->pix_fmt == AV_PIX_FMT_P016) {
+ AVFrame *tmp_frame = av_frame_alloc();
+ if (!tmp_frame) {
+ av_log(avctx, AV_LOG_ERROR, "av_frame_alloc failed\n");
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+
+ tmp_frame->format = AV_PIX_FMT_CUDA;
+ tmp_frame->hw_frames_ctx = av_buffer_ref(ctx->hwframe);
+ tmp_frame->data[0] = (uint8_t*)mapped_frame;
+ tmp_frame->linesize[0] = pitch;
+ tmp_frame->data[1] = (uint8_t*)(mapped_frame + avctx->height * pitch);
+ tmp_frame->linesize[1] = pitch;
+ tmp_frame->width = avctx->width;
+ tmp_frame->height = avctx->height;
+
+ ret = ff_get_buffer(avctx, frame, 0);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "ff_get_buffer failed\n");
+ av_frame_free(&tmp_frame);
+ goto error;
+ }
+
+ ret = av_hwframe_transfer_data(frame, tmp_frame, 0);
+ if (ret) {
+ av_log(avctx, AV_LOG_ERROR, "av_hwframe_transfer_data failed\n");
+ av_frame_free(&tmp_frame);
+ goto error;
+ }
+ av_frame_free(&tmp_frame);
+ } else {
+ ret = AVERROR_BUG;
+ goto error;
+ }
+
+ frame->width = avctx->width;
+ frame->height = avctx->height;
+ if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
+ frame->pts = av_rescale_q(parsed_frame.dispinfo.timestamp, (AVRational){1, 10000000}, avctx->pkt_timebase);
+ else
+ frame->pts = parsed_frame.dispinfo.timestamp;
+
+ if (parsed_frame.second_field) {
+ if (ctx->prev_pts == INT64_MIN) {
+ ctx->prev_pts = frame->pts;
+ frame->pts += (avctx->pkt_timebase.den * avctx->framerate.den) / (avctx->pkt_timebase.num * avctx->framerate.num);
+ } else {
+ int pts_diff = (frame->pts - ctx->prev_pts) / 2;
+ ctx->prev_pts = frame->pts;
+ frame->pts += pts_diff;
+ }
+ }
+
+ /* CUVIDs opaque reordering breaks the internal pkt logic.
+ * So set pkt_pts and clear all the other pkt_ fields.
+ */
+#if FF_API_PKT_PTS
+FF_DISABLE_DEPRECATION_WARNINGS
+ frame->pkt_pts = frame->pts;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ av_frame_set_pkt_pos(frame, -1);
+ av_frame_set_pkt_duration(frame, 0);
+ av_frame_set_pkt_size(frame, -1);
+
+ frame->interlaced_frame = !parsed_frame.is_deinterlacing && !parsed_frame.dispinfo.progressive_frame;
+
+ if (frame->interlaced_frame)
+ frame->top_field_first = parsed_frame.dispinfo.top_field_first;
+ } else if (ctx->decoder_flushing) {
+ ret = AVERROR_EOF;
+ } else {
+ ret = AVERROR(EAGAIN);
+ }
+
+error:
+ if (mapped_frame)
+ eret = CHECK_CU(ctx->cvdl->cuvidUnmapVideoFrame(ctx->cudecoder, mapped_frame));
+
+ eret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
+
+ if (eret < 0)
+ return eret;
+ else
+ return ret;
+}
+
+static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
+{
+ CuvidContext *ctx = avctx->priv_data;
+ AVFrame *frame = data;
+ int ret = 0;
+
+ av_log(avctx, AV_LOG_TRACE, "cuvid_decode_frame\n");
+
+ if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave) {
+ av_log(avctx, AV_LOG_ERROR, "Deinterlacing is not supported via the old API\n");
+ return AVERROR(EINVAL);
+ }
+
+ if (!ctx->decoder_flushing) {
+ ret = cuvid_decode_packet(avctx, avpkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = cuvid_output_frame(avctx, frame);
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+ *got_frame = 0;
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ *got_frame = 1;
+ }
+
+ return 0;
+}
+
+static av_cold int cuvid_decode_end(AVCodecContext *avctx)
+{
+ CuvidContext *ctx = avctx->priv_data;
+
+ av_fifo_freep(&ctx->frame_queue);
+
+ if (ctx->bsf)
+ av_bsf_free(&ctx->bsf);
+
+ if (ctx->cuparser)
+ ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);
+
+ if (ctx->cudecoder)
+ ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);
+
+ ctx->cudl = NULL;
+
+ av_buffer_unref(&ctx->hwframe);
+ av_buffer_unref(&ctx->hwdevice);
+
+ cuvid_free_functions(&ctx->cvdl);
+
+ return 0;
+}
+
+static int cuvid_test_dummy_decoder(AVCodecContext *avctx,
+ const CUVIDPARSERPARAMS *cuparseinfo,
+ int probed_width,
+ int probed_height)
+{
+ CuvidContext *ctx = avctx->priv_data;
+ CUVIDDECODECREATEINFO cuinfo;
+ CUvideodecoder cudec = 0;
+ int ret = 0;
+
+ memset(&cuinfo, 0, sizeof(cuinfo));
+
+ cuinfo.CodecType = cuparseinfo->CodecType;
+ cuinfo.ChromaFormat = cudaVideoChromaFormat_420;
+ cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
+
+ cuinfo.ulWidth = probed_width;
+ cuinfo.ulHeight = probed_height;
+ cuinfo.ulTargetWidth = cuinfo.ulWidth;
+ cuinfo.ulTargetHeight = cuinfo.ulHeight;
+
+ cuinfo.target_rect.left = 0;
+ cuinfo.target_rect.top = 0;
+ cuinfo.target_rect.right = cuinfo.ulWidth;
+ cuinfo.target_rect.bottom = cuinfo.ulHeight;
+
+ cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
+ cuinfo.ulNumOutputSurfaces = 1;
+ cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
+ cuinfo.bitDepthMinus8 = 0;
+
+ cuinfo.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave;
+
+ ret = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&cudec, &cuinfo));
+ if (ret < 0)
+ return ret;
+
+ ret = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(cudec));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static av_cold int cuvid_decode_init(AVCodecContext *avctx)
+{
+ CuvidContext *ctx = avctx->priv_data;
+ AVCUDADeviceContext *device_hwctx;
+ AVHWDeviceContext *device_ctx;
+ AVHWFramesContext *hwframe_ctx;
+ CUVIDSOURCEDATAPACKET seq_pkt;
+ CUcontext cuda_ctx = NULL;
+ CUcontext dummy;
+ const AVBitStreamFilter *bsf;
+ int ret = 0;
+
+ enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_NONE };
+
+ int probed_width = avctx->coded_width ? avctx->coded_width : 1280;
+ int probed_height = avctx->coded_height ? avctx->coded_height : 720;
+
+ // Accelerated transcoding scenarios with 'ffmpeg' require that the
+ // pix_fmt be set to AV_PIX_FMT_CUDA early. The sw_pix_fmt, and the
+ // pix_fmt for non-accelerated transcoding, do not need to be correct
+ // but need to be set to something. We arbitrarily pick NV12.
+ ret = ff_get_format(avctx, pix_fmts);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", ret);
+ return ret;
+ }
+ avctx->pix_fmt = ret;
+
+ if (ctx->resize_expr && sscanf(ctx->resize_expr, "%dx%d",
+ &ctx->resize.width, &ctx->resize.height) != 2) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid resize expressions\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+
+ if (ctx->crop_expr && sscanf(ctx->crop_expr, "%dx%dx%dx%d",
+ &ctx->crop.top, &ctx->crop.bottom,
+ &ctx->crop.left, &ctx->crop.right) != 4) {
+ av_log(avctx, AV_LOG_ERROR, "Invalid cropping expressions\n");
+ ret = AVERROR(EINVAL);
+ goto error;
+ }
+
+ ret = cuvid_load_functions(&ctx->cvdl);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Failed loading nvcuvid.\n");
+ goto error;
+ }
+
+ ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
+ if (!ctx->frame_queue) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+
+ if (avctx->hw_frames_ctx) {
+ ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
+ if (!ctx->hwframe) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+
+ hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
+
+ ctx->hwdevice = av_buffer_ref(hwframe_ctx->device_ref);
+ if (!ctx->hwdevice) {
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+ } else {
+ ret = av_hwdevice_ctx_create(&ctx->hwdevice, AV_HWDEVICE_TYPE_CUDA, ctx->cu_gpu, NULL, 0);
+ if (ret < 0)
+ goto error;
+
+ ctx->hwframe = av_hwframe_ctx_alloc(ctx->hwdevice);
+ if (!ctx->hwframe) {
+ av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
+ ret = AVERROR(ENOMEM);
+ goto error;
+ }
+
+ hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
+ }
+
+ device_ctx = hwframe_ctx->device_ctx;
+ device_hwctx = device_ctx->hwctx;
+
+ cuda_ctx = device_hwctx->cuda_ctx;
+ ctx->cudl = device_hwctx->internal->cuda_dl;
+
+ memset(&ctx->cuparseinfo, 0, sizeof(ctx->cuparseinfo));
+ memset(&ctx->cuparse_ext, 0, sizeof(ctx->cuparse_ext));
+ memset(&seq_pkt, 0, sizeof(seq_pkt));
+
+ ctx->cuparseinfo.pExtVideoInfo = &ctx->cuparse_ext;
+
+ switch (avctx->codec->id) {
+#if CONFIG_H264_CUVID_DECODER
+ case AV_CODEC_ID_H264:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_H264;
+ break;
+#endif
+#if CONFIG_HEVC_CUVID_DECODER
+ case AV_CODEC_ID_HEVC:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_HEVC;
+ break;
+#endif
+#if CONFIG_MJPEG_CUVID_DECODER
+ case AV_CODEC_ID_MJPEG:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_JPEG;
+ break;
+#endif
+#if CONFIG_MPEG1_CUVID_DECODER
+ case AV_CODEC_ID_MPEG1VIDEO:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG1;
+ break;
+#endif
+#if CONFIG_MPEG2_CUVID_DECODER
+ case AV_CODEC_ID_MPEG2VIDEO:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG2;
+ break;
+#endif
+#if CONFIG_MPEG4_CUVID_DECODER
+ case AV_CODEC_ID_MPEG4:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_MPEG4;
+ break;
+#endif
+#if CONFIG_VP8_CUVID_DECODER
+ case AV_CODEC_ID_VP8:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_VP8;
+ break;
+#endif
+#if CONFIG_VP9_CUVID_DECODER
+ case AV_CODEC_ID_VP9:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_VP9;
+ break;
+#endif
+#if CONFIG_VC1_CUVID_DECODER
+ case AV_CODEC_ID_VC1:
+ ctx->cuparseinfo.CodecType = cudaVideoCodec_VC1;
+ break;
+#endif
+ default:
+ av_log(avctx, AV_LOG_ERROR, "Invalid CUVID codec!\n");
+ return AVERROR_BUG;
+ }
+
+ if (avctx->codec->id == AV_CODEC_ID_H264 || avctx->codec->id == AV_CODEC_ID_HEVC) {
+ if (avctx->codec->id == AV_CODEC_ID_H264)
+ bsf = av_bsf_get_by_name("h264_mp4toannexb");
+ else
+ bsf = av_bsf_get_by_name("hevc_mp4toannexb");
+
+ if (!bsf) {
+ ret = AVERROR_BSF_NOT_FOUND;
+ goto error;
+ }
+ if (ret = av_bsf_alloc(bsf, &ctx->bsf)) {
+ goto error;
+ }
+ if (((ret = avcodec_parameters_from_context(ctx->bsf->par_in, avctx)) < 0) || ((ret = av_bsf_init(ctx->bsf)) < 0)) {
+ av_bsf_free(&ctx->bsf);
+ goto error;
+ }
+
+ ctx->cuparse_ext.format.seqhdr_data_length = ctx->bsf->par_out->extradata_size;
+ memcpy(ctx->cuparse_ext.raw_seqhdr_data,
+ ctx->bsf->par_out->extradata,
+ FFMIN(sizeof(ctx->cuparse_ext.raw_seqhdr_data), ctx->bsf->par_out->extradata_size));
+ } else if (avctx->extradata_size > 0) {
+ ctx->cuparse_ext.format.seqhdr_data_length = avctx->extradata_size;
+ memcpy(ctx->cuparse_ext.raw_seqhdr_data,
+ avctx->extradata,
+ FFMIN(sizeof(ctx->cuparse_ext.raw_seqhdr_data), avctx->extradata_size));
+ }
+
+ ctx->cuparseinfo.ulMaxNumDecodeSurfaces = ctx->nb_surfaces;
+ ctx->cuparseinfo.ulMaxDisplayDelay = 4;
+ ctx->cuparseinfo.pUserData = avctx;
+ ctx->cuparseinfo.pfnSequenceCallback = cuvid_handle_video_sequence;
+ ctx->cuparseinfo.pfnDecodePicture = cuvid_handle_picture_decode;
+ ctx->cuparseinfo.pfnDisplayPicture = cuvid_handle_picture_display;
+
+ ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
+ if (ret < 0)
+ goto error;
+
+ ret = cuvid_test_dummy_decoder(avctx, &ctx->cuparseinfo,
+ probed_width,
+ probed_height);
+ if (ret < 0)
+ goto error;
+
+ ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
+ if (ret < 0)
+ goto error;
+
+ seq_pkt.payload = ctx->cuparse_ext.raw_seqhdr_data;
+ seq_pkt.payload_size = ctx->cuparse_ext.format.seqhdr_data_length;
+
+ if (seq_pkt.payload && seq_pkt.payload_size) {
+ ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
+ if (ret < 0)
+ goto error;
+ }
+
+ ret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
+ if (ret < 0)
+ goto error;
+
+ ctx->prev_pts = INT64_MIN;
+
+ if (!avctx->pkt_timebase.num || !avctx->pkt_timebase.den)
+ av_log(avctx, AV_LOG_WARNING, "Invalid pkt_timebase, passing timestamps as-is.\n");
+
+ return 0;
+
+error:
+ cuvid_decode_end(avctx);
+ return ret;
+}
+
+static void cuvid_flush(AVCodecContext *avctx)
+{
+ CuvidContext *ctx = avctx->priv_data;
+ AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
+ AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
+ CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
+ CUVIDSOURCEDATAPACKET seq_pkt = { 0 };
+ int ret;
+
+ ret = CHECK_CU(ctx->cudl->cuCtxPushCurrent(cuda_ctx));
+ if (ret < 0)
+ goto error;
+
+ av_fifo_freep(&ctx->frame_queue);
+
+ ctx->frame_queue = av_fifo_alloc(ctx->nb_surfaces * sizeof(CuvidParsedFrame));
+ if (!ctx->frame_queue) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to recreate frame queue on flush\n");
+ return;
+ }
+
+ if (ctx->cudecoder) {
+ ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder);
+ ctx->cudecoder = NULL;
+ }
+
+ if (ctx->cuparser) {
+ ctx->cvdl->cuvidDestroyVideoParser(ctx->cuparser);
+ ctx->cuparser = NULL;
+ }
+
+ ret = CHECK_CU(ctx->cvdl->cuvidCreateVideoParser(&ctx->cuparser, &ctx->cuparseinfo));
+ if (ret < 0)
+ goto error;
+
+ seq_pkt.payload = ctx->cuparse_ext.raw_seqhdr_data;
+ seq_pkt.payload_size = ctx->cuparse_ext.format.seqhdr_data_length;
+
+ if (seq_pkt.payload && seq_pkt.payload_size) {
+ ret = CHECK_CU(ctx->cvdl->cuvidParseVideoData(ctx->cuparser, &seq_pkt));
+ if (ret < 0)
+ goto error;
+ }
+
+ ret = CHECK_CU(ctx->cudl->cuCtxPopCurrent(&dummy));
+ if (ret < 0)
+ goto error;
+
+ ctx->prev_pts = INT64_MIN;
+ ctx->decoder_flushing = 0;
+
+ return;
+ error:
+ av_log(avctx, AV_LOG_ERROR, "CUDA reinit on flush failed\n");
+}
+
+#define OFFSET(x) offsetof(CuvidContext, x)
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+static const AVOption options[] = {
+ { "deint", "Set deinterlacing mode", OFFSET(deint_mode), AV_OPT_TYPE_INT, { .i64 = cudaVideoDeinterlaceMode_Weave }, cudaVideoDeinterlaceMode_Weave, cudaVideoDeinterlaceMode_Adaptive, VD, "deint" },
+ { "weave", "Weave deinterlacing (do nothing)", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Weave }, 0, 0, VD, "deint" },
+ { "bob", "Bob deinterlacing", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Bob }, 0, 0, VD, "deint" },
+ { "adaptive", "Adaptive deinterlacing", 0, AV_OPT_TYPE_CONST, { .i64 = cudaVideoDeinterlaceMode_Adaptive }, 0, 0, VD, "deint" },
+ { "gpu", "GPU to be used for decoding", OFFSET(cu_gpu), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
+ { "surfaces", "Maximum surfaces to be used for decoding", OFFSET(nb_surfaces), AV_OPT_TYPE_INT, { .i64 = 25 }, 0, INT_MAX, VD },
+ { "drop_second_field", "Drop second field when deinterlacing", OFFSET(drop_second_field), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
+ { "crop", "Crop (top)x(bottom)x(left)x(right)", OFFSET(crop_expr), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
+ { "resize", "Resize (width)x(height)", OFFSET(resize_expr), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, VD },
+ { NULL }
+};
+
+#define DEFINE_CUVID_CODEC(x, X) \
+ static const AVClass x##_cuvid_class = { \
+ .class_name = #x "_cuvid", \
+ .item_name = av_default_item_name, \
+ .option = options, \
+ .version = LIBAVUTIL_VERSION_INT, \
+ }; \
+ AVHWAccel ff_##x##_cuvid_hwaccel = { \
+ .name = #x "_cuvid", \
+ .type = AVMEDIA_TYPE_VIDEO, \
+ .id = AV_CODEC_ID_##X, \
+ .pix_fmt = AV_PIX_FMT_CUDA, \
+ }; \
+ AVCodec ff_##x##_cuvid_decoder = { \
+ .name = #x "_cuvid", \
+ .long_name = NULL_IF_CONFIG_SMALL("Nvidia CUVID " #X " decoder"), \
+ .type = AVMEDIA_TYPE_VIDEO, \
+ .id = AV_CODEC_ID_##X, \
+ .priv_data_size = sizeof(CuvidContext), \
+ .priv_class = &x##_cuvid_class, \
+ .init = cuvid_decode_init, \
+ .close = cuvid_decode_end, \
+ .decode = cuvid_decode_frame, \
- .send_packet = cuvid_decode_packet, \
+ .receive_frame = cuvid_output_frame, \
+ .flush = cuvid_flush, \
+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
+ .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_CUDA, \
+ AV_PIX_FMT_NV12, \
+ AV_PIX_FMT_P010, \
+ AV_PIX_FMT_P016, \
+ AV_PIX_FMT_NONE }, \
+ };
+
+#if CONFIG_HEVC_CUVID_DECODER
+DEFINE_CUVID_CODEC(hevc, HEVC)
+#endif
+
+#if CONFIG_H264_CUVID_DECODER
+DEFINE_CUVID_CODEC(h264, H264)
+#endif
+
+#if CONFIG_MJPEG_CUVID_DECODER
+DEFINE_CUVID_CODEC(mjpeg, MJPEG)
+#endif
+
+#if CONFIG_MPEG1_CUVID_DECODER
+DEFINE_CUVID_CODEC(mpeg1, MPEG1VIDEO)
+#endif
+
+#if CONFIG_MPEG2_CUVID_DECODER
+DEFINE_CUVID_CODEC(mpeg2, MPEG2VIDEO)
+#endif
+
+#if CONFIG_MPEG4_CUVID_DECODER
+DEFINE_CUVID_CODEC(mpeg4, MPEG4)
+#endif
+
+#if CONFIG_VP8_CUVID_DECODER
+DEFINE_CUVID_CODEC(vp8, VP8)
+#endif
+
+#if CONFIG_VP9_CUVID_DECODER
+DEFINE_CUVID_CODEC(vp9, VP9)
+#endif
+
+#if CONFIG_VC1_CUVID_DECODER
+DEFINE_CUVID_CODEC(vc1, VC1)
+#endif
diff --cc libavcodec/decode.c
index 5c8b4cbf56,a1908ecf4b..bc0ab7a5ca
--- a/libavcodec/decode.c
+++ b/libavcodec/decode.c
@@@ -23,12 -23,8 +23,13 @@@
#include "config.h"
+#if CONFIG_ICONV
+# include <iconv.h>
+#endif
+
#include "libavutil/avassert.h"
+ #include "libavutil/avstring.h"
+#include "libavutil/bprint.h"
#include "libavutil/common.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
@@@ -177,663 -154,295 +179,642 @@@ static int unrefcount_frame(AVCodecInte
return 0;
}
+ int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
+ {
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ if (avci->draining)
+ return AVERROR_EOF;
+
+ if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data_elems)
+ return AVERROR(EAGAIN);
+
+ av_packet_move_ref(pkt, avci->buffer_pkt);
+
+ ret = extract_packet_props(avctx->internal, pkt);
+ if (ret < 0)
+ goto finish;
+
+ ret = apply_param_change(avctx, pkt);
+ if (ret < 0)
+ goto finish;
+
+ if (avctx->codec->receive_frame)
+ avci->compat_decode_consumed += pkt->size;
+
+ return 0;
+ finish:
+ av_packet_unref(pkt);
+ return ret;
+ }
+
+/**
+ * Attempt to guess proper monotonic timestamps for decoded video frames
+ * which might have incorrect times. Input timestamps may wrap around, in
+ * which case the output will as well.
+ *
+ * @param pts the pts field of the decoded AVPacket, as passed through
+ * AVFrame.pts
+ * @param dts the dts field of the decoded AVPacket
+ * @return one of the input values, may be AV_NOPTS_VALUE
+ */
+static int64_t guess_correct_pts(AVCodecContext *ctx,
+ int64_t reordered_pts, int64_t dts)
+{
+ int64_t pts = AV_NOPTS_VALUE;
+
+ if (dts != AV_NOPTS_VALUE) {
+ ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
+ ctx->pts_correction_last_dts = dts;
+ } else if (reordered_pts != AV_NOPTS_VALUE)
+ ctx->pts_correction_last_dts = reordered_pts;
+
+ if (reordered_pts != AV_NOPTS_VALUE) {
+ ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
+ ctx->pts_correction_last_pts = reordered_pts;
+ } else if(dts != AV_NOPTS_VALUE)
+ ctx->pts_correction_last_pts = dts;
+
+ if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
+ && reordered_pts != AV_NOPTS_VALUE)
+ pts = reordered_pts;
+ else
+ pts = dts;
+
+ return pts;
+}
+
- static int do_decode(AVCodecContext *avctx, AVPacket *pkt)
+ /*
+ * The core of the receive_frame_wrapper for the decoders implementing
+ * the simple API. Certain decoders might consume partial packets without
+ * returning any output, so this function needs to be called in a loop until it
+ * returns EAGAIN.
+ **/
+ static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
{
- int got_frame = 0;
+ AVCodecInternal *avci = avctx->internal;
+ DecodeSimpleContext *ds = &avci->ds;
+ AVPacket *pkt = ds->in_pkt;
- int got_frame;
++ // copy to ensure we do not change pkt
++ AVPacket tmp;
++ int got_frame, did_split;
int ret;
- av_assert0(!avctx->internal->buffer_frame->buf[0]);
-
- if (!pkt)
- pkt = avctx->internal->buffer_pkt;
-
- // This is the lesser evil. The field is for compatibility with legacy users
- // of the legacy API, and users using the new API should not be forced to
- // even know about this field.
- avctx->refcounted_frames = 1;
+ if (!pkt->data && !avci->draining) {
+ av_packet_unref(pkt);
+ ret = ff_decode_get_packet(avctx, pkt);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+ }
// Some codecs (at least wma lossless) will crash when feeding drain packets
// after EOF was signaled.
- if (avctx->internal->draining_done)
+ if (avci->draining_done)
return AVERROR_EOF;
- if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
- ret = avcodec_decode_video2(avctx, avctx->internal->buffer_frame,
- &got_frame, pkt);
- if (ret >= 0 && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
- ret = pkt->size;
- } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
- ret = avcodec_decode_audio4(avctx, avctx->internal->buffer_frame,
- &got_frame, pkt);
- } else {
- ret = AVERROR(EINVAL);
- }
-
- if (ret == AVERROR(EAGAIN))
- ret = pkt->size;
-
- if (avctx->internal->draining && !got_frame)
- avctx->internal->draining_done = 1;
-
- if (ret < 0)
- return ret;
-
- if (ret >= pkt->size) {
- av_packet_unref(avctx->internal->buffer_pkt);
- } else {
- int consumed = ret;
-
- if (pkt != avctx->internal->buffer_pkt) {
- av_packet_unref(avctx->internal->buffer_pkt);
- if ((ret = av_packet_ref(avctx->internal->buffer_pkt, pkt)) < 0)
- return ret;
- }
-
- avctx->internal->buffer_pkt->data += consumed;
- avctx->internal->buffer_pkt->size -= consumed;
- avctx->internal->buffer_pkt->pts = AV_NOPTS_VALUE;
- avctx->internal->buffer_pkt->dts = AV_NOPTS_VALUE;
- }
-
- if (got_frame)
- av_assert0(avctx->internal->buffer_frame->buf[0]);
-
- return 0;
- }
-
- int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
- {
- int ret;
-
- if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
- return AVERROR(EINVAL);
-
- if (avctx->internal->draining)
+ if (!pkt->data &&
+ !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
+ avctx->active_thread_type & FF_THREAD_FRAME))
return AVERROR_EOF;
- if (avpkt && !avpkt->size && avpkt->data)
- return AVERROR(EINVAL);
-
- if (!avpkt || !avpkt->size) {
- avctx->internal->draining = 1;
- avpkt = NULL;
-
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- return 0;
- }
-
- if (avctx->codec->send_packet) {
- if (avpkt) {
- AVPacket tmp = *avpkt;
++ tmp = *pkt;
+#if FF_API_MERGE_SD
+FF_DISABLE_DEPRECATION_WARNINGS
- int did_split = av_packet_split_side_data(&tmp);
- FF_ENABLE_DEPRECATION_WARNINGS
- #endif
- ret = apply_param_change(avctx, &tmp);
- if (ret >= 0)
- ret = avctx->codec->send_packet(avctx, &tmp);
- #if FF_API_MERGE_SD
- if (did_split)
- av_packet_free_side_data(&tmp);
- #endif
- return ret;
- } else {
- return avctx->codec->send_packet(avctx, NULL);
- }
- }
-
- // Emulation via old API. Assume avpkt is likely not refcounted, while
- // decoder output is always refcounted, and avoid copying.
-
- if (avctx->internal->buffer_pkt->size || avctx->internal->buffer_frame->buf[0])
- return AVERROR(EAGAIN);
-
- // The goal is decoding the first frame of the packet without using memcpy,
- // because the common case is having only 1 frame per packet (especially
- // with video, but audio too). In other cases, it can't be avoided, unless
- // the user is feeding refcounted packets.
- return do_decode(avctx, (AVPacket *)avpkt);
- }
-
- int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
- {
- int ret;
-
- av_frame_unref(frame);
-
- if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
- return AVERROR(EINVAL);
-
- if (avctx->codec->receive_frame) {
- if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
- return AVERROR_EOF;
- ret = avctx->codec->receive_frame(avctx, frame);
- if (ret >= 0) {
- if (av_frame_get_best_effort_timestamp(frame) == AV_NOPTS_VALUE) {
- av_frame_set_best_effort_timestamp(frame,
- guess_correct_pts(avctx, frame->pts, frame->pkt_dts));
- }
- }
- return ret;
- }
-
- // Emulation via old API.
-
- if (!avctx->internal->buffer_frame->buf[0]) {
- if (!avctx->internal->buffer_pkt->size && !avctx->internal->draining)
- return AVERROR(EAGAIN);
-
- while (1) {
- if ((ret = do_decode(avctx, avctx->internal->buffer_pkt)) < 0) {
- av_packet_unref(avctx->internal->buffer_pkt);
- return ret;
- }
- // Some audio decoders may consume partial data without returning
- // a frame (fate-wmapro-2ch). There is no way to make the caller
- // call avcodec_receive_frame() again without returning a frame,
- // so try to decode more in these cases.
- if (avctx->internal->buffer_frame->buf[0] ||
- !avctx->internal->buffer_pkt->size)
- break;
- }
- }
-
- if (!avctx->internal->buffer_frame->buf[0])
- return avctx->internal->draining ? AVERROR_EOF : AVERROR(EAGAIN);
-
- av_frame_move_ref(frame, avctx->internal->buffer_frame);
- return 0;
- }
-
- int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
- int *got_picture_ptr,
- const AVPacket *avpkt)
- {
- AVCodecInternal *avci = avctx->internal;
- int ret;
- // copy to ensure we do not change avpkt
- AVPacket tmp = *avpkt;
++ did_split = av_packet_split_side_data(&tmp);
+
- if (!avctx->codec)
- return AVERROR(EINVAL);
- if (avctx->codec->type != AVMEDIA_TYPE_VIDEO) {
- av_log(avctx, AV_LOG_ERROR, "Invalid media type for video\n");
- return AVERROR(EINVAL);
- }
++ if (did_split) {
++ ret = extract_packet_props(avctx->internal, &tmp);
++ if (ret < 0)
++ return ret;
+
- if (!avctx->codec->decode) {
- av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
- return AVERROR(ENOSYS);
++ ret = apply_param_change(avctx, &tmp);
++ if (ret < 0)
++ return ret;
+ }
-
- *got_picture_ptr = 0;
- if ((avctx->coded_width || avctx->coded_height) && av_image_check_size2(avctx->coded_width, avctx->coded_height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
- return AVERROR(EINVAL);
-
- ret = extract_packet_props(avci, avpkt);
- if (ret < 0)
- return ret;
- ret = apply_param_change(avctx, avpkt);
- if (ret < 0)
- return ret;
-
- av_frame_unref(picture);
-
- if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size ||
- (avctx->active_thread_type & FF_THREAD_FRAME)) {
- #if FF_API_MERGE_SD
- FF_DISABLE_DEPRECATION_WARNINGS
- int did_split = av_packet_split_side_data(&tmp);
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
- ret = apply_param_change(avctx, &tmp);
- if (ret < 0)
- goto fail;
+
- ret = extract_packet_props(avci, &tmp);
- if (ret < 0)
- return ret;
- if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
- ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
- &tmp);
- else {
- ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
- &tmp);
- if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
- picture->pkt_dts = avpkt->dts;
+ got_frame = 0;
- if(!avctx->has_b_frames){
- av_frame_set_pkt_pos(picture, avpkt->pos);
- }
+ if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
- ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
++ ret = ff_thread_decode_frame(avctx, frame, &got_frame, &tmp);
+ } else {
- ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
-
- if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
++ ret = avctx->codec->decode(avctx, frame, &got_frame, &tmp);
++
++ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
++ if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
++ frame->pkt_dts = pkt->dts;
++ if(!avctx->has_b_frames)
++ av_frame_set_pkt_pos(frame, pkt->pos);
+ //FIXME these should be under if(!avctx->has_b_frames)
+ /* get_buffer is supposed to set frame parameters */
+ if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
- if (!picture->sample_aspect_ratio.num) picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
- if (!picture->width) picture->width = avctx->width;
- if (!picture->height) picture->height = avctx->height;
- if (picture->format == AV_PIX_FMT_NONE) picture->format = avctx->pix_fmt;
++ if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
++ if (!frame->width) frame->width = avctx->width;
++ if (!frame->height) frame->height = avctx->height;
++ if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
+ }
++ } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ frame->pkt_dts = pkt->dts;
- /* get_buffer is supposed to set frame parameters */
- if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
- frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
- frame->width = avctx->width;
- frame->height = avctx->height;
- frame->format = avctx->codec->type == AVMEDIA_TYPE_VIDEO ?
- avctx->pix_fmt : avctx->sample_fmt;
}
-
- fail:
- emms_c(); //needed to avoid an emms_c() call before every return;
-
- #if FF_API_MERGE_SD
- if (did_split) {
- av_packet_free_side_data(&tmp);
- if(ret == tmp.size)
- ret = avpkt->size;
- }
- #endif
- if (picture->flags & AV_FRAME_FLAG_DISCARD) {
- *got_picture_ptr = 0;
- }
- if (*got_picture_ptr) {
- if (!avctx->refcounted_frames) {
- int err = unrefcount_frame(avci, picture);
- if (err < 0)
- return err;
- }
-
- avctx->frame_number++;
- av_frame_set_best_effort_timestamp(picture,
- guess_correct_pts(avctx,
- picture->pts,
- picture->pkt_dts));
- } else
- av_frame_unref(picture);
- } else
- ret = 0;
-
- /* many decoders assign whole AVFrames, thus overwriting extended_data;
- * make sure it's set correctly */
- av_assert0(!picture->extended_data || picture->extended_data == picture->data);
-
- #if FF_API_AVCTX_TIMEBASE
- if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
- avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
- #endif
-
- return ret;
- }
-
- int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
- AVFrame *frame,
- int *got_frame_ptr,
- const AVPacket *avpkt)
- {
- AVCodecInternal *avci = avctx->internal;
- int ret = 0;
-
- *got_frame_ptr = 0;
-
- if (!avctx->codec)
- return AVERROR(EINVAL);
-
- if (!avctx->codec->decode) {
- av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
- return AVERROR(ENOSYS);
- }
-
- if (!avpkt->data && avpkt->size) {
- av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
- return AVERROR(EINVAL);
- }
- if (avctx->codec->type != AVMEDIA_TYPE_AUDIO) {
- av_log(avctx, AV_LOG_ERROR, "Invalid media type for audio\n");
- return AVERROR(EINVAL);
}
-
+ emms_c();
- av_frame_unref(frame);
-
- if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) {
++ if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
++ if (frame->flags & AV_FRAME_FLAG_DISCARD)
++ got_frame = 0;
++ if (got_frame)
++ av_frame_set_best_effort_timestamp(frame,
++ guess_correct_pts(avctx,
++ frame->pts,
++ frame->pkt_dts));
++ } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
+ uint8_t *side;
+ int side_size;
+ uint32_t discard_padding = 0;
+ uint8_t skip_reason = 0;
+ uint8_t discard_reason = 0;
- // copy to ensure we do not change avpkt
- AVPacket tmp = *avpkt;
- #if FF_API_MERGE_SD
- FF_DISABLE_DEPRECATION_WARNINGS
- int did_split = av_packet_split_side_data(&tmp);
- FF_ENABLE_DEPRECATION_WARNINGS
- #endif
- ret = apply_param_change(avctx, &tmp);
- if (ret < 0)
- goto fail;
+
- ret = extract_packet_props(avci, &tmp);
- if (ret < 0)
- return ret;
- if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
- ret = ff_thread_decode_frame(avctx, frame, got_frame_ptr, &tmp);
- else {
- ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp);
- av_assert0(ret <= tmp.size);
- frame->pkt_dts = avpkt->dts;
- }
- if (ret >= 0 && *got_frame_ptr) {
- avctx->frame_number++;
++ if (ret >= 0 && got_frame) {
+ av_frame_set_best_effort_timestamp(frame,
+ guess_correct_pts(avctx,
+ frame->pts,
+ frame->pkt_dts));
+ if (frame->format == AV_SAMPLE_FMT_NONE)
+ frame->format = avctx->sample_fmt;
+ if (!frame->channel_layout)
+ frame->channel_layout = avctx->channel_layout;
+ if (!av_frame_get_channels(frame))
+ av_frame_set_channels(frame, avctx->channels);
+ if (!frame->sample_rate)
+ frame->sample_rate = avctx->sample_rate;
+ }
+
- side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
++ side= av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
+ if(side && side_size>=10) {
+ avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
+ discard_padding = AV_RL32(side + 4);
+ av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
+ avctx->internal->skip_samples, (int)discard_padding);
+ skip_reason = AV_RL8(side + 8);
+ discard_reason = AV_RL8(side + 9);
+ }
+
- if ((frame->flags & AV_FRAME_FLAG_DISCARD) && *got_frame_ptr &&
++ if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
+ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
- *got_frame_ptr = 0;
++ got_frame = 0;
+ }
+
- if (avctx->internal->skip_samples > 0 && *got_frame_ptr &&
++ if (avctx->internal->skip_samples > 0 && got_frame &&
+ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ if(frame->nb_samples <= avctx->internal->skip_samples){
- *got_frame_ptr = 0;
++ got_frame = 0;
+ avctx->internal->skip_samples -= frame->nb_samples;
+ av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
+ avctx->internal->skip_samples);
+ } else {
+ av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
+ frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
+ if(avctx->pkt_timebase.num && avctx->sample_rate) {
+ int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
+ (AVRational){1, avctx->sample_rate},
+ avctx->pkt_timebase);
+ if(frame->pts!=AV_NOPTS_VALUE)
+ frame->pts += diff_ts;
+#if FF_API_PKT_PTS
+FF_DISABLE_DEPRECATION_WARNINGS
+ if(frame->pkt_pts!=AV_NOPTS_VALUE)
+ frame->pkt_pts += diff_ts;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+ if(frame->pkt_dts!=AV_NOPTS_VALUE)
+ frame->pkt_dts += diff_ts;
+ if (av_frame_get_pkt_duration(frame) >= diff_ts)
+ av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
+ }
+ av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
+ avctx->internal->skip_samples, frame->nb_samples);
+ frame->nb_samples -= avctx->internal->skip_samples;
+ avctx->internal->skip_samples = 0;
+ }
+ }
+
- if (discard_padding > 0 && discard_padding <= frame->nb_samples && *got_frame_ptr &&
++ if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
+ !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
+ if (discard_padding == frame->nb_samples) {
- *got_frame_ptr = 0;
++ got_frame = 0;
+ } else {
+ if(avctx->pkt_timebase.num && avctx->sample_rate) {
+ int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
+ (AVRational){1, avctx->sample_rate},
+ avctx->pkt_timebase);
+ av_frame_set_pkt_duration(frame, diff_ts);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
+ }
+ av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
+ (int)discard_padding, frame->nb_samples);
+ frame->nb_samples -= discard_padding;
+ }
+ }
+
- if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && *got_frame_ptr) {
++ if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
+ AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
+ if (fside) {
+ AV_WL32(fside->data, avctx->internal->skip_samples);
+ AV_WL32(fside->data + 4, discard_padding);
+ AV_WL8(fside->data + 8, skip_reason);
+ AV_WL8(fside->data + 9, discard_reason);
+ avctx->internal->skip_samples = 0;
+ }
+ }
- fail:
++ }
+#if FF_API_MERGE_SD
- if (did_split) {
- av_packet_free_side_data(&tmp);
- if(ret == tmp.size)
- ret = avpkt->size;
- }
++ if (did_split) {
++ av_packet_free_side_data(&tmp);
++ if(ret == tmp.size)
++ ret = pkt->size;
++ }
+#endif
+
- if (ret >= 0 && *got_frame_ptr) {
++ if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
++ !avci->showed_multi_packet_warning &&
++ ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
++ av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
++ avci->showed_multi_packet_warning = 1;
++ }
++
+ if (!got_frame)
+ av_frame_unref(frame);
+
- if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
++ if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
+ ret = pkt->size;
+
+ #if FF_API_AVCTX_TIMEBASE
+ if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
- avctx->time_base = av_inv_q(avctx->framerate);
++ avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
+ #endif
+
+ if (avctx->internal->draining && !got_frame)
+ avci->draining_done = 1;
+
+ avci->compat_decode_consumed += ret;
+
+ if (ret >= pkt->size || ret < 0) {
+ av_packet_unref(pkt);
+ } else {
+ int consumed = ret;
+
+ pkt->data += consumed;
+ pkt->size -= consumed;
+ pkt->pts = AV_NOPTS_VALUE;
+ pkt->dts = AV_NOPTS_VALUE;
+ avci->last_pkt_props->pts = AV_NOPTS_VALUE;
+ avci->last_pkt_props->dts = AV_NOPTS_VALUE;
+ }
+
+ if (got_frame)
+ av_assert0(frame->buf[0]);
+
+ return ret < 0 ? ret : 0;
+ }
+
+ static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+ {
+ int ret;
+
+ while (!frame->buf[0]) {
+ ret = decode_simple_internal(avctx, frame);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
+ {
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ av_assert0(!frame->buf[0]);
+
+ if (avctx->codec->receive_frame)
+ ret = avctx->codec->receive_frame(avctx, frame);
+ else
+ ret = decode_simple_receive_frame(avctx, frame);
+
+ if (ret == AVERROR_EOF)
+ avci->draining_done = 1;
+
+ return ret;
+ }
+
+ int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
+ {
+ AVCodecInternal *avci = avctx->internal;
- int ret = 0;
++ int ret;
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avctx->internal->draining)
+ return AVERROR_EOF;
+
+ if (avci->buffer_pkt->data || avci->buffer_pkt->side_data_elems)
+ return AVERROR(EAGAIN);
+
++ if (avpkt && !avpkt->size && avpkt->data)
++ return AVERROR(EINVAL);
++
+ if (!avpkt || !avpkt->size) {
+ avctx->internal->draining = 1;
+ } else {
+ ret = av_packet_ref(avci->buffer_pkt, avpkt);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!avci->buffer_frame->buf[0]) {
+ ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
+ if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+ {
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ av_frame_unref(frame);
+
+ if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
+ return AVERROR(EINVAL);
+
+ if (avci->buffer_frame->buf[0]) {
+ av_frame_move_ref(frame, avci->buffer_frame);
+ } else {
+ ret = decode_receive_frame_internal(avctx, frame);
+ if (ret < 0)
+ return ret;
+ }
+
+ avctx->frame_number++;
+
+ return 0;
+ }
+
+ static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
- int *got_frame, AVPacket *pkt)
++ int *got_frame, const AVPacket *pkt)
+ {
+ AVCodecInternal *avci = avctx->internal;
+ int ret;
+
+ av_assert0(avci->compat_decode_consumed == 0);
+
+ *got_frame = 0;
+ avci->compat_decode = 1;
+
+ if (avci->compat_decode_partial_size > 0 &&
+ avci->compat_decode_partial_size != pkt->size) {
+ av_log(avctx, AV_LOG_ERROR,
+ "Got unexpected packet size after a partial decode\n");
+ ret = AVERROR(EINVAL);
+ goto finish;
+ }
+
+ if (!avci->compat_decode_partial_size) {
+ ret = avcodec_send_packet(avctx, pkt);
+ if (ret == AVERROR_EOF)
+ ret = 0;
+ else if (ret == AVERROR(EAGAIN)) {
+ /* we fully drain all the output in each decode call, so this should not
+ * ever happen */
+ ret = AVERROR_BUG;
+ goto finish;
+ } else if (ret < 0)
+ goto finish;
+ }
+
+ while (ret >= 0) {
+ ret = avcodec_receive_frame(avctx, frame);
+ if (ret < 0) {
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ ret = 0;
+ goto finish;
+ }
+
+ if (frame != avci->compat_decode_frame) {
if (!avctx->refcounted_frames) {
- int err = unrefcount_frame(avci, frame);
- if (err < 0)
- return err;
+ ret = unrefcount_frame(avci, frame);
+ if (ret < 0)
+ goto finish;
}
- } else
- av_frame_unref(frame);
- }
- av_assert0(ret <= avpkt->size);
+ *got_frame = 1;
+ frame = avci->compat_decode_frame;
+ } else {
+ if (!avci->compat_decode_warned) {
+ av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
+ "API cannot return all the frames for this decoder. "
+ "Some frames will be dropped. Update your code to the "
+ "new decoding API to fix this.\n");
+ avci->compat_decode_warned = 1;
+ }
+ }
- if (!avci->showed_multi_packet_warning &&
- ret >= 0 && ret != avpkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
- av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
- avci->showed_multi_packet_warning = 1;
+ if (avci->draining || avci->compat_decode_consumed < pkt->size)
+ break;
}
+ finish:
+ if (ret == 0)
+ ret = FFMIN(avci->compat_decode_consumed, pkt->size);
+ avci->compat_decode_consumed = 0;
+ avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
+
return ret;
}
+ int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
- AVPacket *avpkt)
++ const AVPacket *avpkt)
+ {
+ return compat_decode(avctx, picture, got_picture_ptr, avpkt);
+ }
+
+ int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
+ AVFrame *frame,
+ int *got_frame_ptr,
- AVPacket *avpkt)
++ const AVPacket *avpkt)
+ {
+ return compat_decode(avctx, frame, got_frame_ptr, avpkt);
+ }
+
+static void get_subtitle_defaults(AVSubtitle *sub)
+{
+ memset(sub, 0, sizeof(*sub));
+ sub->pts = AV_NOPTS_VALUE;
+}
+
+#define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
+static int recode_subtitle(AVCodecContext *avctx,
+ AVPacket *outpkt, const AVPacket *inpkt)
+{
+#if CONFIG_ICONV
+ iconv_t cd = (iconv_t)-1;
+ int ret = 0;
+ char *inb, *outb;
+ size_t inl, outl;
+ AVPacket tmp;
+#endif
+
+ if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
+ return 0;
+
+#if CONFIG_ICONV
+ cd = iconv_open("UTF-8", avctx->sub_charenc);
+ av_assert0(cd != (iconv_t)-1);
+
+ inb = inpkt->data;
+ inl = inpkt->size;
+
+ if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
+ av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
+ ret = AVERROR(ENOMEM);
+ goto end;
+ }
+
+ ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
+ if (ret < 0)
+ goto end;
+ outpkt->buf = tmp.buf;
+ outpkt->data = tmp.data;
+ outpkt->size = tmp.size;
+ outb = outpkt->data;
+ outl = outpkt->size;
+
+ if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
+ iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
+ outl >= outpkt->size || inl != 0) {
+ ret = FFMIN(AVERROR(errno), -1);
+ av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
+ "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
+ av_packet_unref(&tmp);
+ goto end;
+ }
+ outpkt->size -= outl;
+ memset(outpkt->data + outpkt->size, 0, outl);
+
+end:
+ if (cd != (iconv_t)-1)
+ iconv_close(cd);
+ return ret;
+#else
+ av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
+ return AVERROR(EINVAL);
+#endif
+}
+
+static int utf8_check(const uint8_t *str)
+{
+ const uint8_t *byte;
+ uint32_t codepoint, min;
+
+ while (*str) {
+ byte = str;
+ GET_UTF8(codepoint, *(byte++), return 0;);
+ min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
+ 1 << (5 * (byte - str) - 4);
+ if (codepoint < min || codepoint >= 0x110000 ||
+ codepoint == 0xFFFE /* BOM */ ||
+ codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
+ return 0;
+ str = byte;
+ }
+ return 1;
+}
+
+#if FF_API_ASS_TIMING
+static void insert_ts(AVBPrint *buf, int ts)
+{
+ if (ts == -1) {
+ av_bprintf(buf, "9:59:59.99,");
+ } else {
+ int h, m, s;
+
+ h = ts/360000; ts -= 360000*h;
+ m = ts/ 6000; ts -= 6000*m;
+ s = ts/ 100; ts -= 100*s;
+ av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
+ }
+}
+
+static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
+{
+ int i;
+ AVBPrint buf;
+
+ av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
+
+ for (i = 0; i < sub->num_rects; i++) {
+ char *final_dialog;
+ const char *dialog;
+ AVSubtitleRect *rect = sub->rects[i];
+ int ts_start, ts_duration = -1;
+ long int layer;
+
+ if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
+ continue;
+
+ av_bprint_clear(&buf);
+
+ /* skip ReadOrder */
+ dialog = strchr(rect->ass, ',');
+ if (!dialog)
+ continue;
+ dialog++;
+
+ /* extract Layer or Marked */
+ layer = strtol(dialog, (char**)&dialog, 10);
+ if (*dialog != ',')
+ continue;
+ dialog++;
+
+ /* rescale timing to ASS time base (ms) */
+ ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
+ if (pkt->duration != -1)
+ ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
+ sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
+
+ /* construct ASS (standalone file form with timestamps) string */
+ av_bprintf(&buf, "Dialogue: %ld,", layer);
+ insert_ts(&buf, ts_start);
+ insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
+ av_bprintf(&buf, "%s\r\n", dialog);
+
+ final_dialog = av_strdup(buf.str);
+ if (!av_bprint_is_complete(&buf) || !final_dialog) {
+ av_freep(&final_dialog);
+ av_bprint_finalize(&buf, NULL);
+ return AVERROR(ENOMEM);
+ }
+ av_freep(&rect->ass);
+ rect->ass = final_dialog;
+ }
+
+ av_bprint_finalize(&buf, NULL);
+ return 0;
+}
+#endif
+
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
int *got_sub_ptr,
AVPacket *avpkt)
diff --cc libavcodec/decode.h
index 0000000000,21c7c3e07a..20a46b692a
mode 000000,100644..100644
--- a/libavcodec/decode.h
+++ b/libavcodec/decode.h
@@@ -1,0 -1,35 +1,35 @@@
+ /*
+ * generic decoding-related code
+ *
- * This file is part of Libav.
++ * This file is part of FFmpeg.
+ *
- * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
- * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ #ifndef AVCODEC_DECODE_H
+ #define AVCODEC_DECODE_H
+
+ /**
+ * Called by decoders to get the next packet for decoding.
+ *
+ * @param pkt An empty packet to be filled with data.
+ * @return 0 if a new reference has been successfully written to pkt
+ * AVERROR(EAGAIN) if no data is currently available
+ * AVERROR_EOF if and end of stream has been reached, so no more data
+ * will be available
+ */
+ int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt);
+
+ #endif /* AVCODEC_DECODE_H */
diff --cc libavcodec/internal.h
index 90a887332e,dc24e8f764..2fd27d8431
--- a/libavcodec/internal.h
+++ b/libavcodec/internal.h
@@@ -173,9 -160,16 +180,20 @@@ typedef struct AVCodecInternal
int buffer_pkt_valid; // encoding: packet without data can be valid
AVFrame *buffer_frame;
int draining_done;
+ /* set to 1 when the caller is using the old decoding API */
+ int compat_decode;
+ int compat_decode_warned;
+ /* this variable is set by the decoder internals to signal to the old
+ * API compat wrappers the amount of data consumed from the last packet */
+ size_t compat_decode_consumed;
+ /* when a partial packet has been consumed, this stores the remaining size
+ * of the packet (that should be submitted in the next decode call */
+ size_t compat_decode_partial_size;
+ AVFrame *compat_decode_frame;
++
+ int showed_multi_packet_warning;
+
+ int skip_samples_multiplier;
} AVCodecInternal;
struct AVCodecDefault {
diff --cc libavcodec/utils.c
index 6a68971d68,8a422d7669..e50d640976
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@@ -1160,9 -773,8 +1176,10 @@@ av_cold int avcodec_close(AVCodecContex
ff_thread_free(avctx);
if (avctx->codec && avctx->codec->close)
avctx->codec->close(avctx);
+ avctx->internal->byte_buffer_size = 0;
+ av_freep(&avctx->internal->byte_buffer);
av_frame_free(&avctx->internal->to_free);
+ av_frame_free(&avctx->internal->compat_decode_frame);
av_frame_free(&avctx->internal->buffer_frame);
av_packet_free(&avctx->internal->buffer_pkt);
av_packet_free(&avctx->internal->last_pkt_props);
More information about the ffmpeg-cvslog
mailing list