[FFmpeg-cvslog] Merge commit '7b917041184874e7d7cba4450813de7e0bb28a33'

James Almer git at videolan.org
Sun Oct 22 05:41:52 EEST 2017


ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Sat Oct 21 23:40:03 2017 -0300 | [c68a3ab96ec0497ae2d627ddd30c61737d18173e] | committer: James Almer

Merge commit '7b917041184874e7d7cba4450813de7e0bb28a33'

* commit '7b917041184874e7d7cba4450813de7e0bb28a33':
  lavc: Drop deprecated VDPAU codec capability

Merged-by: James Almer <jamrial at gmail.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=c68a3ab96ec0497ae2d627ddd30c61737d18173e
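With AV_CODEC_CAP_HWACCEL_VDPAU and the vdpau_render_state path removed, VDPAU decoding in libavcodec is reached only through the generic hwaccel infrastructure: a get_format callback that selects AV_PIX_FMT_VDPAU, plus an AVHWDeviceContext attached to the decoder. The sketch below is only an illustration of that replacement path, not part of the commit; error handling is trimmed and the helper names (get_vdpau_format, open_vdpau_decoder) are hypothetical.

#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
#include <libavutil/hwcontext.h>

/* get_format callback: prefer the VDPAU hwaccel pixel format if the
 * decoder offers it, otherwise fall back to the first (software) format. */
static enum AVPixelFormat get_vdpau_format(AVCodecContext *avctx,
                                           const enum AVPixelFormat *fmts)
{
    const enum AVPixelFormat *p;
    for (p = fmts; *p != AV_PIX_FMT_NONE; p++)
        if (*p == AV_PIX_FMT_VDPAU)
            return *p;
    return fmts[0];
}

/* Open a decoder with VDPAU acceleration via the generic hwaccel path. */
static int open_vdpau_decoder(AVCodecContext *avctx, const AVCodec *codec)
{
    AVBufferRef *device = NULL;
    int ret;

    /* Create a VDPAU device context; a NULL device string typically
     * means the default X11 display. */
    ret = av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VDPAU,
                                 NULL, NULL, 0);
    if (ret < 0)
        return ret;

    avctx->hw_device_ctx = av_buffer_ref(device);
    avctx->get_format    = get_vdpau_format;

    ret = avcodec_open2(avctx, codec, NULL);
    av_buffer_unref(&device);
    return ret;
}

Frames decoded this way come out as AV_PIX_FMT_VDPAU, with data[3] referencing a VdpVideoSurface, which is what supersedes the vdpau_render_state callback scheme dropped in this commit.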
---

 libavcodec/avcodec.h          | 14 +-------------
 libavcodec/decode.c           |  4 ----
 libavcodec/error_resilience.c |  3 ---
 libavcodec/h263dec.c          |  7 -------
 libavcodec/h264_picture.c     | 12 ------------
 libavcodec/h264_slice.c       | 23 ++---------------------
 libavcodec/h264dec.c          | 21 ---------------------
 libavcodec/mpegpicture.c      |  6 +-----
 libavcodec/mpegvideo.c        | 12 ++----------
 libavcodec/vc1dec.c           | 38 +++-----------------------------------
 libavcodec/vdpau.h            | 34 ----------------------------------
 libavcodec/version.h          |  3 ---
 12 files changed, 9 insertions(+), 168 deletions(-)

diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 6922b5b6fc..076332b6f1 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -1031,13 +1031,6 @@ typedef struct RcOverride{
  */
 #define AV_CODEC_CAP_SMALL_LAST_FRAME    (1 <<  6)
 
-#if FF_API_CAP_VDPAU
-/**
- * Codec can export data for HW decoding (VDPAU).
- */
-#define AV_CODEC_CAP_HWACCEL_VDPAU       (1 <<  7)
-#endif
-
 /**
  * Codec can output multiple frames per AVPacket
  * Normally demuxers return one frame at a time, demuxers which do not do
@@ -1222,12 +1215,7 @@ typedef struct RcOverride{
  * This can be used to prevent truncation of the last audio samples.
  */
 #define CODEC_CAP_SMALL_LAST_FRAME AV_CODEC_CAP_SMALL_LAST_FRAME
-#if FF_API_CAP_VDPAU
-/**
- * Codec can export data for HW decoding (VDPAU).
- */
-#define CODEC_CAP_HWACCEL_VDPAU    AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
+
 /**
  * Codec can output multiple frames per AVPacket
  * Normally demuxers return one frame at a time, demuxers which do not do
diff --git a/libavcodec/decode.c b/libavcodec/decode.c
index 1337ffb527..7f08f0ec18 100644
--- a/libavcodec/decode.c
+++ b/libavcodec/decode.c
@@ -1214,10 +1214,6 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
 
         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
             break;
-#if FF_API_CAP_VDPAU
-        if (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
-            break;
-#endif
 
         if (avctx->hw_frames_ctx) {
             AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c
index 5364940e94..0c7f29d171 100644
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@ -814,9 +814,6 @@ void ff_er_frame_start(ERContext *s)
 static int er_supported(ERContext *s)
 {
     if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice           ||
-#if FF_API_CAP_VDPAU
-       s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU          ||
-#endif
        !s->cur_pic.f                                                  ||
        s->cur_pic.field_picture
     )
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index bcb2b08bb0..288a0cd431 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -603,13 +603,6 @@ retry:
     if (!s->divx_packed)
         ff_thread_finish_setup(avctx);
 
-#if FF_API_CAP_VDPAU
-    if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)) {
-        ff_vdpau_mpeg4_decode_picture(avctx->priv_data, s->gb.buffer, s->gb.buffer_end - s->gb.buffer);
-        goto frame_end;
-    }
-#endif
-
     if (avctx->hwaccel) {
         ret = avctx->hwaccel->start_frame(avctx, s->gb.buffer,
                                           s->gb.buffer_end - s->gb.buffer);
diff --git a/libavcodec/h264_picture.c b/libavcodec/h264_picture.c
index 99d9f9075c..30987d3145 100644
--- a/libavcodec/h264_picture.c
+++ b/libavcodec/h264_picture.c
@@ -152,12 +152,6 @@ int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
     int err = 0;
     h->mb_y = 0;
 
-#if FF_API_CAP_VDPAU
-    if (CONFIG_H264_VDPAU_DECODER &&
-        h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-        ff_vdpau_h264_set_reference_frames(h);
-#endif
-
     if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
         if (!h->droppable) {
             err = ff_h264_execute_ref_pic_marking(h);
@@ -175,12 +169,6 @@ int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
                    "hardware accelerator failed to decode picture\n");
     }
 
-#if FF_API_CAP_VDPAU
-    if (CONFIG_H264_VDPAU_DECODER &&
-        h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-        ff_vdpau_h264_picture_complete(h);
-#endif
-
     if (!in_setup && !h->droppable)
         ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                   h->picture_structure == PICT_BOTTOM_FIELD);
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 2577edd8a6..5d9558745e 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -497,11 +497,7 @@ static int h264_frame_start(H264Context *h)
 
     if ((ret = alloc_picture(h, pic)) < 0)
         return ret;
-    if(!h->frame_recovered && !h->avctx->hwaccel
-#if FF_API_CAP_VDPAU
-       && !(h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-#endif
-       )
+    if(!h->frame_recovered && !h->avctx->hwaccel)
         ff_color_frame(pic->f, c);
 
     h->cur_pic_ptr = pic;
@@ -939,17 +935,6 @@ static int h264_slice_header_init(H264Context *h)
         goto fail;
     }
 
-#if FF_API_CAP_VDPAU
-    if (h->avctx->codec &&
-        h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU &&
-        (sps->bit_depth_luma != 8 || sps->chroma_format_idc > 1)) {
-        av_log(h->avctx, AV_LOG_ERROR,
-                "VDPAU decoding does not support video colorspace.\n");
-        ret = AVERROR_INVALIDDATA;
-        goto fail;
-    }
-#endif
-
     if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
         sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
     ) {
@@ -2738,11 +2723,7 @@ int ff_h264_execute_decode_slices(H264Context *h)
 
     h->slice_ctx[0].next_slice_idx = INT_MAX;
 
-    if (h->avctx->hwaccel || context_count < 1
-#if FF_API_CAP_VDPAU
-        || h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-        )
+    if (h->avctx->hwaccel || context_count < 1)
         return 0;
 
     av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c
index f29c3f9048..b11a5ea636 100644
--- a/libavcodec/h264dec.c
+++ b/libavcodec/h264dec.c
@@ -527,10 +527,6 @@ static void flush_dpb(AVCodecContext *avctx)
     h->context_initialized = 0;
 }
 
-#if FF_API_CAP_VDPAU
-static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
-#endif
-
 static int get_last_needed_nal(H264Context *h)
 {
     int nals_needed = 0;
@@ -688,11 +684,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
                 if (h->avctx->hwaccel &&
                     (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
                     goto end;
-#if FF_API_CAP_VDPAU
-                if (CONFIG_H264_VDPAU_DECODER &&
-                    h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-                    ff_vdpau_h264_picture_start(h);
-#endif
             }
 
             max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
@@ -701,18 +692,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
                     ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
                     h->nb_slice_ctx_queued = 0;
                 } else
-#if FF_API_CAP_VDPAU
-            if (CONFIG_H264_VDPAU_DECODER &&
-                       h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
-                ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
-                                        start_code,
-                                        sizeof(start_code));
-                ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
-                                        nal->raw_data,
-                                        nal->raw_size);
-                ret = 0;
-            } else
-#endif
                     ret = ff_h264_execute_decode_slices(h);
                 if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                     goto end;
diff --git a/libavcodec/mpegpicture.c b/libavcodec/mpegpicture.c
index 53fb35b4bd..9811a778b7 100644
--- a/libavcodec/mpegpicture.c
+++ b/libavcodec/mpegpicture.c
@@ -58,11 +58,7 @@ int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
 {
     int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
 
-    if (avctx->hwaccel
-#if FF_API_CAP_VDPAU
-        || avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-        )
+    if (avctx->hwaccel)
         return 0;
 
     if (linesize < 24) {
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index c4089972f0..75e6742995 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -1311,11 +1311,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             return -1;
         }
 
-        if (!avctx->hwaccel
-#if FF_API_CAP_VDPAU
-            && !(avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
-#endif
-            ) {
+        if (!avctx->hwaccel) {
             for(i=0; i<avctx->height; i++)
                 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
                        0x80, avctx->width);
@@ -1661,11 +1657,7 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_
     }
 
     /* TODO: export all the following to make them accessible for users (and filters) */
-    if (avctx->hwaccel || !mbtype_table
-#if FF_API_CAP_VDPAU
-        || (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
-#endif
-        )
+    if (avctx->hwaccel || !mbtype_table)
         return;
 
 
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 16c601e756..b68115613a 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -657,15 +657,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         return buf_size;
     }
 
-#if FF_API_CAP_VDPAU
-    if (s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) {
-        if (v->profile < PROFILE_ADVANCED)
-            avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
-        else
-            avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
-    }
-#endif
-
     //for advanced profile we may need to parse and unescape data
     if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
         int buf_size2 = 0;
@@ -684,21 +675,13 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 if (size <= 0) continue;
                 switch (AV_RB32(start)) {
                 case VC1_CODE_FRAME:
-                    if (avctx->hwaccel
-#if FF_API_CAP_VDPAU
-                        || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-                        )
+                    if (avctx->hwaccel)
                         buf_start = start;
                     buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
                     break;
                 case VC1_CODE_FIELD: {
                     int buf_size3;
-                    if (avctx->hwaccel
-#if FF_API_CAP_VDPAU
-                        || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-                        )
+                    if (avctx->hwaccel)
                         buf_start_second_field = start;
                     tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
                     if (!tmp) {
@@ -764,11 +747,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 ret = AVERROR_INVALIDDATA;
                 goto err;
             } else { // found field marker, unescape second field
-                if (avctx->hwaccel
-#if FF_API_CAP_VDPAU
-                    || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-                    )
+                if (avctx->hwaccel)
                     buf_start_second_field = divider;
                 tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
                 if (!tmp) {
@@ -917,17 +896,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
     s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
 
-#if FF_API_CAP_VDPAU
-    if ((CONFIG_VC1_VDPAU_DECODER)
-        &&s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) {
-        if (v->field_mode && buf_start_second_field) {
-            ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
-            ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
-        } else {
-            ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
-        }
-    } else
-#endif
     if (avctx->hwaccel) {
         s->mb_y = 0;
         if (v->field_mode && buf_start_second_field) {
diff --git a/libavcodec/vdpau.h b/libavcodec/vdpau.h
index 855d387d9a..458e4fa605 100644
--- a/libavcodec/vdpau.h
+++ b/libavcodec/vdpau.h
@@ -214,40 +214,6 @@ attribute_deprecated
 int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);
 #endif
 
-#if FF_API_CAP_VDPAU
-/** @brief The videoSurface is used for rendering. */
-#define FF_VDPAU_STATE_USED_FOR_RENDER 1
-
-/**
- * @brief The videoSurface is needed for reference/prediction.
- * The codec manipulates this.
- */
-#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2
-
-/**
- * @brief This structure is used as a callback between the FFmpeg
- * decoder (vd_) and presentation (vo_) module.
- * This is used for defining a video frame containing surface,
- * picture parameter, bitstream information etc which are passed
- * between the FFmpeg decoder and its clients.
- */
-struct vdpau_render_state {
-    VdpVideoSurface surface; ///< Used as rendered surface, never changed.
-
-    int state; ///< Holds FF_VDPAU_STATE_* values.
-
-    /** picture parameter information for all supported codecs */
-    union AVVDPAUPictureInfo info;
-
-    /** Describe size/location of the compressed video data.
-        Set to 0 when freeing bitstream_buffers. */
-    int bitstream_buffers_allocated;
-    int bitstream_buffers_used;
-    /** The user is responsible for freeing this buffer using av_freep(). */
-    VdpBitstreamBuffer *bitstream_buffers;
-};
-#endif
-
 /* @}*/
 
 #endif /* AVCODEC_VDPAU_H */
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 1431d94d76..0335a2868c 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -57,9 +57,6 @@
 #ifndef FF_API_LOWRES
 #define FF_API_LOWRES            (LIBAVCODEC_VERSION_MAJOR < 59)
 #endif
-#ifndef FF_API_CAP_VDPAU
-#define FF_API_CAP_VDPAU         (LIBAVCODEC_VERSION_MAJOR < 58)
-#endif
 #ifndef FF_API_BUFS_VDPAU
 #define FF_API_BUFS_VDPAU        (LIBAVCODEC_VERSION_MAJOR < 58)
 #endif


======================================================================

diff --cc libavcodec/avcodec.h
index 6922b5b6fc,f5711baa35..076332b6f1
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@@ -1030,14 -890,6 +1030,7 @@@ typedef struct RcOverride
   * This can be used to prevent truncation of the last audio samples.
   */
  #define AV_CODEC_CAP_SMALL_LAST_FRAME    (1 <<  6)
 +
- #if FF_API_CAP_VDPAU
- /**
-  * Codec can export data for HW decoding (VDPAU).
-  */
- #define AV_CODEC_CAP_HWACCEL_VDPAU       (1 <<  7)
- #endif
- 
  /**
   * Codec can output multiple frames per AVPacket
   * Normally demuxers return one frame at a time, demuxers which do not do
@@@ -1221,13 -1045,7 +1214,8 @@@
   * Codec can be fed a final frame with a smaller size.
   * This can be used to prevent truncation of the last audio samples.
   */
 -#define CODEC_CAP_SMALL_LAST_FRAME 0x0040
 +#define CODEC_CAP_SMALL_LAST_FRAME AV_CODEC_CAP_SMALL_LAST_FRAME
- #if FF_API_CAP_VDPAU
- /**
-  * Codec can export data for HW decoding (VDPAU).
-  */
- #define CODEC_CAP_HWACCEL_VDPAU    AV_CODEC_CAP_HWACCEL_VDPAU
- #endif
++
  /**
   * Codec can output multiple frames per AVPacket
   * Normally demuxers return one frame at a time, demuxers which do not do
diff --cc libavcodec/error_resilience.c
index 5364940e94,bf3a6882c7..0c7f29d171
--- a/libavcodec/error_resilience.c
+++ b/libavcodec/error_resilience.c
@@@ -811,19 -749,6 +811,16 @@@ void ff_er_frame_start(ERContext *s
      s->error_occurred = 0;
  }
  
 +static int er_supported(ERContext *s)
 +{
 +    if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice           ||
- #if FF_API_CAP_VDPAU
-        s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU          ||
- #endif
 +       !s->cur_pic.f                                                  ||
 +       s->cur_pic.field_picture
 +    )
 +        return 0;
 +    return 1;
 +}
 +
  /**
   * Add a slice.
   * @param endx   x component of the last macroblock, can be -1
diff --cc libavcodec/h264_picture.c
index 99d9f9075c,24ba79df0e..30987d3145
--- a/libavcodec/h264_picture.c
+++ b/libavcodec/h264_picture.c
@@@ -152,12 -148,10 +152,6 @@@ int ff_h264_field_end(H264Context *h, H
      int err = 0;
      h->mb_y = 0;
  
- #if FF_API_CAP_VDPAU
-     if (CONFIG_H264_VDPAU_DECODER &&
-         h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-         ff_vdpau_h264_set_reference_frames(h);
- #endif
 -    if (!in_setup && !h->droppable)
 -        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
 -                                  h->picture_structure == PICT_BOTTOM_FIELD);
--
      if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
          if (!h->droppable) {
              err = ff_h264_execute_ref_pic_marking(h);
@@@ -175,15 -168,29 +169,9 @@@
                     "hardware accelerator failed to decode picture\n");
      }
  
- #if FF_API_CAP_VDPAU
-     if (CONFIG_H264_VDPAU_DECODER &&
-         h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-         ff_vdpau_h264_picture_complete(h);
- #endif
 -#if CONFIG_ERROR_RESILIENCE
 -    /*
 -     * FIXME: Error handling code does not seem to support interlaced
 -     * when slices span multiple rows
 -     * The ff_er_add_slice calls don't work right for bottom
 -     * fields; they cause massive erroneous error concealing
 -     * Error marking covers both fields (top and bottom).
 -     * This causes a mismatched s->error_count
 -     * and a bad error table. Further, the error count goes to
 -     * INT_MAX when called for bottom field, because mb_y is
 -     * past end by one (callers fault) and resync_mb_y != 0
 -     * causes problems for the first MB line, too.
 -     */
 -    if (!FIELD_PICTURE(h) && h->enable_er) {
 -        h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr);
 -        h264_set_erpic(&sl->er.last_pic,
 -                       sl->ref_count[0] ? sl->ref_list[0][0].parent : NULL);
 -        h264_set_erpic(&sl->er.next_pic,
 -                       sl->ref_count[1] ? sl->ref_list[1][0].parent : NULL);
 -        ff_er_frame_end(&sl->er);
 -    }
 -#endif /* CONFIG_ERROR_RESILIENCE */
--
 +    if (!in_setup && !h->droppable)
 +        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
 +                                  h->picture_structure == PICT_BOTTOM_FIELD);
      emms_c();
  
      h->current_slice = 0;
diff --cc libavcodec/h264_slice.c
index 2577edd8a6,427cbe618c..5d9558745e
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@@ -495,34 -478,8 +495,30 @@@ static int h264_frame_start(H264Contex
      pic->f->crop_top    = h->crop_top;
      pic->f->crop_bottom = h->crop_bottom;
  
 -    if (CONFIG_ERROR_RESILIENCE && h->enable_er)
 +    if ((ret = alloc_picture(h, pic)) < 0)
 +        return ret;
-     if(!h->frame_recovered && !h->avctx->hwaccel
- #if FF_API_CAP_VDPAU
-        && !(h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
- #endif
-        )
++    if(!h->frame_recovered && !h->avctx->hwaccel)
 +        ff_color_frame(pic->f, c);
 +
 +    h->cur_pic_ptr = pic;
 +    ff_h264_unref_picture(h, &h->cur_pic);
 +    if (CONFIG_ERROR_RESILIENCE) {
 +        ff_h264_set_erpic(&h->slice_ctx[0].er.cur_pic, NULL);
 +    }
 +
 +    if ((ret = ff_h264_ref_picture(h, &h->cur_pic, h->cur_pic_ptr)) < 0)
 +        return ret;
 +
 +    for (i = 0; i < h->nb_slice_ctx; i++) {
 +        h->slice_ctx[i].linesize   = h->cur_pic_ptr->f->linesize[0];
 +        h->slice_ctx[i].uvlinesize = h->cur_pic_ptr->f->linesize[1];
 +    }
 +
 +    if (CONFIG_ERROR_RESILIENCE && h->enable_er) {
          ff_er_frame_start(&h->slice_ctx[0].er);
 +        ff_h264_set_erpic(&h->slice_ctx[0].er.last_pic, NULL);
 +        ff_h264_set_erpic(&h->slice_ctx[0].er.next_pic, NULL);
 +    }
  
      for (i = 0; i < 16; i++) {
          h->block_offset[i]           = (4 * ((scan8[i] - scan8[0]) & 7) << pixel_shift) + 4 * pic->f->linesize[0] * ((scan8[i] - scan8[0]) >> 3);
@@@ -936,32 -861,16 +932,21 @@@ static int h264_slice_header_init(H264C
      ret = ff_h264_alloc_tables(h);
      if (ret < 0) {
          av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
 -        return ret;
 +        goto fail;
      }
  
- #if FF_API_CAP_VDPAU
-     if (h->avctx->codec &&
-         h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU &&
-         (sps->bit_depth_luma != 8 || sps->chroma_format_idc > 1)) {
-         av_log(h->avctx, AV_LOG_ERROR,
-                 "VDPAU decoding does not support video colorspace.\n");
-         ret = AVERROR_INVALIDDATA;
-         goto fail;
-     }
- #endif
- 
 -    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 10) {
 +    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
 +        sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
 +    ) {
          av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
                 sps->bit_depth_luma);
 -        return AVERROR_INVALIDDATA;
 +        ret = AVERROR_INVALIDDATA;
 +        goto fail;
      }
  
 +    h->cur_bit_depth_luma         =
      h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
 +    h->cur_chroma_format_idc      = sps->chroma_format_idc;
      h->pixel_shift                = sps->bit_depth_luma > 8;
      h->chroma_format_idc          = sps->chroma_format_idc;
      h->bit_depth_luma             = sps->bit_depth_luma;
@@@ -2736,17 -2504,8 +2721,13 @@@ int ff_h264_execute_decode_slices(H264C
      int ret = 0;
      int i, j;
  
 +    h->slice_ctx[0].next_slice_idx = INT_MAX;
 +
-     if (h->avctx->hwaccel || context_count < 1
- #if FF_API_CAP_VDPAU
-         || h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU
- #endif
-         )
+     if (h->avctx->hwaccel || context_count < 1)
          return 0;
 +
 +    av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
 +
      if (context_count == 1) {
  
          h->slice_ctx[0].next_slice_idx = h->mb_width * h->mb_height;
diff --cc libavcodec/h264dec.c
index f29c3f9048,2a532a7ef7..b11a5ea636
--- a/libavcodec/h264dec.c
+++ b/libavcodec/h264dec.c
@@@ -653,46 -572,15 +649,41 @@@ static int decode_nal_units(H264Contex
          err = 0;
          switch (nal->type) {
          case H264_NAL_IDR_SLICE:
 -            idr(h); // FIXME ensure we don't lose some frames if there is reordering
 +            if ((nal->data[1] & 0xFC) == 0x98) {
 +                av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
 +                h->next_outputed_poc = INT_MIN;
 +                ret = -1;
 +                goto end;
 +            }
 +            if(!idr_cleared) {
 +                if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
 +                    av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
 +                    ret = AVERROR_INVALIDDATA;
 +                    goto end;
 +                }
 +                idr(h); // FIXME ensure we don't lose some frames if there is reordering
 +            }
 +            idr_cleared = 1;
 +            h->has_recovery_point = 1;
          case H264_NAL_SLICE:
 -            if ((err = ff_h264_queue_decode_slice(h, nal)))
 +            h->has_slice = 1;
 +
 +            if ((err = ff_h264_queue_decode_slice(h, nal))) {
 +                H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
 +                sl->ref_count[0] = sl->ref_count[1] = 0;
                  break;
 +            }
 +
 +            if (h->current_slice == 1) {
 +                if (avctx->active_thread_type & FF_THREAD_FRAME &&
 +                    i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
 +                    ff_thread_finish_setup(avctx);
 +                    h->setup_finished = 1;
 +                }
  
 -            if (avctx->active_thread_type & FF_THREAD_FRAME &&
 -                i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
 -                ff_thread_finish_setup(avctx);
 -                h->setup_finished = 1;
 +                if (h->avctx->hwaccel &&
 +                    (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
 +                    goto end;
- #if FF_API_CAP_VDPAU
-                 if (CONFIG_H264_VDPAU_DECODER &&
-                     h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-                     ff_vdpau_h264_picture_start(h);
- #endif
              }
  
              max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
diff --cc libavcodec/mpegpicture.c
index 53fb35b4bd,1d9544b482..9811a778b7
--- a/libavcodec/mpegpicture.c
+++ b/libavcodec/mpegpicture.c
@@@ -56,19 -56,7 +56,15 @@@ do {
  int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
                              ScratchpadContext *sc, int linesize)
  {
 -    int alloc_size = FFALIGN(FFABS(linesize) + 32, 32);
 +    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
 +
-     if (avctx->hwaccel
- #if FF_API_CAP_VDPAU
-         || avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU
- #endif
-         )
++    if (avctx->hwaccel)
 +        return 0;
 +
 +    if (linesize < 24) {
 +        av_log(avctx, AV_LOG_ERROR, "Image too small, temporary buffers cannot function\n");
 +        return AVERROR_PATCHWELCOME;
 +    }
  
      // edge emu needs blocksize + filter length - 1
      // (= 17x17 for  halfpel / 21x21 for H.264)
diff --cc libavcodec/mpegvideo.c
index c4089972f0,379b690b4d..75e6742995
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@@ -1311,28 -1167,14 +1311,24 @@@ int ff_mpv_frame_start(MpegEncContext *
              return -1;
          }
  
-         if (!avctx->hwaccel
- #if FF_API_CAP_VDPAU
-             && !(avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
- #endif
-             ) {
 -        memset(s->last_picture_ptr->f->data[0], 0,
 -               avctx->height * s->last_picture_ptr->f->linesize[0]);
 -        memset(s->last_picture_ptr->f->data[1], 0x80,
 -               (avctx->height >> v_chroma_shift) *
 -               s->last_picture_ptr->f->linesize[1]);
 -        memset(s->last_picture_ptr->f->data[2], 0x80,
 -               (avctx->height >> v_chroma_shift) *
 -               s->last_picture_ptr->f->linesize[2]);
++        if (!avctx->hwaccel) {
 +            for(i=0; i<avctx->height; i++)
 +                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
 +                       0x80, avctx->width);
 +            if (s->last_picture_ptr->f->data[2]) {
 +                for(i=0; i<AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
 +                    memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
 +                        0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
 +                    memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
 +                        0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
 +                }
 +            }
 +
 +            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
 +                for(i=0; i<avctx->height; i++)
 +                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
 +            }
 +        }
  
          ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
          ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
@@@ -1572,148 -1281,77 +1568,144 @@@ static int add_mb(AVMotionVector *mb, u
  /**
   * Print debugging info for the given picture.
   */
 -void ff_print_debug_info(MpegEncContext *s, Picture *p)
 +void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table,
 +                         uint32_t *mbtype_table, int8_t *qscale_table, int16_t (*motion_val[2])[2],
 +                         int *low_delay,
 +                         int mb_width, int mb_height, int mb_stride, int quarter_sample)
  {
 -    AVFrame *pict;
 -    if (s->avctx->hwaccel || !p || !p->mb_type)
 +    if ((avctx->flags2 & AV_CODEC_FLAG2_EXPORT_MVS) && mbtype_table && motion_val[0]) {
 +        const int shift = 1 + quarter_sample;
 +        const int scale = 1 << shift;
 +        const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
 +        const int mv_stride      = (mb_width << mv_sample_log2) +
 +                                   (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
 +        int mb_x, mb_y, mbcount = 0;
 +
 +        /* size is width * height * 2 * 4 where 2 is for directions and 4 is
 +         * for the maximum number of MB (4 MB in case of IS_8x8) */
 +        AVMotionVector *mvs = av_malloc_array(mb_width * mb_height, 2 * 4 * sizeof(AVMotionVector));
 +        if (!mvs)
 +            return;
 +
 +        for (mb_y = 0; mb_y < mb_height; mb_y++) {
 +            for (mb_x = 0; mb_x < mb_width; mb_x++) {
 +                int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
 +                for (direction = 0; direction < 2; direction++) {
 +                    if (!USES_LIST(mb_type, direction))
 +                        continue;
 +                    if (IS_8X8(mb_type)) {
 +                        for (i = 0; i < 4; i++) {
 +                            int sx = mb_x * 16 + 4 + 8 * (i & 1);
 +                            int sy = mb_y * 16 + 4 + 8 * (i >> 1);
 +                            int xy = (mb_x * 2 + (i & 1) +
 +                                      (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
 +                            int mx = motion_val[direction][xy][0];
 +                            int my = motion_val[direction][xy][1];
 +                            mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, scale, direction);
 +                        }
 +                    } else if (IS_16X8(mb_type)) {
 +                        for (i = 0; i < 2; i++) {
 +                            int sx = mb_x * 16 + 8;
 +                            int sy = mb_y * 16 + 4 + 8 * i;
 +                            int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
 +                            int mx = motion_val[direction][xy][0];
 +                            int my = motion_val[direction][xy][1];
 +
 +                            if (IS_INTERLACED(mb_type))
 +                                my *= 2;
 +
 +                            mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, scale, direction);
 +                        }
 +                    } else if (IS_8X16(mb_type)) {
 +                        for (i = 0; i < 2; i++) {
 +                            int sx = mb_x * 16 + 4 + 8 * i;
 +                            int sy = mb_y * 16 + 8;
 +                            int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
 +                            int mx = motion_val[direction][xy][0];
 +                            int my = motion_val[direction][xy][1];
 +
 +                            if (IS_INTERLACED(mb_type))
 +                                my *= 2;
 +
 +                            mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, scale, direction);
 +                        }
 +                    } else {
 +                          int sx = mb_x * 16 + 8;
 +                          int sy = mb_y * 16 + 8;
 +                          int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
 +                          int mx = motion_val[direction][xy][0];
 +                          int my = motion_val[direction][xy][1];
 +                          mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, scale, direction);
 +                    }
 +                }
 +            }
 +        }
 +
 +        if (mbcount) {
 +            AVFrameSideData *sd;
 +
 +            av_log(avctx, AV_LOG_DEBUG, "Adding %d MVs info to frame %d\n", mbcount, avctx->frame_number);
 +            sd = av_frame_new_side_data(pict, AV_FRAME_DATA_MOTION_VECTORS, mbcount * sizeof(AVMotionVector));
 +            if (!sd) {
 +                av_freep(&mvs);
 +                return;
 +            }
 +            memcpy(sd->data, mvs, mbcount * sizeof(AVMotionVector));
 +        }
 +
 +        av_freep(&mvs);
 +    }
 +
 +    /* TODO: export all the following to make them accessible for users (and filters) */
-     if (avctx->hwaccel || !mbtype_table
- #if FF_API_CAP_VDPAU
-         || (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
- #endif
-         )
++    if (avctx->hwaccel || !mbtype_table)
          return;
 -    pict = p->f;
  
 -    if (s->avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
 +
 +    if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) {
          int x,y;
  
 -        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
 -        switch (pict->pict_type) {
 -        case AV_PICTURE_TYPE_I:
 -            av_log(s->avctx,AV_LOG_DEBUG,"I\n");
 -            break;
 -        case AV_PICTURE_TYPE_P:
 -            av_log(s->avctx,AV_LOG_DEBUG,"P\n");
 -            break;
 -        case AV_PICTURE_TYPE_B:
 -            av_log(s->avctx,AV_LOG_DEBUG,"B\n");
 -            break;
 -        case AV_PICTURE_TYPE_S:
 -            av_log(s->avctx,AV_LOG_DEBUG,"S\n");
 -            break;
 -        case AV_PICTURE_TYPE_SI:
 -            av_log(s->avctx,AV_LOG_DEBUG,"SI\n");
 -            break;
 -        case AV_PICTURE_TYPE_SP:
 -            av_log(s->avctx,AV_LOG_DEBUG,"SP\n");
 -            break;
 -        }
 -        for (y = 0; y < s->mb_height; y++) {
 -            for (x = 0; x < s->mb_width; x++) {
 -                if (s->avctx->debug & FF_DEBUG_SKIP) {
 -                    int count = s->mbskip_table[x + y * s->mb_stride];
 +        av_log(avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
 +               av_get_picture_type_char(pict->pict_type));
 +        for (y = 0; y < mb_height; y++) {
 +            for (x = 0; x < mb_width; x++) {
 +                if (avctx->debug & FF_DEBUG_SKIP) {
 +                    int count = mbskip_table ? mbskip_table[x + y * mb_stride] : 0;
                      if (count > 9)
                          count = 9;
 -                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
 +                    av_log(avctx, AV_LOG_DEBUG, "%1d", count);
                  }
 -                if (s->avctx->debug & FF_DEBUG_QP) {
 -                    av_log(s->avctx, AV_LOG_DEBUG, "%2d",
 -                           p->qscale_table[x + y * s->mb_stride]);
 +                if (avctx->debug & FF_DEBUG_QP) {
 +                    av_log(avctx, AV_LOG_DEBUG, "%2d",
 +                           qscale_table[x + y * mb_stride]);
                  }
 -                if (s->avctx->debug & FF_DEBUG_MB_TYPE) {
 -                    int mb_type = p->mb_type[x + y * s->mb_stride];
 +                if (avctx->debug & FF_DEBUG_MB_TYPE) {
 +                    int mb_type = mbtype_table[x + y * mb_stride];
                      // Type & MV direction
                      if (IS_PCM(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "P");
 +                        av_log(avctx, AV_LOG_DEBUG, "P");
                      else if (IS_INTRA(mb_type) && IS_ACPRED(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "A");
 +                        av_log(avctx, AV_LOG_DEBUG, "A");
                      else if (IS_INTRA4x4(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "i");
 +                        av_log(avctx, AV_LOG_DEBUG, "i");
                      else if (IS_INTRA16x16(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "I");
 +                        av_log(avctx, AV_LOG_DEBUG, "I");
                      else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "d");
 +                        av_log(avctx, AV_LOG_DEBUG, "d");
                      else if (IS_DIRECT(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "D");
 +                        av_log(avctx, AV_LOG_DEBUG, "D");
                      else if (IS_GMC(mb_type) && IS_SKIP(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "g");
 +                        av_log(avctx, AV_LOG_DEBUG, "g");
                      else if (IS_GMC(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "G");
 +                        av_log(avctx, AV_LOG_DEBUG, "G");
                      else if (IS_SKIP(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "S");
 +                        av_log(avctx, AV_LOG_DEBUG, "S");
                      else if (!USES_LIST(mb_type, 1))
 -                        av_log(s->avctx, AV_LOG_DEBUG, ">");
 +                        av_log(avctx, AV_LOG_DEBUG, ">");
                      else if (!USES_LIST(mb_type, 0))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "<");
 +                        av_log(avctx, AV_LOG_DEBUG, "<");
                      else {
 -                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
 -                        av_log(s->avctx, AV_LOG_DEBUG, "X");
 +                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
 +                        av_log(avctx, AV_LOG_DEBUG, "X");
                      }
  
                      // segmentation
diff --cc libavcodec/vc1dec.c
index 16c601e756,b26fbf2eff..b68115613a
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@@ -654,18 -613,9 +654,9 @@@ static int vc1_decode_frame(AVCodecCont
              *got_frame = 1;
          }
  
 -        return 0;
 +        return buf_size;
      }
  
- #if FF_API_CAP_VDPAU
-     if (s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) {
-         if (v->profile < PROFILE_ADVANCED)
-             avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
-         else
-             avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
-     }
- #endif
- 
      //for advanced profile we may need to parse and unescape data
      if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
          int buf_size2 = 0;
@@@ -694,23 -638,13 +681,19 @@@
                      break;
                  case VC1_CODE_FIELD: {
                      int buf_size3;
-                     if (avctx->hwaccel
- #if FF_API_CAP_VDPAU
-                         || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU
- #endif
-                         )
 -                    tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
 -                    if (!tmp)
++                    if (avctx->hwaccel)
 +                        buf_start_second_field = start;
 +                    tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
 +                    if (!tmp) {
 +                        ret = AVERROR(ENOMEM);
                          goto err;
 +                    }
                      slices = tmp;
                      slices[n_slices].buf = av_mallocz(buf_size + AV_INPUT_BUFFER_PADDING_SIZE);
 -                    if (!slices[n_slices].buf)
 +                    if (!slices[n_slices].buf) {
 +                        ret = AVERROR(ENOMEM);
                          goto err;
 +                    }
                      buf_size3 = vc1_unescape_buffer(start + 4, size,
                                                      slices[n_slices].buf);
                      init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
@@@ -761,26 -687,15 +744,22 @@@
              divider = find_next_marker(buf, buf + buf_size);
              if ((divider == (buf + buf_size)) || AV_RB32(divider) != VC1_CODE_FIELD) {
                  av_log(avctx, AV_LOG_ERROR, "Error in WVC1 interlaced frame\n");
 +                ret = AVERROR_INVALIDDATA;
                  goto err;
              } else { // found field marker, unescape second field
-                 if (avctx->hwaccel
- #if FF_API_CAP_VDPAU
-                     || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU
- #endif
-                     )
 -                tmp = av_realloc(slices, sizeof(*slices) * (n_slices+1));
 -                if (!tmp)
++                if (avctx->hwaccel)
 +                    buf_start_second_field = divider;
 +                tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
 +                if (!tmp) {
 +                    ret = AVERROR(ENOMEM);
                      goto err;
 +                }
                  slices = tmp;
                  slices[n_slices].buf = av_mallocz(buf_size + AV_INPUT_BUFFER_PADDING_SIZE);
 -                if (!slices[n_slices].buf)
 +                if (!slices[n_slices].buf) {
 +                    ret = AVERROR(ENOMEM);
                      goto err;
 +                }
                  buf_size3 = vc1_unescape_buffer(divider + 4, buf + buf_size - divider - 4, slices[n_slices].buf);
                  init_get_bits(&slices[n_slices].gb, slices[n_slices].buf,
                                buf_size3 << 3);
@@@ -917,84 -811,13 +896,73 @@@
      s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
      s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
  
- #if FF_API_CAP_VDPAU
-     if ((CONFIG_VC1_VDPAU_DECODER)
-         &&s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) {
-         if (v->field_mode && buf_start_second_field) {
-             ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
-             ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
-         } else {
-             ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
-         }
-     } else
- #endif
      if (avctx->hwaccel) {
 -        if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
 -            goto err;
 -        if (avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start) < 0)
 -            goto err;
 -        if (avctx->hwaccel->end_frame(avctx) < 0)
 -            goto err;
 +        s->mb_y = 0;
 +        if (v->field_mode && buf_start_second_field) {
 +            // decode first field
 +            s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
 +            if ((ret = avctx->hwaccel->start_frame(avctx, buf_start, buf_start_second_field - buf_start)) < 0)
 +                goto err;
 +            if ((ret = avctx->hwaccel->decode_slice(avctx, buf_start, buf_start_second_field - buf_start)) < 0)
 +                goto err;
 +            if ((ret = avctx->hwaccel->end_frame(avctx)) < 0)
 +                goto err;
 +
 +            // decode second field
 +            s->gb = slices[n_slices1 + 1].gb;
 +            s->picture_structure = PICT_TOP_FIELD + v->tff;
 +            v->second_field = 1;
 +            v->pic_header_flag = 0;
 +            if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
 +                av_log(avctx, AV_LOG_ERROR, "parsing header for second field failed");
 +                ret = AVERROR_INVALIDDATA;
 +                goto err;
 +            }
 +            v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
 +
 +            if ((ret = avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field)) < 0)
 +                goto err;
 +            if ((ret = avctx->hwaccel->decode_slice(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field)) < 0)
 +                goto err;
 +            if ((ret = avctx->hwaccel->end_frame(avctx)) < 0)
 +                goto err;
 +        } else {
 +            s->picture_structure = PICT_FRAME;
 +            if ((ret = avctx->hwaccel->start_frame(avctx, buf_start, (buf + buf_size) - buf_start)) < 0)
 +                goto err;
 +
 +            if (n_slices == 0) {
 +                // no slices, decode the frame as-is
 +                if ((ret = avctx->hwaccel->decode_slice(avctx, buf_start, (buf + buf_size) - buf_start)) < 0)
 +                    goto err;
 +            } else {
 +                // decode the frame part as the first slice
 +                if ((ret = avctx->hwaccel->decode_slice(avctx, buf_start, slices[0].rawbuf - buf_start)) < 0)
 +                    goto err;
 +
 +                // and process the slices as additional slices afterwards
 +                for (i = 0 ; i < n_slices; i++) {
 +                    s->gb = slices[i].gb;
 +                    s->mb_y = slices[i].mby_start;
 +
 +                    v->pic_header_flag = get_bits1(&s->gb);
 +                    if (v->pic_header_flag) {
 +                        if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
 +                            av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
 +                            ret = AVERROR_INVALIDDATA;
 +                            if (avctx->err_recognition & AV_EF_EXPLODE)
 +                                goto err;
 +                            continue;
 +                        }
 +                    }
 +
 +                    if ((ret = avctx->hwaccel->decode_slice(avctx, slices[i].rawbuf, slices[i].raw_size)) < 0)
 +                        goto err;
 +                }
 +            }
 +            if ((ret = avctx->hwaccel->end_frame(avctx)) < 0)
 +                goto err;
 +        }
      } else {
          int header_ret = 0;
  
diff --cc libavcodec/version.h
index 1431d94d76,3d7e47c729..0335a2868c
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@@ -45,21 -45,8 +45,18 @@@
   * FF_API_* defines may be placed below to indicate public API that will be
   * dropped at a future version bump. The defines themselves are not part of
   * the public API and may change, break or disappear at any time.
 + *
 + * @note, when bumping the major version it is recommended to manually
 + * disable each FF_API_* in its own commit instead of disabling them all
 + * at once through the bump. This improves the git bisect-ability of the change.
   */
  
 +#ifndef FF_API_VIMA_DECODER
 +#define FF_API_VIMA_DECODER     (LIBAVCODEC_VERSION_MAJOR < 58)
 +#endif
 +#ifndef FF_API_LOWRES
 +#define FF_API_LOWRES            (LIBAVCODEC_VERSION_MAJOR < 59)
 +#endif
- #ifndef FF_API_CAP_VDPAU
- #define FF_API_CAP_VDPAU         (LIBAVCODEC_VERSION_MAJOR < 58)
- #endif
  #ifndef FF_API_BUFS_VDPAU
  #define FF_API_BUFS_VDPAU        (LIBAVCODEC_VERSION_MAJOR < 58)
  #endif



