[FFmpeg-cvslog] Merge commit '3176217c60ca7828712985092d9102d331ea4f3d'

Clément Bœsch git at videolan.org
Sun Jun 12 13:34:29 CEST 2016


ffmpeg | branch: master | Clément Bœsch <u at pkh.me> | Sun Jun 12 13:24:27 2016 +0200| [1534ef87c74cc66a117bf61c467641c2129bc964] | committer: Clément Bœsch

Merge commit '3176217c60ca7828712985092d9102d331ea4f3d'

* commit '3176217c60ca7828712985092d9102d331ea4f3d':
  h264: decouple h264_ps from the h264 decoder

Main changes:

- a local GetBitContext is created for the various
  ff_h264_decode_seq_parameter_set() attempts (sketched after this list)

- just like in the old code, remove_sps() is adjusted so that it does not
  remove the PPS.

  This fixes decoding of the sample from Ticket #631
  http://ffmpeg.org/pipermail/ffmpeg-user/attachments/20111108/dae58f17/attachment.mp4
  but see the next point as well.

- ff_h264_update_thread_context() is updated to work even when the SPS
  isn't set, since requiring a set SPS breaks the current skip_frame
  code. This makes sure we can still decode the sample from Ticket #631
  without needing -flags2 +chunks. (Thanks to Michael)

- keep {sps,pps}_ref pointers that stay alive even when the active
  pps/sps get removed from the available lists (patch by michaelni with
  additional frees in ff_h264_free_context() from mateo); see the
  AVBufferRef sketch after this list

- added a check on sps in avpriv_h264_has_num_reorder_frames() to fix
  crashes with mpegts_with_dvbsubs.ts from Ticket #4074
  http://samples.ffmpeg.org/ffmpeg-bugs/trac/ticket4074/mpegts_with_dvbsubs.ts

- in h264_parser.c:h264_parse(), after ff_h264_decode_extradata() is
  called, the PPS and SPS lists of the local parser context are updated
  with the ones from the H264Context that was used. This fixes
  fate-flv-demux.

- in h264_slice.c, the "PPS changed between slices" error is not
  triggered anymore in one condition, as it made fate-h264-xavc-4389 fail
  with THREADS=N (Thanks to Michael)
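
The local-GetBitContext change from the first point above means the
decoder no longer touches h->gb when parsing an SPS; the NAL_SPS case in
decode_nal_units() now boils down to the following three-attempt flow
(a simplified restatement of the h264.c hunk in the diff below, with added
comments; ff_h264_decode_seq_parameter_set() and init_get_bits8() are
FFmpeg-internal APIs as of this commit):

    case NAL_SPS: {
        /* parse from a local copy so the shared bit reader state is left
         * untouched */
        GetBitContext tmp_gb = nal->gb;
        if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
            break;
        /* second attempt: re-read the complete NAL payload */
        init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
        if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
            break;
        /* last resort: parse the original bitstream, ignoring truncation */
        ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
        break;
    }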
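
The {sps,pps}_ref pointers are plain AVBuffer references: each list slot
holds one reference to a stored parameter set, and the currently active
SPS/PPS hold an extra one, so the active set stays valid even after
remove_sps()/remove_pps() drop the list entry. Below is a minimal,
self-contained sketch of the idea using only the public libavutil API;
ParamSets, store_sps(), activate_sps() and the raw void* payload are
illustrative stand-ins, not the actual H264ParamSets code.

    #include <string.h>
    #include <libavutil/buffer.h>

    typedef struct ParamSets {
        AVBufferRef *sps_list[32]; /* stored parameter sets, indexed by id */
        AVBufferRef *sps_ref;      /* extra ref keeping the active SPS alive */
        const void  *sps;          /* convenience pointer into sps_ref->data */
    } ParamSets;

    static int store_sps(ParamSets *ps, int id, const void *payload, int size)
    {
        AVBufferRef *buf = av_buffer_alloc(size);
        if (!buf)
            return -1;
        memcpy(buf->data, payload, size);
        av_buffer_unref(&ps->sps_list[id]); /* drop any previous SPS with this id */
        ps->sps_list[id] = buf;
        return 0;
    }

    static int activate_sps(ParamSets *ps, int id)
    {
        if (!ps->sps_list[id])
            return -1;
        av_buffer_unref(&ps->sps_ref);
        ps->sps_ref = av_buffer_ref(ps->sps_list[id]); /* second reference */
        if (!ps->sps_ref)
            return -1;
        ps->sps = ps->sps_ref->data;
        return 0;
    }

    static void remove_sps(ParamSets *ps, int id)
    {
        /* the list entry goes away, but ps->sps stays usable through
         * sps_ref until av_buffer_unref(&ps->sps_ref) is called */
        av_buffer_unref(&ps->sps_list[id]);
    }

ff_h264_free_context() then unrefs both the lists and the {sps,pps}_ref
buffers, which is where the additional frees mentioned above went.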

Merged-by: Clément Bœsch <clement at stupeflix.com>
Merged-by: Michael Niedermayer <michael at niedermayer.cc>
Merged-by: Matthieu Bouron <matthieu.bouron at stupeflix.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=1534ef87c74cc66a117bf61c467641c2129bc964
---

 libavcodec/dxva2_h264.c       |   81 +++----
 libavcodec/h264.c             |   85 +++----
 libavcodec/h264.h             |   53 +++--
 libavcodec/h264_cabac.c       |   25 ++-
 libavcodec/h264_cavlc.c       |   22 +-
 libavcodec/h264_direct.c      |    6 +-
 libavcodec/h264_loopfilter.c  |    8 +-
 libavcodec/h264_mb.c          |    8 +-
 libavcodec/h264_mb_template.c |   20 +-
 libavcodec/h264_mvpred.h      |    2 +-
 libavcodec/h264_parser.c      |  104 ++++++---
 libavcodec/h264_ps.c          |  497 +++++++++++++++++++++++++----------------
 libavcodec/h264_refs.c        |   19 +-
 libavcodec/h264_sei.c         |   15 +-
 libavcodec/h264_slice.c       |  415 ++++++++++++++--------------------
 libavcodec/vaapi_h264.c       |   66 +++---
 libavcodec/vdpau.c            |   48 ++--
 libavcodec/vdpau_h264.c       |   54 ++---
 libavcodec/videotoolbox.c     |   14 +-
 19 files changed, 824 insertions(+), 718 deletions(-)

diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c
index bcba875..bd1fa1e 100644
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@ -52,6 +52,8 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
                                     DXVA_PicParams_H264 *pp)
 {
     const H264Picture *current_picture = h->cur_pic_ptr;
+    const SPS *sps = h->ps.sps;
+    const PPS *pps = h->ps.pps;
     int i, j;
 
     memset(pp, 0, sizeof(*pp));
@@ -96,30 +98,30 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
 
     pp->wFrameWidthInMbsMinus1        = h->mb_width  - 1;
     pp->wFrameHeightInMbsMinus1       = h->mb_height - 1;
-    pp->num_ref_frames                = h->sps.ref_frame_count;
+    pp->num_ref_frames                = sps->ref_frame_count;
 
     pp->wBitFields                    = ((h->picture_structure != PICT_FRAME) <<  0) |
-                                        ((h->sps.mb_aff &&
+                                        ((sps->mb_aff &&
                                         (h->picture_structure == PICT_FRAME)) <<  1) |
-                                        (h->sps.residual_color_transform_flag <<  2) |
+                                        (sps->residual_color_transform_flag   <<  2) |
                                         /* sp_for_switch_flag (not implemented by FFmpeg) */
                                         (0                                    <<  3) |
-                                        (h->sps.chroma_format_idc             <<  4) |
+                                        (sps->chroma_format_idc               <<  4) |
                                         ((h->nal_ref_idc != 0)                <<  6) |
-                                        (h->pps.constrained_intra_pred        <<  7) |
-                                        (h->pps.weighted_pred                 <<  8) |
-                                        (h->pps.weighted_bipred_idc           <<  9) |
+                                        (pps->constrained_intra_pred          <<  7) |
+                                        (pps->weighted_pred                   <<  8) |
+                                        (pps->weighted_bipred_idc             <<  9) |
                                         /* MbsConsecutiveFlag */
                                         (1                                    << 11) |
-                                        (h->sps.frame_mbs_only_flag           << 12) |
-                                        (h->pps.transform_8x8_mode            << 13) |
-                                        ((h->sps.level_idc >= 31)             << 14) |
+                                        (sps->frame_mbs_only_flag             << 12) |
+                                        (pps->transform_8x8_mode              << 13) |
+                                        ((sps->level_idc >= 31)               << 14) |
                                         /* IntraPicFlag (Modified if we detect a non
                                          * intra slice in dxva2_h264_decode_slice) */
                                         (1                                    << 15);
 
-    pp->bit_depth_luma_minus8         = h->sps.bit_depth_luma - 8;
-    pp->bit_depth_chroma_minus8       = h->sps.bit_depth_chroma - 8;
+    pp->bit_depth_luma_minus8         = sps->bit_depth_luma - 8;
+    pp->bit_depth_chroma_minus8       = sps->bit_depth_chroma - 8;
     if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG)
         pp->Reserved16Bits            = 0;
     else if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO)
@@ -135,28 +137,28 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
     if ((h->picture_structure & PICT_BOTTOM_FIELD) &&
         current_picture->field_poc[1] != INT_MAX)
         pp->CurrFieldOrderCnt[1] = current_picture->field_poc[1];
-    pp->pic_init_qs_minus26           = h->pps.init_qs - 26;
-    pp->chroma_qp_index_offset        = h->pps.chroma_qp_index_offset[0];
-    pp->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
+    pp->pic_init_qs_minus26           = pps->init_qs - 26;
+    pp->chroma_qp_index_offset        = pps->chroma_qp_index_offset[0];
+    pp->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1];
     pp->ContinuationFlag              = 1;
-    pp->pic_init_qp_minus26           = h->pps.init_qp - 26;
-    pp->num_ref_idx_l0_active_minus1  = h->pps.ref_count[0] - 1;
-    pp->num_ref_idx_l1_active_minus1  = h->pps.ref_count[1] - 1;
+    pp->pic_init_qp_minus26           = pps->init_qp - 26;
+    pp->num_ref_idx_l0_active_minus1  = pps->ref_count[0] - 1;
+    pp->num_ref_idx_l1_active_minus1  = pps->ref_count[1] - 1;
     pp->Reserved8BitsA                = 0;
     pp->frame_num                     = h->frame_num;
-    pp->log2_max_frame_num_minus4     = h->sps.log2_max_frame_num - 4;
-    pp->pic_order_cnt_type            = h->sps.poc_type;
-    if (h->sps.poc_type == 0)
-        pp->log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
-    else if (h->sps.poc_type == 1)
-        pp->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
-    pp->direct_8x8_inference_flag     = h->sps.direct_8x8_inference_flag;
-    pp->entropy_coding_mode_flag      = h->pps.cabac;
-    pp->pic_order_present_flag        = h->pps.pic_order_present;
-    pp->num_slice_groups_minus1       = h->pps.slice_group_count - 1;
-    pp->slice_group_map_type          = h->pps.mb_slice_group_map_type;
-    pp->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-    pp->redundant_pic_cnt_present_flag= h->pps.redundant_pic_cnt_present;
+    pp->log2_max_frame_num_minus4     = sps->log2_max_frame_num - 4;
+    pp->pic_order_cnt_type            = sps->poc_type;
+    if (sps->poc_type == 0)
+        pp->log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4;
+    else if (sps->poc_type == 1)
+        pp->delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
+    pp->direct_8x8_inference_flag     = sps->direct_8x8_inference_flag;
+    pp->entropy_coding_mode_flag      = pps->cabac;
+    pp->pic_order_present_flag        = pps->pic_order_present;
+    pp->num_slice_groups_minus1       = pps->slice_group_count - 1;
+    pp->slice_group_map_type          = pps->mb_slice_group_map_type;
+    pp->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
+    pp->redundant_pic_cnt_present_flag= pps->redundant_pic_cnt_present;
     pp->Reserved8BitsB                = 0;
     pp->slice_group_change_rate_minus1= 0;  /* XXX not implemented by FFmpeg */
     //pp->SliceGroupMap[810];               /* XXX not implemented by FFmpeg */
@@ -164,25 +166,26 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
 
 static void fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx, const H264Context *h, DXVA_Qmatrix_H264 *qm)
 {
+    const PPS *pps = h->ps.pps;
     unsigned i, j;
     memset(qm, 0, sizeof(*qm));
     if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) {
         for (i = 0; i < 6; i++)
             for (j = 0; j < 16; j++)
-                qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][j];
+                qm->bScalingLists4x4[i][j] = pps->scaling_matrix4[i][j];
 
         for (i = 0; i < 64; i++) {
-            qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][i];
-            qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][i];
+            qm->bScalingLists8x8[0][i] = pps->scaling_matrix8[0][i];
+            qm->bScalingLists8x8[1][i] = pps->scaling_matrix8[3][i];
         }
     } else {
         for (i = 0; i < 6; i++)
             for (j = 0; j < 16; j++)
-                qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][ff_zigzag_scan[j]];
+                qm->bScalingLists4x4[i][j] = pps->scaling_matrix4[i][ff_zigzag_scan[j]];
 
         for (i = 0; i < 64; i++) {
-            qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][ff_zigzag_direct[i]];
-            qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][ff_zigzag_direct[i]];
+            qm->bScalingLists8x8[0][i] = pps->scaling_matrix8[0][ff_zigzag_direct[i]];
+            qm->bScalingLists8x8[1][i] = pps->scaling_matrix8[3][ff_zigzag_direct[i]];
         }
     }
 }
@@ -282,11 +285,11 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
         }
     }
     slice->slice_qs_delta    = 0; /* XXX not implemented by FFmpeg */
-    slice->slice_qp_delta    = sl->qscale - h->pps.init_qp;
+    slice->slice_qp_delta    = sl->qscale - h->ps.pps->init_qp;
     slice->redundant_pic_cnt = sl->redundant_pic_count;
     if (sl->slice_type == AV_PICTURE_TYPE_B)
         slice->direct_spatial_mv_pred_flag = sl->direct_spatial_mv_pred;
-    slice->cabac_init_idc = h->pps.cabac ? sl->cabac_init_idc : 0;
+    slice->cabac_init_idc = h->ps.pps->cabac ? sl->cabac_init_idc : 0;
     if (sl->deblocking_filter < 2)
         slice->disable_deblocking_filter_idc = 1 - sl->deblocking_filter;
     else
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index c011527..0de6d91 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -60,7 +60,7 @@ const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
 int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
 {
     H264Context *h = avctx->priv_data;
-    return h ? h->sps.num_reorder_frames : 0;
+    return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
 }
 
 static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
@@ -224,9 +224,6 @@ int ff_h264_alloc_tables(H264Context *h)
             h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
         }
 
-    if (!h->dequant4_coeff[0])
-        ff_h264_init_dequant_tables(h);
-
     return 0;
 
 fail:
@@ -425,7 +422,6 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
     h->backup_width          = -1;
     h->backup_height         = -1;
     h->backup_pix_fmt        = AV_PIX_FMT_NONE;
-    h->dequant_coeff_pps     = -1;
     h->current_sps_id        = -1;
     h->cur_chroma_format_idc = -1;
 
@@ -514,9 +510,9 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
         }
     }
 
-    if (h->sps.bitstream_restriction_flag &&
-        h->avctx->has_b_frames < h->sps.num_reorder_frames) {
-        h->avctx->has_b_frames = h->sps.num_reorder_frames;
+    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
+        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
+        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
         h->low_delay           = 0;
     }
 
@@ -567,6 +563,7 @@ static int decode_init_thread_copy(AVCodecContext *avctx)
  */
 static void decode_postinit(H264Context *h, int setup_finished)
 {
+    const SPS *sps = h->ps.sps;
     H264Picture *out = h->cur_pic_ptr;
     H264Picture *cur = h->cur_pic_ptr;
     int i, pics, out_of_order, out_idx;
@@ -596,7 +593,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
     /* Prioritize picture timing SEI information over used
      * decoding process if it exists. */
 
-    if (h->sps.pic_struct_present_flag) {
+    if (sps->pic_struct_present_flag) {
         switch (h->sei_pic_struct) {
         case SEI_PIC_STRUCT_FRAME:
             break;
@@ -640,7 +637,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
         /* Derive top_field_first from field pocs. */
         cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
     } else {
-        if (h->sps.pic_struct_present_flag) {
+        if (sps->pic_struct_present_flag) {
             /* Use picture timing SEI information. Even if it is a
              * information of a past frame, better than nothing. */
             if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
@@ -737,9 +734,9 @@ static void decode_postinit(H264Context *h, int setup_finished)
     // FIXME do something with unavailable reference frames
 
     /* Sort B-frames into display order */
-    if (h->sps.bitstream_restriction_flag ||
+    if (sps->bitstream_restriction_flag ||
         h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
-        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, h->sps.num_reorder_frames);
+        h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
     }
     h->low_delay = !h->avctx->has_b_frames;
 
@@ -762,7 +759,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
             h->last_pocs[i] = INT_MIN;
         h->last_pocs[0] = cur->poc;
         cur->mmco_reset = 1;
-    } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
+    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
         av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
         h->avctx->has_b_frames = out_of_order;
         h->low_delay = 0;
@@ -894,15 +891,16 @@ static void flush_dpb(AVCodecContext *avctx)
 
 int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
 {
-    const int max_frame_num = 1 << h->sps.log2_max_frame_num;
+    const SPS *sps = h->ps.sps;
+    const int max_frame_num = 1 << sps->log2_max_frame_num;
     int field_poc[2];
 
     h->frame_num_offset = h->prev_frame_num_offset;
     if (h->frame_num < h->prev_frame_num)
         h->frame_num_offset += max_frame_num;
 
-    if (h->sps.poc_type == 0) {
-        const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
+    if (sps->poc_type == 0) {
+        const int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
 
         if (h->poc_lsb < h->prev_poc_lsb &&
             h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
@@ -916,11 +914,11 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
         field_poc[1] = h->poc_msb + h->poc_lsb;
         if (h->picture_structure == PICT_FRAME)
             field_poc[1] += h->delta_poc_bottom;
-    } else if (h->sps.poc_type == 1) {
+    } else if (sps->poc_type == 1) {
         int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
         int i;
 
-        if (h->sps.poc_cycle_length != 0)
+        if (sps->poc_cycle_length != 0)
             abs_frame_num = h->frame_num_offset + h->frame_num;
         else
             abs_frame_num = 0;
@@ -929,25 +927,25 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
             abs_frame_num--;
 
         expected_delta_per_poc_cycle = 0;
-        for (i = 0; i < h->sps.poc_cycle_length; i++)
+        for (i = 0; i < sps->poc_cycle_length; i++)
             // FIXME integrate during sps parse
-            expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
+            expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i];
 
         if (abs_frame_num > 0) {
-            int poc_cycle_cnt          = (abs_frame_num - 1) / h->sps.poc_cycle_length;
-            int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
+            int poc_cycle_cnt          = (abs_frame_num - 1) / sps->poc_cycle_length;
+            int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length;
 
             expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
             for (i = 0; i <= frame_num_in_poc_cycle; i++)
-                expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
+                expectedpoc = expectedpoc + sps->offset_for_ref_frame[i];
         } else
             expectedpoc = 0;
 
         if (h->nal_ref_idc == 0)
-            expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
+            expectedpoc = expectedpoc + sps->offset_for_non_ref_pic;
 
         field_poc[0] = expectedpoc + h->delta_poc[0];
-        field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
+        field_poc[1] = field_poc[0] + sps->offset_for_top_to_bottom_field;
 
         if (h->picture_structure == PICT_FRAME)
             field_poc[1] += h->delta_poc[1];
@@ -977,7 +975,7 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
  *
  * @return profile as defined by FF_PROFILE_H264_*
  */
-int ff_h264_get_profile(SPS *sps)
+int ff_h264_get_profile(const SPS *sps)
 {
     int profile = sps->profile_idc;
 
@@ -1154,8 +1152,8 @@ again:
                     h->valid_recovery_point = 1;
 
                 if (   h->recovery_frame < 0
-                    || av_mod_uintp2(h->recovery_frame - h->frame_num, h->sps.log2_max_frame_num) > h->sei_recovery_frame_cnt) {
-                    h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->sps.log2_max_frame_num);
+                    || av_mod_uintp2(h->recovery_frame - h->frame_num, h->ps.sps->log2_max_frame_num) > h->sei_recovery_frame_cnt) {
+                    h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
 
                     if (!h->valid_recovery_point)
                         h->recovery_frame = h->frame_num;
@@ -1225,22 +1223,21 @@ again:
             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                 goto end;
             break;
-        case NAL_SPS:
-            h->gb = nal->gb;
-            if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
+        case NAL_SPS: {
+            GetBitContext tmp_gb = nal->gb;
+            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
                 break;
             av_log(h->avctx, AV_LOG_DEBUG,
                    "SPS decoding failure, trying again with the complete NAL\n");
-            init_get_bits8(&h->gb, nal->raw_data + 1, nal->raw_size - 1);
-            if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
+            init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
+            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
                 break;
-            h->gb = nal->gb;
-            ff_h264_decode_seq_parameter_set(h, 1);
-
+            ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
             break;
+        }
         case NAL_PPS:
-            h->gb = nal->gb;
-            ret = ff_h264_decode_picture_parameter_set(h, nal->size_bits);
+            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
+                                                       nal->size_bits);
             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                 goto end;
             break;
@@ -1305,7 +1302,10 @@ end:
      * past end by one (callers fault) and resync_mb_y != 0
      * causes problems for the first MB line, too.
      */
-    if (!FIELD_PICTURE(h) && h->current_slice && !h->sps.new && h->enable_er) {
+    if (!FIELD_PICTURE(h) && h->current_slice &&
+        h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
+        h->enable_er) {
+
         H264SliceContext *sl = h->slice_ctx;
         int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
 
@@ -1585,10 +1585,13 @@ av_cold void ff_h264_free_context(H264Context *h)
     av_freep(&h->a53_caption);
 
     for (i = 0; i < MAX_SPS_COUNT; i++)
-        av_freep(h->sps_buffers + i);
+        av_buffer_unref(&h->ps.sps_list[i]);
 
     for (i = 0; i < MAX_PPS_COUNT; i++)
-        av_freep(h->pps_buffers + i);
+        av_buffer_unref(&h->ps.pps_list[i]);
+
+    av_buffer_unref(&h->ps.sps_ref);
+    av_buffer_unref(&h->ps.pps_ref);
 
     ff_h2645_packet_uninit(&h->pkt);
 }
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 33bc509..264b447 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -28,6 +28,7 @@
 #ifndef AVCODEC_H264_H
 #define AVCODEC_H264_H
 
+#include "libavutil/buffer.h"
 #include "libavutil/intreadwrite.h"
 #include "libavutil/thread.h"
 #include "cabac.h"
@@ -94,12 +95,12 @@
 #define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
 
 #ifndef CABAC
-#define CABAC(h) (h)->pps.cabac
+#define CABAC(h) (h)->ps.pps->cabac
 #endif
 
-#define CHROMA(h)    ((h)->sps.chroma_format_idc)
-#define CHROMA422(h) ((h)->sps.chroma_format_idc == 2)
-#define CHROMA444(h) ((h)->sps.chroma_format_idc == 3)
+#define CHROMA(h)    ((h)->ps.sps->chroma_format_idc)
+#define CHROMA422(h) ((h)->ps.sps->chroma_format_idc == 2)
+#define CHROMA444(h) ((h)->ps.sps->chroma_format_idc == 3)
 
 #define EXTENDED_SAR       255
 
@@ -231,7 +232,6 @@ typedef struct SPS {
     int bit_depth_chroma;                 ///< bit_depth_chroma_minus8 + 8
     int residual_color_transform_flag;    ///< residual_colour_transform_flag
     int constraint_set_flags;             ///< constraint_set[0-3]_flag
-    int new;                              ///< flag to keep track if the decoder context needs re-init due to changed SPS
     uint8_t data[4096];
     size_t data_size;
 } SPS;
@@ -261,8 +261,25 @@ typedef struct PPS {
     int chroma_qp_diff;
     uint8_t data[4096];
     size_t data_size;
+
+    uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
+    uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
+    uint32_t(*dequant4_coeff[6])[16];
+    uint32_t(*dequant8_coeff[6])[64];
 } PPS;
 
+typedef struct H264ParamSets {
+    AVBufferRef *sps_list[MAX_SPS_COUNT];
+    AVBufferRef *pps_list[MAX_PPS_COUNT];
+
+    AVBufferRef *pps_ref;
+    AVBufferRef *sps_ref;
+    /* currently active parameters sets */
+    const PPS *pps;
+    // FIXME this should properly be const
+    SPS *sps;
+} H264ParamSets;
+
 /**
  * Frame Packing Arrangement Type
  */
@@ -572,16 +589,9 @@ typedef struct H264Context {
 
 
     unsigned current_sps_id; ///< id of the current SPS
-    SPS sps; ///< current sps
-    PPS pps; ///< current pps
 
     int au_pps_id; ///< pps_id of current access unit
 
-    uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16]; // FIXME should these be moved down?
-    uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
-    uint32_t(*dequant4_coeff[6])[16];
-    uint32_t(*dequant8_coeff[6])[64];
-
     uint16_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1
 
     // interlacing specific flags
@@ -634,10 +644,7 @@ typedef struct H264Context {
     int bit_depth_luma;         ///< luma bit depth from sps to detect changes
     int chroma_format_idc;      ///< chroma format from sps to detect changes
 
-    SPS *sps_buffers[MAX_SPS_COUNT];
-    PPS *pps_buffers[MAX_PPS_COUNT];
-
-    int dequant_coeff_pps;      ///< reinit tables when pps changes
+    H264ParamSets ps;
 
     uint16_t *slice_table_base;
 
@@ -848,17 +855,19 @@ int ff_h264_decode_sei(H264Context *h);
 /**
  * Decode SPS
  */
-int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation);
+int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
+                                     H264ParamSets *ps, int ignore_truncation);
 
 /**
  * compute profile from sps
  */
-int ff_h264_get_profile(SPS *sps);
+int ff_h264_get_profile(const SPS *sps);
 
 /**
  * Decode PPS
  */
-int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length);
+int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
+                                         H264ParamSets *ps, int bit_length);
 
 /**
  * Free any data that may have been allocated in the H264 context
@@ -910,7 +919,7 @@ int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl);
 
 void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);
 
-void ff_h264_init_dequant_tables(H264Context *h);
+void ff_h264_init_dequant_tables(PPS *pps, const SPS *sps);
 
 void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
 void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
@@ -1010,7 +1019,7 @@ static av_always_inline uint16_t pack8to16(unsigned a, unsigned b)
  */
 static av_always_inline int get_chroma_qp(const H264Context *h, int t, int qscale)
 {
-    return h->pps.chroma_qp_table[t][qscale];
+    return h->ps.pps->chroma_qp_table[t][qscale];
 }
 
 /**
@@ -1133,7 +1142,7 @@ static av_always_inline void write_back_motion(const H264Context *h,
 
 static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
 {
-    if (h->sps.direct_8x8_inference_flag)
+    if (h->ps.sps->direct_8x8_inference_flag)
         return !(AV_RN64A(sl->sub_mb_type) &
                  ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
                   0x0001000100010001ULL));
diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c
index 3df0f70..ddabe3b 100644
--- a/libavcodec/h264_cabac.c
+++ b/libavcodec/h264_cabac.c
@@ -1265,7 +1265,7 @@ void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
 {
     int i;
     const int8_t (*tab)[2];
-    const int slice_qp = av_clip(sl->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51);
+    const int slice_qp = av_clip(sl->qscale - 6*(h->ps.sps->bit_depth_luma-8), 0, 51);
 
     if (sl->slice_type_nos == AV_PICTURE_TYPE_I) tab = cabac_context_init_I;
     else                                 tab = cabac_context_init_PB[sl->cabac_init_idc];
@@ -1876,7 +1876,7 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
         decode_cabac_residual_dc(h, sl, sl->mb_luma_dc[p], ctx_cat[0][p], LUMA_DC_BLOCK_INDEX+p, scan, 16);
 
         if( cbp&15 ) {
-            qmul = h->dequant4_coeff[p][qscale];
+            qmul = h->ps.pps->dequant4_coeff[p][qscale];
             for( i4x4 = 0; i4x4 < 16; i4x4++ ) {
                 const int index = 16*p + i4x4;
                 decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[1][p], index, scan + 1, qmul, 15);
@@ -1891,9 +1891,9 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
                 if( IS_8x8DCT(mb_type) ) {
                     const int index = 16*p + 4*i8x8;
                     decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[3][p], index,
-                                                scan8x8, h->dequant8_coeff[cqm][qscale], 64);
+                                                scan8x8, h->ps.pps->dequant8_coeff[cqm][qscale], 64);
                 } else {
-                    qmul = h->dequant4_coeff[cqm][qscale];
+                    qmul = h->ps.pps->dequant4_coeff[cqm][qscale];
                     for( i4x4 = 0; i4x4 < 4; i4x4++ ) {
                         const int index = 16*p + 4*i8x8 + i4x4;
 //START_TIMER
@@ -1914,10 +1914,11 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
  */
 int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
 {
+    const SPS *sps = h->ps.sps;
     int mb_xy;
     int mb_type, partition_count, cbp = 0;
-    int dct8x8_allowed= h->pps.transform_8x8_mode;
-    int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
+    int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
+    int decode_chroma = sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2;
     const int pixel_shift = h->pixel_shift;
 
     mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
@@ -2027,8 +2028,8 @@ decode_intra_mb:
     h->slice_table[mb_xy] = sl->slice_num;
 
     if(IS_INTRA_PCM(mb_type)) {
-        const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
-                            h->sps.bit_depth_luma >> 3;
+        const int mb_size = ff_h264_mb_sizes[sps->chroma_format_idc] *
+                            sps->bit_depth_luma >> 3;
         const uint8_t *ptr;
         int ret;
 
@@ -2215,7 +2216,7 @@ decode_intra_mb:
         ff_h264_pred_direct_motion(h, sl, &mb_type);
         fill_rectangle(sl->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 2);
         fill_rectangle(sl->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 2);
-        dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
+        dct8x8_allowed &= sps->direct_8x8_inference_flag;
     } else {
         int list, i;
         if(IS_16X16(mb_type)){
@@ -2382,7 +2383,7 @@ decode_intra_mb:
         if(get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + (sl->last_qscale_diff != 0)])){
             int val = 1;
             int ctx= 2;
-            const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8);
+            const int max_qp = 51 + 6*(sps->bit_depth_luma-8);
 
             while( get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + ctx] ) ) {
                 ctx= 3;
@@ -2425,7 +2426,7 @@ decode_intra_mb:
                 int c, i, i8x8;
                 for( c = 0; c < 2; c++ ) {
                     int16_t *mb = sl->mb + (16*(16 + 16*c) << pixel_shift);
-                    qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
+                    qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
                     for (i8x8 = 0; i8x8 < 2; i8x8++) {
                         for (i = 0; i < 4; i++) {
                             const int index = 16 + 16 * c + 8*i8x8 + i;
@@ -2449,7 +2450,7 @@ decode_intra_mb:
             if( cbp&0x20 ) {
                 int c, i;
                 for( c = 0; c < 2; c++ ) {
-                    qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
+                    qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
                     for( i = 0; i < 4; i++ ) {
                         const int index = 16 + 16 * c + i;
                         decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), 4, index, scan + 1, qmul, 15);
diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c
index be53914..95b3778 100644
--- a/libavcodec/h264_cavlc.c
+++ b/libavcodec/h264_cavlc.c
@@ -656,7 +656,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
                 for(i4x4=0; i4x4<4; i4x4++){
                     const int index= i4x4 + 4*i8x8 + p*16;
                     if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift),
-                        index, scan + 1, h->dequant4_coeff[p][qscale], 15) < 0 ){
+                        index, scan + 1, h->ps.pps->dequant4_coeff[p][qscale], 15) < 0 ){
                         return -1;
                     }
                 }
@@ -678,7 +678,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
                     for(i4x4=0; i4x4<4; i4x4++){
                         const int index= i4x4 + 4*i8x8 + p*16;
                         if( decode_residual(h, sl, gb, buf, index, scan8x8+16*i4x4,
-                                            h->dequant8_coeff[cqm][qscale], 16) < 0 )
+                                            h->ps.pps->dequant8_coeff[cqm][qscale], 16) < 0 )
                             return -1;
                     }
                     nnz = &sl->non_zero_count_cache[scan8[4 * i8x8 + p * 16]];
@@ -688,7 +688,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
                     for(i4x4=0; i4x4<4; i4x4++){
                         const int index= i4x4 + 4*i8x8 + p*16;
                         if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index,
-                                            scan, h->dequant4_coeff[cqm][qscale], 16) < 0 ){
+                                            scan, h->ps.pps->dequant4_coeff[cqm][qscale], 16) < 0 ){
                             return -1;
                         }
                         new_cbp |= sl->non_zero_count_cache[scan8[index]] << i8x8;
@@ -708,8 +708,8 @@ int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
     int mb_xy;
     int partition_count;
     unsigned int mb_type, cbp;
-    int dct8x8_allowed= h->pps.transform_8x8_mode;
-    int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
+    int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
+    int decode_chroma = h->ps.sps->chroma_format_idc == 1 || h->ps.sps->chroma_format_idc == 2;
     const int pixel_shift = h->pixel_shift;
 
     mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
@@ -775,8 +775,8 @@ decode_intra_mb:
     h->slice_table[mb_xy] = sl->slice_num;
 
     if(IS_INTRA_PCM(mb_type)){
-        const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
-                            h->sps.bit_depth_luma;
+        const int mb_size = ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] *
+                            h->ps.sps->bit_depth_luma;
 
         // We assume these blocks are very rare so we do not optimize it.
         sl->intra_pcm_ptr = align_get_bits(&sl->gb);
@@ -949,7 +949,7 @@ decode_intra_mb:
         }
     }else if(IS_DIRECT(mb_type)){
         ff_h264_pred_direct_motion(h, sl, &mb_type);
-        dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
+        dct8x8_allowed &= h->ps.sps->direct_8x8_inference_flag;
     }else{
         int list, mx, my, i;
          //FIXME we should set ref_idx_l? to 0 if we use that later ...
@@ -1104,7 +1104,7 @@ decode_intra_mb:
         int ret;
         GetBitContext *gb = &sl->gb;
         const uint8_t *scan, *scan8x8;
-        const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8);
+        const int max_qp = 51 + 6 * (h->ps.sps->bit_depth_luma - 8);
 
         if(IS_INTERLACED(mb_type)){
             scan8x8 = sl->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
@@ -1142,7 +1142,7 @@ decode_intra_mb:
                 return -1;
             }
         } else {
-            const int num_c8x8 = h->sps.chroma_format_idc;
+            const int num_c8x8 = h->ps.sps->chroma_format_idc;
 
             if(cbp&0x30){
                 for(chroma_idx=0; chroma_idx<2; chroma_idx++)
@@ -1156,7 +1156,7 @@ decode_intra_mb:
 
             if(cbp&0x20){
                 for(chroma_idx=0; chroma_idx<2; chroma_idx++){
-                    const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
+                    const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
                     int16_t *mb = sl->mb + (16*(16 + 16*chroma_idx) << pixel_shift);
                     for (i8x8 = 0; i8x8<num_c8x8; i8x8++) {
                         for (i4x4 = 0; i4x4 < 4; i4x4++) {
diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index 5f66a67..0b3c025 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -315,7 +315,7 @@ single_col:
                 *mb_type |= MB_TYPE_DIRECT2 |
                             (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
             } else {
-                if (!h->sps.direct_8x8_inference_flag) {
+                if (!h->ps.sps->direct_8x8_inference_flag) {
                     /* FIXME: Save sub mb types from previous frames (or derive
                      * from MVs) so we know exactly what block size to use. */
                     sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */
@@ -538,7 +538,7 @@ single_col:
                 *mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
                             (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
             } else {
-                if (!h->sps.direct_8x8_inference_flag) {
+                if (!h->ps.sps->direct_8x8_inference_flag) {
                     /* FIXME: save sub mb types from previous frames (or derive
                      * from MVs) so we know exactly what block size to use */
                     sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
@@ -579,7 +579,7 @@ single_col:
 
         if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
             int y_shift = 2 * !IS_INTERLACED(*mb_type);
-            assert(h->sps.direct_8x8_inference_flag);
+            assert(h->ps.sps->direct_8x8_inference_flag);
 
             for (i8 = 0; i8 < 4; i8++) {
                 const int x8 = i8 & 1;
diff --git a/libavcodec/h264_loopfilter.c b/libavcodec/h264_loopfilter.c
index 0014927..7431b5e 100644
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@ -250,7 +250,7 @@ static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h,
     int left_type = sl->left_type[LTOP];
     int top_type  = sl->top_type;
 
-    int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
+    int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
     int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
     int b = 52 + sl->slice_beta_offset - qp_bd_offset;
 
@@ -420,7 +420,7 @@ void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl,
                             unsigned int linesize, unsigned int uvlinesize)
 {
     av_assert2(!FRAME_MBAFF(h));
-    if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) {
+    if(!h->h264dsp.h264_loop_filter_strength || h->ps.pps->chroma_qp_diff) {
         ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
         return;
     }
@@ -724,7 +724,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
     const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
     int first_vertical_edge_done = 0;
     int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
-    int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
+    int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
     int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
     int b = 52 + sl->slice_beta_offset - qp_bd_offset;
 
@@ -767,7 +767,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
                     bS[i] = 4;
                 else{
                     bS[i] = 1 + !!(sl->non_zero_count_cache[12+8*(i>>1)] |
-                         ((!h->pps.cabac && IS_8x8DCT(mbn_type)) ?
+                         ((!h->ps.pps->cabac && IS_8x8DCT(mbn_type)) ?
                             (h->cbp_table[mbn_xy] & (((MB_FIELD(sl) ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
                                                                        :
                             h->non_zero_count[mbn_xy][ off[i] ]));
diff --git a/libavcodec/h264_mb.c b/libavcodec/h264_mb.c
index 3c5c932..75535ad 100644
--- a/libavcodec/h264_mb.c
+++ b/libavcodec/h264_mb.c
@@ -635,7 +635,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
             for (i = 0; i < 16; i += 4) {
                 uint8_t *const ptr = dest_y + block_offset[i];
                 const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];
-                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
+                if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
                     if (h->x264_build != -1) {
                         h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                     } else
@@ -666,7 +666,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
                 uint8_t *const ptr = dest_y + block_offset[i];
                 const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];
 
-                if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
+                if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
                     h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                 } else {
                     uint8_t *topright;
@@ -705,7 +705,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
             if (!transform_bypass)
                 h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift),
                                                      sl->mb_luma_dc[p],
-                                                     h->dequant4_coeff[p][qscale][0]);
+                                                     h->ps.pps->dequant4_coeff[p][qscale][0]);
             else {
                 static const uint8_t dc_mapping[16] = {
                      0 * 16,  1 * 16,  4 * 16,  5 * 16,
@@ -737,7 +737,7 @@ static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264Sl
     if (!IS_INTRA4x4(mb_type)) {
         if (IS_INTRA16x16(mb_type)) {
             if (transform_bypass) {
-                if (h->sps.profile_idc == 244 &&
+                if (h->ps.sps->profile_idc == 244 &&
                     (sl->intra16x16_pred_mode == VERT_PRED8x8 ||
                      sl->intra16x16_pred_mode == HOR_PRED8x8)) {
                     h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset,
diff --git a/libavcodec/h264_mb_template.c b/libavcodec/h264_mb_template.c
index f7f8a93..f582b00 100644
--- a/libavcodec/h264_mb_template.c
+++ b/libavcodec/h264_mb_template.c
@@ -48,7 +48,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
     int linesize, uvlinesize /*dct_offset*/;
     int i, j;
     const int *block_offset = &h->block_offset[0];
-    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
+    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass);
     void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
     const int block_h   = 16 >> h->chroma_y_shift;
     const int chroma422 = CHROMA422(h);
@@ -96,12 +96,12 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
     }
 
     if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
-        const int bit_depth = h->sps.bit_depth_luma;
+        const int bit_depth = h->ps.sps->bit_depth_luma;
         if (PIXEL_SHIFT) {
             int j;
             GetBitContext gb;
             init_get_bits(&gb, sl->intra_pcm_ptr,
-                          ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth);
+                          ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] * bit_depth);
 
             for (i = 0; i < 16; i++) {
                 uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize);
@@ -109,7 +109,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
                     tmp_y[j] = get_bits(&gb, bit_depth);
             }
             if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
-                if (!h->sps.chroma_format_idc) {
+                if (!h->ps.sps->chroma_format_idc) {
                     for (i = 0; i < block_h; i++) {
                         uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
                         uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
@@ -134,7 +134,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
             for (i = 0; i < 16; i++)
                 memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16);
             if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
-                if (!h->sps.chroma_format_idc) {
+                if (!h->ps.sps->chroma_format_idc) {
                     for (i = 0; i < 8; i++) {
                         memset(dest_cb + i * uvlinesize, 1 << (bit_depth - 1), 8);
                         memset(dest_cr + i * uvlinesize, 1 << (bit_depth - 1), 8);
@@ -190,7 +190,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
             (sl->cbp & 0x30)) {
             uint8_t *dest[2] = { dest_cb, dest_cr };
             if (transform_bypass) {
-                if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 &&
+                if (IS_INTRA(mb_type) && h->ps.sps->profile_idc == 244 &&
                     (sl->chroma_pred_mode == VERT_PRED8x8 ||
                      sl->chroma_pred_mode == HOR_PRED8x8)) {
                     h->hpc.pred8x8_add[sl->chroma_pred_mode](dest[0],
@@ -231,10 +231,10 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
                 }
                 if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]])
                     h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 1 << PIXEL_SHIFT),
-                                                           h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
+                                                           h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
                 if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]])
                     h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 2 << PIXEL_SHIFT),
-                                                           h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
+                                                           h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
                 h->h264dsp.h264_idct_add8(dest, block_offset,
                                           sl->mb, uvlinesize,
                                           sl->non_zero_count_cache);
@@ -259,7 +259,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
     int linesize;
     int i, j, p;
     const int *block_offset = &h->block_offset[0];
-    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
+    const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass);
     const int plane_count      = (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) ? 3 : 1;
 
     for (p = 0; p < plane_count; p++) {
@@ -301,7 +301,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
 
     if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
         if (PIXEL_SHIFT) {
-            const int bit_depth = h->sps.bit_depth_luma;
+            const int bit_depth = h->ps.sps->bit_depth_luma;
             GetBitContext gb;
             init_get_bits(&gb, sl->intra_pcm_ptr, 768 * bit_depth);
 
diff --git a/libavcodec/h264_mvpred.h b/libavcodec/h264_mvpred.h
index 763746c..2a01a27 100644
--- a/libavcodec/h264_mvpred.h
+++ b/libavcodec/h264_mvpred.h
@@ -464,7 +464,7 @@ static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int m
 
     if (!IS_SKIP(mb_type)) {
         if (IS_INTRA(mb_type)) {
-            int type_mask = h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;
+            int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
             sl->topleft_samples_available     =
                 sl->top_samples_available     =
                     sl->left_samples_available = 0xFFFF;
diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c
index 493ed19..0913452 100644
--- a/libavcodec/h264_parser.c
+++ b/libavcodec/h264_parser.c
@@ -47,6 +47,7 @@
 typedef struct H264ParseContext {
     H264Context h;
     ParseContext pc;
+    H264ParamSets ps;
     int got_first;
 } H264ParseContext;
 
@@ -148,13 +149,13 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
     int list_count, ref_count[2];
 
 
-    if (h->pps.redundant_pic_cnt_present)
+    if (p->ps.pps->redundant_pic_cnt_present)
         get_ue_golomb(gb); // redundant_pic_count
 
     if (slice_type_nos == AV_PICTURE_TYPE_B)
         get_bits1(gb); // direct_spatial_mv_pred
 
-    if (ff_h264_parse_ref_count(&list_count, ref_count, gb, &h->pps,
+    if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps,
                                 slice_type_nos, h->picture_structure, h->avctx) < 0)
         return AVERROR_INVALIDDATA;
 
@@ -186,9 +187,9 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
         }
     }
 
-    if ((h->pps.weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) ||
-        (h->pps.weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B))
-        ff_h264_pred_weight_table(gb, &h->sps, ref_count, slice_type_nos,
+    if ((p->ps.pps->weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) ||
+        (p->ps.pps->weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B))
+        ff_h264_pred_weight_table(gb, p->ps.sps, ref_count, slice_type_nos,
                                   &pwt);
 
     if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag
@@ -255,6 +256,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
     buf_index     = 0;
     next_avc      = h->is_avc ? 0 : buf_size;
     for (;;) {
+        const SPS *sps;
         int src_length, consumed, nalsize = 0;
 
         if (buf_index >= next_avc) {
@@ -307,13 +309,19 @@ static inline int parse_nal_units(AVCodecParserContext *s,
 
         switch (h->nal_unit_type) {
         case NAL_SPS:
-            ff_h264_decode_seq_parameter_set(h, 0);
+            ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps, 0);
             break;
         case NAL_PPS:
-            ff_h264_decode_picture_parameter_set(h, h->gb.size_in_bits);
+            ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps,
+                                                 nal.size_bits);
             break;
         case NAL_SEI:
-            ff_h264_decode_sei(h);
+            {
+                H264ParamSets ps = h->ps;
+                h->ps = p->ps;
+                ff_h264_decode_sei(h);
+                h->ps = ps;
+            }
             break;
         case NAL_IDR_SLICE:
             s->key_frame = 1;
@@ -337,33 +345,39 @@ static inline int parse_nal_units(AVCodecParserContext *s,
                        "pps_id %u out of range\n", pps_id);
                 goto fail;
             }
-            if (!h->pps_buffers[pps_id]) {
+            if (!p->ps.pps_list[pps_id]) {
                 av_log(h->avctx, AV_LOG_ERROR,
                        "non-existing PPS %u referenced\n", pps_id);
                 goto fail;
             }
-            h->pps = *h->pps_buffers[pps_id];
-            if (!h->sps_buffers[h->pps.sps_id]) {
+            p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data;
+            if (!p->ps.sps_list[p->ps.pps->sps_id]) {
                 av_log(h->avctx, AV_LOG_ERROR,
-                       "non-existing SPS %u referenced\n", h->pps.sps_id);
+                       "non-existing SPS %u referenced\n", p->ps.pps->sps_id);
                 goto fail;
             }
-            h->sps       = *h->sps_buffers[h->pps.sps_id];
-            h->frame_num = get_bits(&nal.gb, h->sps.log2_max_frame_num);
+            p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data;
+
+            h->ps.sps = p->ps.sps;
+            h->ps.pps = p->ps.pps;
+            sps = p->ps.sps;
 
-            if(h->sps.ref_frame_count <= 1 && h->pps.ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
+            // heuristic to detect non marked keyframes
+            if (h->ps.sps->ref_frame_count <= 1 && h->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
                 s->key_frame = 1;
 
-            s->coded_width  = 16 * h->sps.mb_width;
-            s->coded_height = 16 * h->sps.mb_height;
-            s->width        = s->coded_width  - (h->sps.crop_right + h->sps.crop_left);
-            s->height       = s->coded_height - (h->sps.crop_top   + h->sps.crop_bottom);
+            h->frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
+
+            s->coded_width  = 16 * sps->mb_width;
+            s->coded_height = 16 * sps->mb_height;
+            s->width        = s->coded_width  - (sps->crop_right + sps->crop_left);
+            s->height       = s->coded_height - (sps->crop_top   + sps->crop_bottom);
             if (s->width <= 0 || s->height <= 0) {
                 s->width  = s->coded_width;
                 s->height = s->coded_height;
             }
 
-            switch (h->sps.bit_depth_luma) {
+            switch (sps->bit_depth_luma) {
             case 9:
                 if (CHROMA444(h))      s->format = AV_PIX_FMT_YUV444P9;
                 else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9;
@@ -383,10 +397,10 @@ static inline int parse_nal_units(AVCodecParserContext *s,
                 s->format = AV_PIX_FMT_NONE;
             }
 
-            avctx->profile = ff_h264_get_profile(&h->sps);
-            avctx->level   = h->sps.level_idc;
+            avctx->profile = ff_h264_get_profile(sps);
+            avctx->level   = sps->level_idc;
 
-            if (h->sps.frame_mbs_only_flag) {
+            if (sps->frame_mbs_only_flag) {
                 h->picture_structure = PICT_FRAME;
             } else {
                 if (get_bits1(&nal.gb)) { // field_pic_flag
@@ -398,19 +412,19 @@ static inline int parse_nal_units(AVCodecParserContext *s,
 
             if (h->nal_unit_type == NAL_IDR_SLICE)
                 get_ue_golomb_long(&nal.gb); /* idr_pic_id */
-            if (h->sps.poc_type == 0) {
-                h->poc_lsb = get_bits(&nal.gb, h->sps.log2_max_poc_lsb);
+            if (sps->poc_type == 0) {
+                h->poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
 
-                if (h->pps.pic_order_present == 1 &&
+                if (p->ps.pps->pic_order_present == 1 &&
                     h->picture_structure == PICT_FRAME)
                     h->delta_poc_bottom = get_se_golomb(&nal.gb);
             }
 
-            if (h->sps.poc_type == 1 &&
-                !h->sps.delta_pic_order_always_zero_flag) {
+            if (sps->poc_type == 1 &&
+                !sps->delta_pic_order_always_zero_flag) {
                 h->delta_poc[0] = get_se_golomb(&nal.gb);
 
-                if (h->pps.pic_order_present == 1 &&
+                if (p->ps.pps->pic_order_present == 1 &&
                     h->picture_structure == PICT_FRAME)
                     h->delta_poc[1] = get_se_golomb(&nal.gb);
             }
@@ -444,7 +458,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
                 }
             }
 
-            if (h->sps.pic_struct_present_flag) {
+            if (sps->pic_struct_present_flag) {
                 switch (h->sei_pic_struct) {
                 case SEI_PIC_STRUCT_TOP_FIELD:
                 case SEI_PIC_STRUCT_BOTTOM_FIELD:
@@ -475,7 +489,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
 
             if (h->picture_structure == PICT_FRAME) {
                 s->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
-                if (h->sps.pic_struct_present_flag) {
+                if (sps->pic_struct_present_flag) {
                     switch (h->sei_pic_struct) {
                     case SEI_PIC_STRUCT_TOP_BOTTOM:
                     case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
@@ -533,6 +547,8 @@ static int h264_parse(AVCodecParserContext *s,
     if (!p->got_first) {
         p->got_first = 1;
         if (avctx->extradata_size) {
+            int i;
+
             h->avctx = avctx;
             // must be done like in decoder, otherwise opening the parser,
             // letting it create extradata and then closing and opening again
@@ -541,6 +557,25 @@ static int h264_parse(AVCodecParserContext *s,
             if (!avctx->has_b_frames)
                 h->low_delay = 1;
             ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
+
+            for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++) {
+                av_buffer_unref(&p->ps.sps_list[i]);
+                if (h->ps.sps_list[i]) {
+                    p->ps.sps_list[i] = av_buffer_ref(h->ps.sps_list[i]);
+                    if (!p->ps.sps_list[i])
+                        return AVERROR(ENOMEM);
+                }
+            }
+            for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++) {
+                av_buffer_unref(&p->ps.pps_list[i]);
+                if (h->ps.pps_list[i]) {
+                    p->ps.pps_list[i] = av_buffer_ref(h->ps.pps_list[i]);
+                    if (!p->ps.pps_list[i])
+                        return AVERROR(ENOMEM);
+                }
+            }
+
+            p->ps.sps = h->ps.sps;
         }
     }
 
@@ -626,9 +661,16 @@ static void h264_close(AVCodecParserContext *s)
     H264ParseContext *p = s->priv_data;
     H264Context      *h = &p->h;
     ParseContext *pc = &p->pc;
+    int i;
 
     av_freep(&pc->buffer);
     ff_h264_free_context(h);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++)
+        av_buffer_unref(&p->ps.sps_list[i]);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++)
+        av_buffer_unref(&p->ps.pps_list[i]);
 }
 
 static av_cold int init(AVCodecParserContext *s)
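
A minimal standalone sketch of the reference-copy pattern the parser now uses for its SPS/PPS slots (mirror_ps_list is an illustrative name, not code from the tree); av_buffer_ref() only bumps a reference count, so both sides end up sharing the same underlying payloads:

    #include "libavutil/buffer.h"
    #include "libavutil/error.h"

    /* Mirror one refcounted parameter-set list into another: every non-empty
     * source slot gets an extra reference to the same underlying buffer, so
     * no deep copy of the SPS/PPS payload is made. */
    static int mirror_ps_list(AVBufferRef *dst[], AVBufferRef *const src[], int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            av_buffer_unref(&dst[i]);
            if (src[i]) {
                dst[i] = av_buffer_ref(src[i]);
                if (!dst[i])
                    return AVERROR(ENOMEM);
            }
        }
        return 0;
    }

Either side can then free its list independently; the payload is released with the last reference.
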
diff --git a/libavcodec/h264_ps.c b/libavcodec/h264_ps.c
index e0f0946..521f560 100644
--- a/libavcodec/h264_ps.c
+++ b/libavcodec/h264_ps.c
@@ -84,47 +84,68 @@ static const int level_max_dpb_mbs[][2] = {
     { 52, 184320    },
 };
 
-static inline int decode_hrd_parameters(H264Context *h, SPS *sps)
+static void remove_pps(H264ParamSets *s, int id)
+{
+    av_buffer_unref(&s->pps_list[id]);
+}
+
+static void remove_sps(H264ParamSets *s, int id)
+{
+#if 0
+    int i;
+    if (s->sps_list[id]) {
+        /* drop all PPS that depend on this SPS */
+        for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++)
+            if (s->pps_list[i] && ((PPS*)s->pps_list[i]->data)->sps_id == id)
+                remove_pps(s, i);
+    }
+#endif
+    av_buffer_unref(&s->sps_list[id]);
+}
+
+static inline int decode_hrd_parameters(GetBitContext *gb, AVCodecContext *avctx,
+                                        SPS *sps)
 {
     int cpb_count, i;
-    cpb_count = get_ue_golomb_31(&h->gb) + 1;
+    cpb_count = get_ue_golomb_31(gb) + 1;
 
     if (cpb_count > 32U) {
-        av_log(h->avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count);
+        av_log(avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count);
         return AVERROR_INVALIDDATA;
     }
 
-    get_bits(&h->gb, 4); /* bit_rate_scale */
-    get_bits(&h->gb, 4); /* cpb_size_scale */
+    get_bits(gb, 4); /* bit_rate_scale */
+    get_bits(gb, 4); /* cpb_size_scale */
     for (i = 0; i < cpb_count; i++) {
-        get_ue_golomb_long(&h->gb); /* bit_rate_value_minus1 */
-        get_ue_golomb_long(&h->gb); /* cpb_size_value_minus1 */
-        get_bits1(&h->gb);          /* cbr_flag */
-    }
-    sps->initial_cpb_removal_delay_length = get_bits(&h->gb, 5) + 1;
-    sps->cpb_removal_delay_length         = get_bits(&h->gb, 5) + 1;
-    sps->dpb_output_delay_length          = get_bits(&h->gb, 5) + 1;
-    sps->time_offset_length               = get_bits(&h->gb, 5);
+        get_ue_golomb_long(gb); /* bit_rate_value_minus1 */
+        get_ue_golomb_long(gb); /* cpb_size_value_minus1 */
+        get_bits1(gb);          /* cbr_flag */
+    }
+    sps->initial_cpb_removal_delay_length = get_bits(gb, 5) + 1;
+    sps->cpb_removal_delay_length         = get_bits(gb, 5) + 1;
+    sps->dpb_output_delay_length          = get_bits(gb, 5) + 1;
+    sps->time_offset_length               = get_bits(gb, 5);
     sps->cpb_cnt                          = cpb_count;
     return 0;
 }
 
-static inline int decode_vui_parameters(H264Context *h, SPS *sps)
+static inline int decode_vui_parameters(GetBitContext *gb, AVCodecContext *avctx,
+                                        SPS *sps)
 {
     int aspect_ratio_info_present_flag;
     unsigned int aspect_ratio_idc;
 
-    aspect_ratio_info_present_flag = get_bits1(&h->gb);
+    aspect_ratio_info_present_flag = get_bits1(gb);
 
     if (aspect_ratio_info_present_flag) {
-        aspect_ratio_idc = get_bits(&h->gb, 8);
+        aspect_ratio_idc = get_bits(gb, 8);
         if (aspect_ratio_idc == EXTENDED_SAR) {
-            sps->sar.num = get_bits(&h->gb, 16);
-            sps->sar.den = get_bits(&h->gb, 16);
+            sps->sar.num = get_bits(gb, 16);
+            sps->sar.den = get_bits(gb, 16);
         } else if (aspect_ratio_idc < FF_ARRAY_ELEMS(ff_h264_pixel_aspect)) {
             sps->sar = ff_h264_pixel_aspect[aspect_ratio_idc];
         } else {
-            av_log(h->avctx, AV_LOG_ERROR, "illegal aspect ratio\n");
+            av_log(avctx, AV_LOG_ERROR, "illegal aspect ratio\n");
             return AVERROR_INVALIDDATA;
         }
     } else {
@@ -132,19 +153,19 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps)
         sps->sar.den = 0;
     }
 
-    if (get_bits1(&h->gb))      /* overscan_info_present_flag */
-        get_bits1(&h->gb);      /* overscan_appropriate_flag */
+    if (get_bits1(gb))      /* overscan_info_present_flag */
+        get_bits1(gb);      /* overscan_appropriate_flag */
 
-    sps->video_signal_type_present_flag = get_bits1(&h->gb);
+    sps->video_signal_type_present_flag = get_bits1(gb);
     if (sps->video_signal_type_present_flag) {
-        get_bits(&h->gb, 3);                 /* video_format */
-        sps->full_range = get_bits1(&h->gb); /* video_full_range_flag */
+        get_bits(gb, 3);                 /* video_format */
+        sps->full_range = get_bits1(gb); /* video_full_range_flag */
 
-        sps->colour_description_present_flag = get_bits1(&h->gb);
+        sps->colour_description_present_flag = get_bits1(gb);
         if (sps->colour_description_present_flag) {
-            sps->color_primaries = get_bits(&h->gb, 8); /* colour_primaries */
-            sps->color_trc       = get_bits(&h->gb, 8); /* transfer_characteristics */
-            sps->colorspace      = get_bits(&h->gb, 8); /* matrix_coefficients */
+            sps->color_primaries = get_bits(gb, 8); /* colour_primaries */
+            sps->color_trc       = get_bits(gb, 8); /* transfer_characteristics */
+            sps->colorspace      = get_bits(gb, 8); /* matrix_coefficients */
             if (sps->color_primaries >= AVCOL_PRI_NB)
                 sps->color_primaries = AVCOL_PRI_UNSPECIFIED;
             if (sps->color_trc >= AVCOL_TRC_NB)
@@ -155,23 +176,23 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps)
     }
 
     /* chroma_location_info_present_flag */
-    if (get_bits1(&h->gb)) {
+    if (get_bits1(gb)) {
         /* chroma_sample_location_type_top_field */
-        h->avctx->chroma_sample_location = get_ue_golomb(&h->gb) + 1;
-        get_ue_golomb(&h->gb);  /* chroma_sample_location_type_bottom_field */
+        avctx->chroma_sample_location = get_ue_golomb(gb) + 1;
+        get_ue_golomb(gb);  /* chroma_sample_location_type_bottom_field */
     }
 
-    if (show_bits1(&h->gb) && get_bits_left(&h->gb) < 10) {
-        av_log(h->avctx, AV_LOG_WARNING, "Truncated VUI\n");
+    if (show_bits1(gb) && get_bits_left(gb) < 10) {
+        av_log(avctx, AV_LOG_WARNING, "Truncated VUI\n");
         return 0;
     }
 
-    sps->timing_info_present_flag = get_bits1(&h->gb);
+    sps->timing_info_present_flag = get_bits1(gb);
     if (sps->timing_info_present_flag) {
-        unsigned num_units_in_tick = get_bits_long(&h->gb, 32);
-        unsigned time_scale        = get_bits_long(&h->gb, 32);
+        unsigned num_units_in_tick = get_bits_long(gb, 32);
+        unsigned time_scale        = get_bits_long(gb, 32);
         if (!num_units_in_tick || !time_scale) {
-            av_log(h->avctx, AV_LOG_ERROR,
+            av_log(avctx, AV_LOG_ERROR,
                    "time_scale/num_units_in_tick invalid or unsupported (%u/%u)\n",
                    time_scale, num_units_in_tick);
             sps->timing_info_present_flag = 0;
@@ -179,41 +200,41 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps)
             sps->num_units_in_tick = num_units_in_tick;
             sps->time_scale = time_scale;
         }
-        sps->fixed_frame_rate_flag = get_bits1(&h->gb);
+        sps->fixed_frame_rate_flag = get_bits1(gb);
     }
 
-    sps->nal_hrd_parameters_present_flag = get_bits1(&h->gb);
+    sps->nal_hrd_parameters_present_flag = get_bits1(gb);
     if (sps->nal_hrd_parameters_present_flag)
-        if (decode_hrd_parameters(h, sps) < 0)
+        if (decode_hrd_parameters(gb, avctx, sps) < 0)
             return AVERROR_INVALIDDATA;
-    sps->vcl_hrd_parameters_present_flag = get_bits1(&h->gb);
+    sps->vcl_hrd_parameters_present_flag = get_bits1(gb);
     if (sps->vcl_hrd_parameters_present_flag)
-        if (decode_hrd_parameters(h, sps) < 0)
+        if (decode_hrd_parameters(gb, avctx, sps) < 0)
             return AVERROR_INVALIDDATA;
     if (sps->nal_hrd_parameters_present_flag ||
         sps->vcl_hrd_parameters_present_flag)
-        get_bits1(&h->gb);     /* low_delay_hrd_flag */
-    sps->pic_struct_present_flag = get_bits1(&h->gb);
-    if (!get_bits_left(&h->gb))
+        get_bits1(gb);     /* low_delay_hrd_flag */
+    sps->pic_struct_present_flag = get_bits1(gb);
+    if (!get_bits_left(gb))
         return 0;
-    sps->bitstream_restriction_flag = get_bits1(&h->gb);
+    sps->bitstream_restriction_flag = get_bits1(gb);
     if (sps->bitstream_restriction_flag) {
-        get_bits1(&h->gb);     /* motion_vectors_over_pic_boundaries_flag */
-        get_ue_golomb(&h->gb); /* max_bytes_per_pic_denom */
-        get_ue_golomb(&h->gb); /* max_bits_per_mb_denom */
-        get_ue_golomb(&h->gb); /* log2_max_mv_length_horizontal */
-        get_ue_golomb(&h->gb); /* log2_max_mv_length_vertical */
-        sps->num_reorder_frames = get_ue_golomb(&h->gb);
-        get_ue_golomb(&h->gb); /*max_dec_frame_buffering*/
-
-        if (get_bits_left(&h->gb) < 0) {
+        get_bits1(gb);     /* motion_vectors_over_pic_boundaries_flag */
+        get_ue_golomb(gb); /* max_bytes_per_pic_denom */
+        get_ue_golomb(gb); /* max_bits_per_mb_denom */
+        get_ue_golomb(gb); /* log2_max_mv_length_horizontal */
+        get_ue_golomb(gb); /* log2_max_mv_length_vertical */
+        sps->num_reorder_frames = get_ue_golomb(gb);
+        get_ue_golomb(gb); /*max_dec_frame_buffering*/
+
+        if (get_bits_left(gb) < 0) {
             sps->num_reorder_frames         = 0;
             sps->bitstream_restriction_flag = 0;
         }
 
         if (sps->num_reorder_frames > 16U
             /* max_dec_frame_buffering || max_dec_frame_buffering > 16 */) {
-            av_log(h->avctx, AV_LOG_ERROR,
+            av_log(avctx, AV_LOG_ERROR,
                    "Clipping illegal num_reorder_frames %d\n",
                    sps->num_reorder_frames);
             sps->num_reorder_frames = 16;
@@ -224,18 +245,18 @@ static inline int decode_vui_parameters(H264Context *h, SPS *sps)
     return 0;
 }
 
-static void decode_scaling_list(H264Context *h, uint8_t *factors, int size,
+static void decode_scaling_list(GetBitContext *gb, uint8_t *factors, int size,
                                 const uint8_t *jvt_list,
                                 const uint8_t *fallback_list)
 {
     int i, last = 8, next = 8;
     const uint8_t *scan = size == 16 ? ff_zigzag_scan : ff_zigzag_direct;
-    if (!get_bits1(&h->gb)) /* matrix not written, we use the predicted one */
+    if (!get_bits1(gb)) /* matrix not written, we use the predicted one */
         memcpy(factors, fallback_list, size * sizeof(uint8_t));
     else
         for (i = 0; i < size; i++) {
             if (next)
-                next = (last + get_se_golomb(&h->gb)) & 0xff;
+                next = (last + get_se_golomb(gb)) & 0xff;
             if (!i && !next) { /* matrix not written, we use the preset one */
                 memcpy(factors, jvt_list, size * sizeof(uint8_t));
                 break;
@@ -244,7 +265,7 @@ static void decode_scaling_list(H264Context *h, uint8_t *factors, int size,
         }
 }
 
-static void decode_scaling_matrices(H264Context *h, SPS *sps,
+static void decode_scaling_matrices(GetBitContext *gb, SPS *sps,
                                     PPS *pps, int is_sps,
                                     uint8_t(*scaling_matrix4)[16],
                                     uint8_t(*scaling_matrix8)[64])
@@ -256,58 +277,61 @@ static void decode_scaling_matrices(H264Context *h, SPS *sps,
         fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0],
         fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1]
     };
-    if (get_bits1(&h->gb)) {
+    if (get_bits1(gb)) {
         sps->scaling_matrix_present |= is_sps;
-        decode_scaling_list(h, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]);        // Intra, Y
-        decode_scaling_list(h, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr
-        decode_scaling_list(h, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb
-        decode_scaling_list(h, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]);        // Inter, Y
-        decode_scaling_list(h, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr
-        decode_scaling_list(h, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb
+        decode_scaling_list(gb, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]);        // Intra, Y
+        decode_scaling_list(gb, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr
+        decode_scaling_list(gb, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb
+        decode_scaling_list(gb, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]);        // Inter, Y
+        decode_scaling_list(gb, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr
+        decode_scaling_list(gb, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb
         if (is_sps || pps->transform_8x8_mode) {
-            decode_scaling_list(h, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y
-            decode_scaling_list(h, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y
+            decode_scaling_list(gb, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y
+            decode_scaling_list(gb, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y
             if (sps->chroma_format_idc == 3) {
-                decode_scaling_list(h, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr
-                decode_scaling_list(h, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr
-                decode_scaling_list(h, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb
-                decode_scaling_list(h, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb
+                decode_scaling_list(gb, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr
+                decode_scaling_list(gb, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr
+                decode_scaling_list(gb, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb
+                decode_scaling_list(gb, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb
             }
         }
     }
 }
 
-int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
+int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
+                                     H264ParamSets *ps, int ignore_truncation)
 {
+    AVBufferRef *sps_buf;
     int profile_idc, level_idc, constraint_set_flags = 0;
     unsigned int sps_id;
     int i, log2_max_frame_num_minus4;
     SPS *sps;
 
-    sps = av_mallocz(sizeof(SPS));
-    if (!sps)
+    sps_buf = av_buffer_allocz(sizeof(*sps));
+    if (!sps_buf)
         return AVERROR(ENOMEM);
+    sps = (SPS*)sps_buf->data;
 
-    sps->data_size = h->gb.buffer_end - h->gb.buffer;
+    sps->data_size = gb->buffer_end - gb->buffer;
     if (sps->data_size > sizeof(sps->data)) {
-        av_log(h->avctx, AV_LOG_WARNING, "Truncating likely oversized SPS\n");
+        av_log(avctx, AV_LOG_WARNING, "Truncating likely oversized SPS\n");
         sps->data_size = sizeof(sps->data);
     }
-    memcpy(sps->data, h->gb.buffer, sps->data_size);
-
-    profile_idc           = get_bits(&h->gb, 8);
-    constraint_set_flags |= get_bits1(&h->gb) << 0;   // constraint_set0_flag
-    constraint_set_flags |= get_bits1(&h->gb) << 1;   // constraint_set1_flag
-    constraint_set_flags |= get_bits1(&h->gb) << 2;   // constraint_set2_flag
-    constraint_set_flags |= get_bits1(&h->gb) << 3;   // constraint_set3_flag
-    constraint_set_flags |= get_bits1(&h->gb) << 4;   // constraint_set4_flag
-    constraint_set_flags |= get_bits1(&h->gb) << 5;   // constraint_set5_flag
-    skip_bits(&h->gb, 2);                             // reserved_zero_2bits
-    level_idc = get_bits(&h->gb, 8);
-    sps_id    = get_ue_golomb_31(&h->gb);
+    memcpy(sps->data, gb->buffer, sps->data_size);
+
+    profile_idc           = get_bits(gb, 8);
+    constraint_set_flags |= get_bits1(gb) << 0;   // constraint_set0_flag
+    constraint_set_flags |= get_bits1(gb) << 1;   // constraint_set1_flag
+    constraint_set_flags |= get_bits1(gb) << 2;   // constraint_set2_flag
+    constraint_set_flags |= get_bits1(gb) << 3;   // constraint_set3_flag
+    constraint_set_flags |= get_bits1(gb) << 4;   // constraint_set4_flag
+    constraint_set_flags |= get_bits1(gb) << 5;   // constraint_set5_flag
+    skip_bits(gb, 2);                             // reserved_zero_2bits
+    level_idc = get_bits(gb, 8);
+    sps_id    = get_ue_golomb_31(gb);
 
     if (sps_id >= MAX_SPS_COUNT) {
-        av_log(h->avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id);
+        av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id);
         goto fail;
     }
 
@@ -334,33 +358,33 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
         sps->profile_idc == 128 ||  // Multiview High profile (MVC)
         sps->profile_idc == 138 ||  // Multiview Depth High profile (MVCD)
         sps->profile_idc == 144) {  // old High444 profile
-        sps->chroma_format_idc = get_ue_golomb_31(&h->gb);
+        sps->chroma_format_idc = get_ue_golomb_31(gb);
         if (sps->chroma_format_idc > 3U) {
-            avpriv_request_sample(h->avctx, "chroma_format_idc %u",
+            avpriv_request_sample(avctx, "chroma_format_idc %u",
                                   sps->chroma_format_idc);
             goto fail;
         } else if (sps->chroma_format_idc == 3) {
-            sps->residual_color_transform_flag = get_bits1(&h->gb);
+            sps->residual_color_transform_flag = get_bits1(gb);
             if (sps->residual_color_transform_flag) {
-                av_log(h->avctx, AV_LOG_ERROR, "separate color planes are not supported\n");
+                av_log(avctx, AV_LOG_ERROR, "separate color planes are not supported\n");
                 goto fail;
             }
         }
-        sps->bit_depth_luma   = get_ue_golomb(&h->gb) + 8;
-        sps->bit_depth_chroma = get_ue_golomb(&h->gb) + 8;
+        sps->bit_depth_luma   = get_ue_golomb(gb) + 8;
+        sps->bit_depth_chroma = get_ue_golomb(gb) + 8;
         if (sps->bit_depth_chroma != sps->bit_depth_luma) {
-            avpriv_request_sample(h->avctx,
+            avpriv_request_sample(avctx,
                                   "Different chroma and luma bit depth");
             goto fail;
         }
         if (sps->bit_depth_luma   < 8 || sps->bit_depth_luma   > 14 ||
             sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) {
-            av_log(h->avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
+            av_log(avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
                    sps->bit_depth_luma, sps->bit_depth_chroma);
             goto fail;
         }
-        sps->transform_bypass = get_bits1(&h->gb);
-        decode_scaling_matrices(h, sps, NULL, 1,
+        sps->transform_bypass = get_bits1(gb);
+        decode_scaling_matrices(gb, sps, NULL, 1,
                                 sps->scaling_matrix4, sps->scaling_matrix8);
     } else {
         sps->chroma_format_idc = 1;
@@ -368,89 +392,89 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
         sps->bit_depth_chroma  = 8;
     }
 
-    log2_max_frame_num_minus4 = get_ue_golomb(&h->gb);
+    log2_max_frame_num_minus4 = get_ue_golomb(gb);
     if (log2_max_frame_num_minus4 < MIN_LOG2_MAX_FRAME_NUM - 4 ||
         log2_max_frame_num_minus4 > MAX_LOG2_MAX_FRAME_NUM - 4) {
-        av_log(h->avctx, AV_LOG_ERROR,
+        av_log(avctx, AV_LOG_ERROR,
                "log2_max_frame_num_minus4 out of range (0-12): %d\n",
                log2_max_frame_num_minus4);
         goto fail;
     }
     sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4;
 
-    sps->poc_type = get_ue_golomb_31(&h->gb);
+    sps->poc_type = get_ue_golomb_31(gb);
 
     if (sps->poc_type == 0) { // FIXME #define
-        unsigned t = get_ue_golomb(&h->gb);
+        unsigned t = get_ue_golomb(gb);
         if (t>12) {
-            av_log(h->avctx, AV_LOG_ERROR, "log2_max_poc_lsb (%d) is out of range\n", t);
+            av_log(avctx, AV_LOG_ERROR, "log2_max_poc_lsb (%d) is out of range\n", t);
             goto fail;
         }
         sps->log2_max_poc_lsb = t + 4;
     } else if (sps->poc_type == 1) { // FIXME #define
-        sps->delta_pic_order_always_zero_flag = get_bits1(&h->gb);
-        sps->offset_for_non_ref_pic           = get_se_golomb(&h->gb);
-        sps->offset_for_top_to_bottom_field   = get_se_golomb(&h->gb);
-        sps->poc_cycle_length                 = get_ue_golomb(&h->gb);
+        sps->delta_pic_order_always_zero_flag = get_bits1(gb);
+        sps->offset_for_non_ref_pic           = get_se_golomb(gb);
+        sps->offset_for_top_to_bottom_field   = get_se_golomb(gb);
+        sps->poc_cycle_length                 = get_ue_golomb(gb);
 
         if ((unsigned)sps->poc_cycle_length >=
             FF_ARRAY_ELEMS(sps->offset_for_ref_frame)) {
-            av_log(h->avctx, AV_LOG_ERROR,
+            av_log(avctx, AV_LOG_ERROR,
                    "poc_cycle_length overflow %d\n", sps->poc_cycle_length);
             goto fail;
         }
 
         for (i = 0; i < sps->poc_cycle_length; i++)
-            sps->offset_for_ref_frame[i] = get_se_golomb(&h->gb);
+            sps->offset_for_ref_frame[i] = get_se_golomb(gb);
     } else if (sps->poc_type != 2) {
-        av_log(h->avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type);
+        av_log(avctx, AV_LOG_ERROR, "illegal POC type %d\n", sps->poc_type);
         goto fail;
     }
 
-    sps->ref_frame_count = get_ue_golomb_31(&h->gb);
-    if (h->avctx->codec_tag == MKTAG('S', 'M', 'V', '2'))
+    sps->ref_frame_count = get_ue_golomb_31(gb);
+    if (avctx->codec_tag == MKTAG('S', 'M', 'V', '2'))
         sps->ref_frame_count = FFMAX(2, sps->ref_frame_count);
     if (sps->ref_frame_count > H264_MAX_PICTURE_COUNT - 2 ||
         sps->ref_frame_count > 16U) {
-        av_log(h->avctx, AV_LOG_ERROR,
+        av_log(avctx, AV_LOG_ERROR,
                "too many reference frames %d\n", sps->ref_frame_count);
         goto fail;
     }
-    sps->gaps_in_frame_num_allowed_flag = get_bits1(&h->gb);
-    sps->mb_width                       = get_ue_golomb(&h->gb) + 1;
-    sps->mb_height                      = get_ue_golomb(&h->gb) + 1;
+    sps->gaps_in_frame_num_allowed_flag = get_bits1(gb);
+    sps->mb_width                       = get_ue_golomb(gb) + 1;
+    sps->mb_height                      = get_ue_golomb(gb) + 1;
     if ((unsigned)sps->mb_width  >= INT_MAX / 16 ||
         (unsigned)sps->mb_height >= INT_MAX / 16 ||
         av_image_check_size(16 * sps->mb_width,
-                            16 * sps->mb_height, 0, h->avctx)) {
-        av_log(h->avctx, AV_LOG_ERROR, "mb_width/height overflow\n");
+                            16 * sps->mb_height, 0, avctx)) {
+        av_log(avctx, AV_LOG_ERROR, "mb_width/height overflow\n");
         goto fail;
     }
 
-    sps->frame_mbs_only_flag = get_bits1(&h->gb);
+    sps->frame_mbs_only_flag = get_bits1(gb);
     if (!sps->frame_mbs_only_flag)
-        sps->mb_aff = get_bits1(&h->gb);
+        sps->mb_aff = get_bits1(gb);
     else
         sps->mb_aff = 0;
 
-    sps->direct_8x8_inference_flag = get_bits1(&h->gb);
+    sps->direct_8x8_inference_flag = get_bits1(gb);
 
 #ifndef ALLOW_INTERLACE
     if (sps->mb_aff)
-        av_log(h->avctx, AV_LOG_ERROR,
+        av_log(avctx, AV_LOG_ERROR,
                "MBAFF support not included; enable it at compile-time.\n");
 #endif
-    sps->crop = get_bits1(&h->gb);
+    sps->crop = get_bits1(gb);
     if (sps->crop) {
-        unsigned int crop_left   = get_ue_golomb(&h->gb);
-        unsigned int crop_right  = get_ue_golomb(&h->gb);
-        unsigned int crop_top    = get_ue_golomb(&h->gb);
-        unsigned int crop_bottom = get_ue_golomb(&h->gb);
+        unsigned int crop_left   = get_ue_golomb(gb);
+        unsigned int crop_right  = get_ue_golomb(gb);
+        unsigned int crop_top    = get_ue_golomb(gb);
+        unsigned int crop_bottom = get_ue_golomb(gb);
         int width  = 16 * sps->mb_width;
         int height = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag);
 
-        if (h->avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) {
-            av_log(h->avctx, AV_LOG_DEBUG, "discarding sps cropping, original "
+        if (avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) {
+            av_log(avctx, AV_LOG_DEBUG, "discarding sps cropping, original "
                                            "values are l:%d r:%d t:%d b:%d\n",
                    crop_left, crop_right, crop_top, crop_bottom);
 
@@ -466,9 +490,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
             int step_y = (2 - sps->frame_mbs_only_flag) << vsub;
 
             if (crop_left & (0x1F >> (sps->bit_depth_luma > 8)) &&
-                !(h->avctx->flags & AV_CODEC_FLAG_UNALIGNED)) {
+                !(avctx->flags & AV_CODEC_FLAG_UNALIGNED)) {
                 crop_left &= ~(0x1F >> (sps->bit_depth_luma > 8));
-                av_log(h->avctx, AV_LOG_WARNING,
+                av_log(avctx, AV_LOG_WARNING,
                        "Reducing left cropping to %d "
                        "chroma samples to preserve alignment.\n",
                        crop_left);
@@ -481,7 +505,7 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
                 (crop_left + crop_right ) * step_x >= width ||
                 (crop_top  + crop_bottom) * step_y >= height
             ) {
-                av_log(h->avctx, AV_LOG_ERROR, "crop values invalid %d %d %d %d / %d %d\n", crop_left, crop_right, crop_top, crop_bottom, width, height);
+                av_log(avctx, AV_LOG_ERROR, "crop values invalid %d %d %d %d / %d %d\n", crop_left, crop_right, crop_top, crop_bottom, width, height);
                 goto fail;
             }
 
@@ -498,16 +522,16 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
         sps->crop        = 0;
     }
 
-    sps->vui_parameters_present_flag = get_bits1(&h->gb);
+    sps->vui_parameters_present_flag = get_bits1(gb);
     if (sps->vui_parameters_present_flag) {
-        int ret = decode_vui_parameters(h, sps);
+        int ret = decode_vui_parameters(gb, avctx, sps);
         if (ret < 0)
             goto fail;
     }
 
-    if (get_bits_left(&h->gb) < 0) {
-        av_log(h->avctx, ignore_truncation ? AV_LOG_WARNING : AV_LOG_ERROR,
-               "Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(&h->gb));
+    if (get_bits_left(gb) < 0) {
+        av_log(avctx, ignore_truncation ? AV_LOG_WARNING : AV_LOG_ERROR,
+               "Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(gb));
         if (!ignore_truncation)
             goto fail;
     }
@@ -528,9 +552,9 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
     if (!sps->sar.den)
         sps->sar.den = 1;
 
-    if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
+    if (avctx->debug & FF_DEBUG_PICT_INFO) {
         static const char csp[4][5] = { "Gray", "420", "422", "444" };
-        av_log(h->avctx, AV_LOG_DEBUG,
+        av_log(avctx, AV_LOG_DEBUG,
                "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %"PRId32"/%"PRId32" b%d reo:%d\n",
                sps_id, sps->profile_idc, sps->level_idc,
                sps->poc_type,
@@ -548,18 +572,97 @@ int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
                sps->bitstream_restriction_flag ? sps->num_reorder_frames : -1
                );
     }
-    sps->new = 1;
 
-    av_free(h->sps_buffers[sps_id]);
-    h->sps_buffers[sps_id] = sps;
+    /* if this is a repeat of an already parsed SPS, keep the
+     * original one;
+     * otherwise drop all PPSes that depend on it */
+    if (ps->sps_list[sps_id] &&
+        !memcmp(ps->sps_list[sps_id]->data, sps_buf->data, sps_buf->size)) {
+        av_buffer_unref(&sps_buf);
+    } else {
+        remove_sps(ps, sps_id);
+        ps->sps_list[sps_id] = sps_buf;
+    }
 
     return 0;
 
 fail:
-    av_free(sps);
+    av_buffer_unref(&sps_buf);
     return AVERROR_INVALIDDATA;
 }
 
+static void init_dequant8_coeff_table(PPS *pps, const SPS *sps)
+{
+    int i, j, q, x;
+    const int max_qp = 51 + 6 * (sps->bit_depth_luma - 8);
+
+    for (i = 0; i < 6; i++) {
+        pps->dequant8_coeff[i] = pps->dequant8_buffer[i];
+        for (j = 0; j < i; j++)
+            if (!memcmp(pps->scaling_matrix8[j], pps->scaling_matrix8[i],
+                        64 * sizeof(uint8_t))) {
+                pps->dequant8_coeff[i] = pps->dequant8_buffer[j];
+                break;
+            }
+        if (j < i)
+            continue;
+
+        for (q = 0; q < max_qp + 1; q++) {
+            int shift = ff_h264_quant_div6[q];
+            int idx   = ff_h264_quant_rem6[q];
+            for (x = 0; x < 64; x++)
+                pps->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
+                    ((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
+                     pps->scaling_matrix8[i][x]) << shift;
+        }
+    }
+}
+
+static void init_dequant4_coeff_table(PPS *pps, const SPS *sps)
+{
+    int i, j, q, x;
+    const int max_qp = 51 + 6 * (sps->bit_depth_luma - 8);
+    for (i = 0; i < 6; i++) {
+        pps->dequant4_coeff[i] = pps->dequant4_buffer[i];
+        for (j = 0; j < i; j++)
+            if (!memcmp(pps->scaling_matrix4[j], pps->scaling_matrix4[i],
+                        16 * sizeof(uint8_t))) {
+                pps->dequant4_coeff[i] = pps->dequant4_buffer[j];
+                break;
+            }
+        if (j < i)
+            continue;
+
+        for (q = 0; q < max_qp + 1; q++) {
+            int shift = ff_h264_quant_div6[q] + 2;
+            int idx   = ff_h264_quant_rem6[q];
+            for (x = 0; x < 16; x++)
+                pps->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
+                    ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
+                     pps->scaling_matrix4[i][x]) << shift;
+        }
+    }
+}
+
+static void init_dequant_tables(PPS *pps, const SPS *sps)
+{
+    int i, x;
+    init_dequant4_coeff_table(pps, sps);
+    memset(pps->dequant8_coeff, 0, sizeof(pps->dequant8_coeff));
+
+    if (pps->transform_8x8_mode)
+        init_dequant8_coeff_table(pps, sps);
+    if (sps->transform_bypass) {
+        for (i = 0; i < 6; i++)
+            for (x = 0; x < 16; x++)
+                pps->dequant4_coeff[i][0][x] = 1 << 6;
+        if (pps->transform_8x8_mode)
+            for (i = 0; i < 6; i++)
+                for (x = 0; x < 64; x++)
+                    pps->dequant8_coeff[i][0][x] = 1 << 6;
+    }
+}
+
 static void build_qp_table(PPS *pps, int t, int index, const int depth)
 {
     int i;
@@ -569,14 +672,13 @@ static void build_qp_table(PPS *pps, int t, int index, const int depth)
             ff_h264_chroma_qp[depth - 8][av_clip(i + index, 0, max_qp)];
 }
 
-static int more_rbsp_data_in_pps(H264Context *h, PPS *pps)
+static int more_rbsp_data_in_pps(const SPS *sps, void *logctx)
 {
-    const SPS *sps = h->sps_buffers[pps->sps_id];
     int profile_idc = sps->profile_idc;
 
     if ((profile_idc == 66 || profile_idc == 77 ||
          profile_idc == 88) && (sps->constraint_set_flags & 7)) {
-        av_log(h->avctx, AV_LOG_VERBOSE,
+        av_log(logctx, AV_LOG_VERBOSE,
                "Current profile doesn't provide more RBSP data in PPS, skipping\n");
         return 0;
     }
@@ -584,57 +686,62 @@ static int more_rbsp_data_in_pps(H264Context *h, PPS *pps)
     return 1;
 }
 
-int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
+int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
+                                         H264ParamSets *ps, int bit_length)
 {
-    const SPS *sps;
-    unsigned int pps_id = get_ue_golomb(&h->gb);
+    AVBufferRef *pps_buf;
+    SPS *sps;
+    unsigned int pps_id = get_ue_golomb(gb);
     PPS *pps;
     int qp_bd_offset;
     int bits_left;
     int ret;
 
     if (pps_id >= MAX_PPS_COUNT) {
-        av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
+        av_log(avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
         return AVERROR_INVALIDDATA;
     }
 
-    pps = av_mallocz(sizeof(PPS));
-    if (!pps)
+    pps_buf = av_buffer_allocz(sizeof(*pps));
+    if (!pps_buf)
         return AVERROR(ENOMEM);
-    pps->data_size = h->gb.buffer_end - h->gb.buffer;
+    pps = (PPS*)pps_buf->data;
+
+    pps->data_size = gb->buffer_end - gb->buffer;
     if (pps->data_size > sizeof(pps->data)) {
-        av_log(h->avctx, AV_LOG_WARNING, "Truncating likely oversized PPS\n");
+        av_log(avctx, AV_LOG_WARNING, "Truncating likely oversized PPS\n");
         pps->data_size = sizeof(pps->data);
     }
-    memcpy(pps->data, h->gb.buffer, pps->data_size);
-    pps->sps_id = get_ue_golomb_31(&h->gb);
+    memcpy(pps->data, gb->buffer, pps->data_size);
+
+    pps->sps_id = get_ue_golomb_31(gb);
     if ((unsigned)pps->sps_id >= MAX_SPS_COUNT ||
-        !h->sps_buffers[pps->sps_id]) {
-        av_log(h->avctx, AV_LOG_ERROR, "sps_id %u out of range\n", pps->sps_id);
+        !ps->sps_list[pps->sps_id]) {
+        av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", pps->sps_id);
         ret = AVERROR_INVALIDDATA;
         goto fail;
     }
-    sps = h->sps_buffers[pps->sps_id];
+    sps = (SPS*)ps->sps_list[pps->sps_id]->data;
     if (sps->bit_depth_luma > 14) {
-        av_log(h->avctx, AV_LOG_ERROR,
+        av_log(avctx, AV_LOG_ERROR,
                "Invalid luma bit depth=%d\n",
                sps->bit_depth_luma);
         ret = AVERROR_INVALIDDATA;
         goto fail;
     } else if (sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13) {
-        av_log(h->avctx, AV_LOG_ERROR,
+        av_log(avctx, AV_LOG_ERROR,
                "Unimplemented luma bit depth=%d\n",
                sps->bit_depth_luma);
         ret = AVERROR_PATCHWELCOME;
         goto fail;
     }
 
-    pps->cabac             = get_bits1(&h->gb);
-    pps->pic_order_present = get_bits1(&h->gb);
-    pps->slice_group_count = get_ue_golomb(&h->gb) + 1;
+    pps->cabac             = get_bits1(gb);
+    pps->pic_order_present = get_bits1(gb);
+    pps->slice_group_count = get_ue_golomb(gb) + 1;
     if (pps->slice_group_count > 1) {
-        pps->mb_slice_group_map_type = get_ue_golomb(&h->gb);
-        av_log(h->avctx, AV_LOG_ERROR, "FMO not supported\n");
+        pps->mb_slice_group_map_type = get_ue_golomb(gb);
+        av_log(avctx, AV_LOG_ERROR, "FMO not supported\n");
         switch (pps->mb_slice_group_map_type) {
         case 0:
 #if 0
@@ -667,40 +774,38 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
             break;
         }
     }
-    pps->ref_count[0] = get_ue_golomb(&h->gb) + 1;
-    pps->ref_count[1] = get_ue_golomb(&h->gb) + 1;
+    pps->ref_count[0] = get_ue_golomb(gb) + 1;
+    pps->ref_count[1] = get_ue_golomb(gb) + 1;
     if (pps->ref_count[0] - 1 > 32 - 1 || pps->ref_count[1] - 1 > 32 - 1) {
-        av_log(h->avctx, AV_LOG_ERROR, "reference overflow (pps)\n");
+        av_log(avctx, AV_LOG_ERROR, "reference overflow (pps)\n");
         ret = AVERROR_INVALIDDATA;
         goto fail;
     }
 
     qp_bd_offset = 6 * (sps->bit_depth_luma - 8);
 
-    pps->weighted_pred                        = get_bits1(&h->gb);
-    pps->weighted_bipred_idc                  = get_bits(&h->gb, 2);
-    pps->init_qp                              = get_se_golomb(&h->gb) + 26 + qp_bd_offset;
-    pps->init_qs                              = get_se_golomb(&h->gb) + 26 + qp_bd_offset;
-    pps->chroma_qp_index_offset[0]            = get_se_golomb(&h->gb);
-    pps->deblocking_filter_parameters_present = get_bits1(&h->gb);
-    pps->constrained_intra_pred               = get_bits1(&h->gb);
-    pps->redundant_pic_cnt_present            = get_bits1(&h->gb);
+    pps->weighted_pred                        = get_bits1(gb);
+    pps->weighted_bipred_idc                  = get_bits(gb, 2);
+    pps->init_qp                              = get_se_golomb(gb) + 26 + qp_bd_offset;
+    pps->init_qs                              = get_se_golomb(gb) + 26 + qp_bd_offset;
+    pps->chroma_qp_index_offset[0]            = get_se_golomb(gb);
+    pps->deblocking_filter_parameters_present = get_bits1(gb);
+    pps->constrained_intra_pred               = get_bits1(gb);
+    pps->redundant_pic_cnt_present            = get_bits1(gb);
 
     pps->transform_8x8_mode = 0;
-    // contents of sps/pps can change even if id doesn't, so reinit
-    h->dequant_coeff_pps = -1;
-    memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4,
+    memcpy(pps->scaling_matrix4, sps->scaling_matrix4,
            sizeof(pps->scaling_matrix4));
-    memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8,
+    memcpy(pps->scaling_matrix8, sps->scaling_matrix8,
            sizeof(pps->scaling_matrix8));
 
-    bits_left = bit_length - get_bits_count(&h->gb);
-    if (bits_left > 0 && more_rbsp_data_in_pps(h, pps)) {
-        pps->transform_8x8_mode = get_bits1(&h->gb);
-        decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0,
+    bits_left = bit_length - get_bits_count(gb);
+    if (bits_left > 0 && more_rbsp_data_in_pps(sps, avctx)) {
+        pps->transform_8x8_mode = get_bits1(gb);
+        decode_scaling_matrices(gb, sps, pps, 0,
                                 pps->scaling_matrix4, pps->scaling_matrix8);
         // second_chroma_qp_index_offset
-        pps->chroma_qp_index_offset[1] = get_se_golomb(&h->gb);
+        pps->chroma_qp_index_offset[1] = get_se_golomb(gb);
     } else {
         pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0];
     }
@@ -709,11 +814,14 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
                    sps->bit_depth_luma);
     build_qp_table(pps, 1, pps->chroma_qp_index_offset[1],
                    sps->bit_depth_luma);
+
+    init_dequant_tables(pps, sps);
+
     if (pps->chroma_qp_index_offset[0] != pps->chroma_qp_index_offset[1])
         pps->chroma_qp_diff = 1;
 
-    if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
-        av_log(h->avctx, AV_LOG_DEBUG,
+    if (avctx->debug & FF_DEBUG_PICT_INFO) {
+        av_log(avctx, AV_LOG_DEBUG,
                "pps:%u sps:%u %s slice_groups:%d ref:%u/%u %s qp:%d/%d/%d/%d %s %s %s %s\n",
                pps_id, pps->sps_id,
                pps->cabac ? "CABAC" : "CAVLC",
@@ -727,11 +835,12 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
                pps->transform_8x8_mode ? "8x8DCT" : "");
     }
 
-    av_free(h->pps_buffers[pps_id]);
-    h->pps_buffers[pps_id] = pps;
+    remove_pps(ps, pps_id);
+    ps->pps_list[pps_id] = pps_buf;
+
     return 0;
 
 fail:
-    av_free(pps);
+    av_buffer_unref(&pps_buf);
     return ret;
 }
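
Since h264_ps.c no longer reaches into H264Context, driving the new entry points only takes a GetBitContext over the RBSP plus an H264ParamSets to fill. A hedged sketch of the call pattern (parse_sps_nal, rbsp and size are illustrative, and it assumes the prototypes are declared in h264.h as in this tree; the buffer is expected to have the NAL header stripped and emulation-prevention bytes removed):

    #include "avcodec.h"
    #include "get_bits.h"
    #include "h264.h"

    static int parse_sps_nal(AVCodecContext *avctx, H264ParamSets *ps,
                             const uint8_t *rbsp, int size)
    {
        GetBitContext gb;
        int ret = init_get_bits8(&gb, rbsp, size);
        if (ret < 0)
            return ret;
        /* 0 = do not ignore truncation; on success the SPS lands
         * refcounted in ps->sps_list[] */
        return ff_h264_decode_seq_parameter_set(&gb, avctx, ps, 0);
    }
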
diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c
index 279dbcf..02c7867 100644
--- a/libavcodec/h264_refs.c
+++ b/libavcodec/h264_refs.c
@@ -592,7 +592,7 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
     int mmco_index = 0, i = 0;
 
     if (h->short_ref_count &&
-        h->long_ref_count + h->short_ref_count >= h->sps.ref_frame_count &&
+        h->long_ref_count + h->short_ref_count >= h->ps.sps->ref_frame_count &&
         !(FIELD_PICTURE(h) && !h->first_field && h->cur_pic_ptr->reference)) {
         mmco[0].opcode        = MMCO_SHORT2UNUSED;
         mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
@@ -768,7 +768,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
         }
     }
 
-    if (h->long_ref_count + h->short_ref_count > FFMAX(h->sps.ref_frame_count, 1)) {
+    if (h->long_ref_count + h->short_ref_count > FFMAX(h->ps.sps->ref_frame_count, 1)) {
 
         /* We have too many reference frames, probably due to corrupted
          * stream. Need to discard one frame. Prevents overrun of the
@@ -777,7 +777,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
         av_log(h->avctx, AV_LOG_ERROR,
                "number of reference frames (%d+%d) exceeds max (%d; probably "
                "corrupt input), discarding one\n",
-               h->long_ref_count, h->short_ref_count, h->sps.ref_frame_count);
+               h->long_ref_count, h->short_ref_count, h->ps.sps->ref_frame_count);
         err = AVERROR_INVALIDDATA;
 
         if (h->long_ref_count && !h->short_ref_count) {
@@ -796,8 +796,8 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
     for (i = 0; i<h->short_ref_count; i++) {
         pic = h->short_ref[i];
         if (pic->invalid_gap) {
-            int d = av_mod_uintp2(h->cur_pic_ptr->frame_num - pic->frame_num, h->sps.log2_max_frame_num);
-            if (d > h->sps.ref_frame_count)
+            int d = av_mod_uintp2(h->cur_pic_ptr->frame_num - pic->frame_num, h->ps.sps->log2_max_frame_num);
+            if (d > h->ps.sps->ref_frame_count)
                 remove_short(h, pic->frame_num, 0);
         }
     }
@@ -805,10 +805,11 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
     print_short_term(h);
     print_long_term(h);
 
-    for (i = 0; i < FF_ARRAY_ELEMS(h->pps_buffers); i++) {
-        if (h->pps_buffers[i]) {
-            pps_ref_count[0] = FFMAX(pps_ref_count[0], h->pps_buffers[i]->ref_count[0]);
-            pps_ref_count[1] = FFMAX(pps_ref_count[1], h->pps_buffers[i]->ref_count[1]);
+    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
+        if (h->ps.pps_list[i]) {
+            const PPS *pps = (const PPS *)h->ps.pps_list[i]->data;
+            pps_ref_count[0] = FFMAX(pps_ref_count[0], pps->ref_count[0]);
+            pps_ref_count[1] = FFMAX(pps_ref_count[1], pps->ref_count[1]);
         }
     }
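
The casts through ->data above are a consequence of storing the parameter sets behind AVBufferRef. Purely illustrative accessors (not present in the tree; id is assumed to be range-checked by the caller) that capture the lookup pattern:

    #include "h264.h"

    /* NULL-safe lookups into the refcounted lists; the cast is needed because
     * AVBufferRef only exposes a raw uint8_t *data pointer. */
    static const SPS *ps_get_sps(const H264ParamSets *ps, unsigned id)
    {
        return ps->sps_list[id] ? (const SPS *)ps->sps_list[id]->data : NULL;
    }

    static const PPS *ps_get_pps(const H264ParamSets *ps, unsigned id)
    {
        return ps->pps_list[id] ? (const PPS *)ps->pps_list[id]->data : NULL;
    }
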
 
diff --git a/libavcodec/h264_sei.c b/libavcodec/h264_sei.c
index 77dd7b2..bdc5c9f 100644
--- a/libavcodec/h264_sei.c
+++ b/libavcodec/h264_sei.c
@@ -50,12 +50,17 @@ void ff_h264_reset_sei(H264Context *h)
 
 static int decode_picture_timing(H264Context *h)
 {
-    SPS *sps = &h->sps;
+    const SPS *sps = h->ps.sps;
     int i;
 
     for (i = 0; i<MAX_SPS_COUNT; i++)
-        if (!sps->log2_max_frame_num && h->sps_buffers[i])
-            sps = h->sps_buffers[i];
+        if ((!sps || !sps->log2_max_frame_num) && h->ps.sps_list[i])
+            sps = (const SPS *)h->ps.sps_list[i]->data;
+
+    if (!sps) {
+        av_log(h->avctx, AV_LOG_ERROR, "SPS unavailable in decode_picture_timing\n");
+        return 0;
+    }
 
     if (sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag) {
         h->sei_cpb_removal_delay = get_bits_long(&h->gb,
@@ -275,12 +280,12 @@ static int decode_buffering_period(H264Context *h)
     SPS *sps;
 
     sps_id = get_ue_golomb_31(&h->gb);
-    if (sps_id > 31 || !h->sps_buffers[sps_id]) {
+    if (sps_id > 31 || !h->ps.sps_list[sps_id]) {
         av_log(h->avctx, AV_LOG_ERROR,
                "non-existing SPS %d referenced in buffering period\n", sps_id);
         return AVERROR_INVALIDDATA;
     }
-    sps = h->sps_buffers[sps_id];
+    sps = (SPS*)h->ps.sps_list[sps_id]->data;
 
     // NOTE: This is really so duplicated in the standard... See H.264, D.1.1
     if (sps->nal_hrd_parameters_present_flag) {
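
A condensed sketch of the dual-reference ownership behind the *_ref fields in the h264_slice.c hunks below (activate_slot is an illustrative name, not tree code): the list slot and the active pointer each hold their own reference, so the two can be released in any order.

    #include "libavutil/buffer.h"
    #include "libavutil/error.h"

    /* Take a second, independent reference on the buffer sitting in a list
     * slot and expose its payload through an "active" pointer. */
    static int activate_slot(AVBufferRef **active_ref, const void **active,
                             AVBufferRef *slot)
    {
        av_buffer_unref(active_ref);   /* drop the previously active set */
        *active = NULL;
        if (!slot)
            return 0;
        *active_ref = av_buffer_ref(slot);
        if (!*active_ref)
            return AVERROR(ENOMEM);
        *active = (*active_ref)->data;
        return 0;
    }
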
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 6f9a041..394a0c4 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -192,9 +192,9 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
     if (ret < 0)
         goto fail;
 
-    pic->crop     = h->sps.crop;
-    pic->crop_top = h->sps.crop_top;
-    pic->crop_left= h->sps.crop_left;
+    pic->crop     = h->ps.sps->crop;
+    pic->crop_top = h->ps.sps->crop_top;
+    pic->crop_left= h->ps.sps->crop_left;
 
     if (h->avctx->hwaccel) {
         const AVHWAccel *hwaccel = h->avctx->hwaccel;
@@ -271,78 +271,6 @@ static int find_unused_picture(H264Context *h)
 }
 
 
-static void init_dequant8_coeff_table(H264Context *h)
-{
-    int i, j, q, x;
-    const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
-
-    for (i = 0; i < 6; i++) {
-        h->dequant8_coeff[i] = h->dequant8_buffer[i];
-        for (j = 0; j < i; j++)
-            if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
-                        64 * sizeof(uint8_t))) {
-                h->dequant8_coeff[i] = h->dequant8_buffer[j];
-                break;
-            }
-        if (j < i)
-            continue;
-
-        for (q = 0; q < max_qp + 1; q++) {
-            int shift = ff_h264_quant_div6[q];
-            int idx   = ff_h264_quant_rem6[q];
-            for (x = 0; x < 64; x++)
-                h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
-                    ((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
-                     h->pps.scaling_matrix8[i][x]) << shift;
-        }
-    }
-}
-
-static void init_dequant4_coeff_table(H264Context *h)
-{
-    int i, j, q, x;
-    const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
-    for (i = 0; i < 6; i++) {
-        h->dequant4_coeff[i] = h->dequant4_buffer[i];
-        for (j = 0; j < i; j++)
-            if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
-                        16 * sizeof(uint8_t))) {
-                h->dequant4_coeff[i] = h->dequant4_buffer[j];
-                break;
-            }
-        if (j < i)
-            continue;
-
-        for (q = 0; q < max_qp + 1; q++) {
-            int shift = ff_h264_quant_div6[q] + 2;
-            int idx   = ff_h264_quant_rem6[q];
-            for (x = 0; x < 16; x++)
-                h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
-                    ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
-                     h->pps.scaling_matrix4[i][x]) << shift;
-        }
-    }
-}
-
-void ff_h264_init_dequant_tables(H264Context *h)
-{
-    int i, x;
-    init_dequant4_coeff_table(h);
-    memset(h->dequant8_coeff, 0, sizeof(h->dequant8_coeff));
-
-    if (h->pps.transform_8x8_mode)
-        init_dequant8_coeff_table(h);
-    if (h->sps.transform_bypass) {
-        for (i = 0; i < 6; i++)
-            for (x = 0; x < 16; x++)
-                h->dequant4_coeff[i][0][x] = 1 << 6;
-        if (h->pps.transform_8x8_mode)
-            for (i = 0; i < 6; i++)
-                for (x = 0; x < 64; x++)
-                    h->dequant8_coeff[i][0][x] = 1 << 6;
-    }
-}
-
 #define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))
 
 #define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
@@ -364,26 +292,6 @@ static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
     }
 }
 
-static int copy_parameter_set(void **to, void **from, int count, int size)
-{
-    int i;
-
-    for (i = 0; i < count; i++) {
-        if (to[i] && !from[i]) {
-            av_freep(&to[i]);
-        } else if (from[i] && !to[i]) {
-            to[i] = av_malloc(size);
-            if (!to[i])
-                return AVERROR(ENOMEM);
-        }
-
-        if (from[i])
-            memcpy(to[i], from[i], size);
-    }
-
-    return 0;
-}
-
 #define copy_fields(to, from, start_field, end_field)                   \
     memcpy(&(to)->start_field, &(from)->start_field,                        \
            (char *)&(to)->end_field - (char *)&(to)->start_field)
@@ -401,15 +309,19 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
     if (dst == src)
         return 0;
 
+    // We can't fail if SPS isn't set as it breaks current skip_frame code
+    //if (!h1->ps.sps)
+    //    return AVERROR_INVALIDDATA;
+
     if (inited &&
         (h->width                 != h1->width                 ||
          h->height                != h1->height                ||
          h->mb_width              != h1->mb_width              ||
          h->mb_height             != h1->mb_height             ||
-         h->sps.bit_depth_luma    != h1->sps.bit_depth_luma    ||
-         h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
-         h->sps.colorspace        != h1->sps.colorspace)) {
-
+         !h->ps.sps                                            ||
+         h->ps.sps->bit_depth_luma    != h1->ps.sps->bit_depth_luma    ||
+         h->ps.sps->chroma_format_idc != h1->ps.sps->chroma_format_idc ||
+         h->ps.sps->colorspace        != h1->ps.sps->colorspace)) {
         need_reinit = 1;
     }
 
@@ -417,16 +329,39 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
     memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
 
     // SPS/PPS
-    if ((ret = copy_parameter_set((void **)h->sps_buffers,
-                                  (void **)h1->sps_buffers,
-                                  MAX_SPS_COUNT, sizeof(SPS))) < 0)
-        return ret;
-    h->sps = h1->sps;
-    if ((ret = copy_parameter_set((void **)h->pps_buffers,
-                                  (void **)h1->pps_buffers,
-                                  MAX_PPS_COUNT, sizeof(PPS))) < 0)
-        return ret;
-    h->pps = h1->pps;
+    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
+        av_buffer_unref(&h->ps.sps_list[i]);
+        if (h1->ps.sps_list[i]) {
+            h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
+            if (!h->ps.sps_list[i])
+                return AVERROR(ENOMEM);
+        }
+    }
+    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
+        av_buffer_unref(&h->ps.pps_list[i]);
+        if (h1->ps.pps_list[i]) {
+            h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
+            if (!h->ps.pps_list[i])
+                return AVERROR(ENOMEM);
+        }
+    }
+
+    av_buffer_unref(&h->ps.pps_ref);
+    av_buffer_unref(&h->ps.sps_ref);
+    h->ps.pps = NULL;
+    h->ps.sps = NULL;
+    if (h1->ps.pps_ref) {
+        h->ps.pps_ref = av_buffer_ref(h1->ps.pps_ref);
+        if (!h->ps.pps_ref)
+            return AVERROR(ENOMEM);
+        h->ps.pps = h->ps.pps_ref->data;
+    }
+    if (h1->ps.sps_ref) {
+        h->ps.sps_ref = av_buffer_ref(h1->ps.sps_ref);
+        if (!h->ps.sps_ref)
+            return AVERROR(ENOMEM);
+        h->ps.sps = h->ps.sps_ref->data;
+    }
 
     if (need_reinit || !inited) {
         h->width     = h1->width;
@@ -485,20 +420,6 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
     h->nal_length_size = h1->nal_length_size;
     h->x264_build      = h1->x264_build;
 
-    // Dequantization matrices
-    // FIXME these are big - can they be only copied when PPS changes?
-    copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
-
-    for (i = 0; i < 6; i++)
-        h->dequant4_coeff[i] = h->dequant4_buffer[0] +
-                               (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
-
-    for (i = 0; i < 6; i++)
-        h->dequant8_coeff[i] = h->dequant8_buffer[0] +
-                               (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
-
-    h->dequant_coeff_pps = h1->dequant_coeff_pps;
-
     // POC timing
     copy_fields(h, h1, poc_lsb, current_slice);
 
@@ -531,9 +452,9 @@ static int h264_frame_start(H264Context *h)
     int i, ret;
     const int pixel_shift = h->pixel_shift;
     int c[4] = {
-        1<<(h->sps.bit_depth_luma-1),
-        1<<(h->sps.bit_depth_chroma-1),
-        1<<(h->sps.bit_depth_chroma-1),
+        1<<(h->ps.sps->bit_depth_luma-1),
+        1<<(h->ps.sps->bit_depth_chroma-1),
+        1<<(h->ps.sps->bit_depth_chroma-1),
         -1
     };
 
@@ -806,7 +727,7 @@ static void init_scan_tables(H264Context *h)
         h->field_scan8x8_cavlc[i]  = TRANSPOSE(field_scan8x8_cavlc[i]);
 #undef TRANSPOSE
     }
-    if (h->sps.transform_bypass) { // FIXME same ugly
+    if (h->ps.sps->transform_bypass) { // FIXME same ugly
         memcpy(h->zigzag_scan_q0          , ff_zigzag_scan          , sizeof(h->zigzag_scan_q0         ));
         memcpy(h->zigzag_scan8x8_q0       , ff_zigzag_direct        , sizeof(h->zigzag_scan8x8_q0      ));
         memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc    , sizeof(h->zigzag_scan8x8_cavlc_q0));
@@ -835,7 +756,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
     const enum AVPixelFormat *choices = pix_fmts;
     int i;
 
-    switch (h->sps.bit_depth_luma) {
+    switch (h->ps.sps->bit_depth_luma) {
     case 9:
         if (CHROMA444(h)) {
             if (h->avctx->colorspace == AVCOL_SPC_RGB) {
@@ -923,7 +844,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
         break;
     default:
         av_log(h->avctx, AV_LOG_ERROR,
-               "Unsupported bit depth %d\n", h->sps.bit_depth_luma);
+               "Unsupported bit depth %d\n", h->ps.sps->bit_depth_luma);
         return AVERROR_INVALIDDATA;
     }
 
@@ -938,10 +859,11 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
 /* export coded and cropped frame dimensions to AVCodecContext */
 static int init_dimensions(H264Context *h)
 {
-    int width  = h->width  - (h->sps.crop_right + h->sps.crop_left);
-    int height = h->height - (h->sps.crop_top   + h->sps.crop_bottom);
-    av_assert0(h->sps.crop_right + h->sps.crop_left < (unsigned)h->width);
-    av_assert0(h->sps.crop_top + h->sps.crop_bottom < (unsigned)h->height);
+    SPS *sps = h->ps.sps;
+    int width  = h->width  - (sps->crop_right + sps->crop_left);
+    int height = h->height - (sps->crop_top   + sps->crop_bottom);
+    av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
+    av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
 
     /* handle container cropping */
     if (FFALIGN(h->avctx->width,  16) == FFALIGN(width,  16) &&
@@ -960,11 +882,11 @@ static int init_dimensions(H264Context *h)
             return AVERROR_INVALIDDATA;
 
         av_log(h->avctx, AV_LOG_WARNING, "Ignoring cropping information.\n");
-        h->sps.crop_bottom =
-        h->sps.crop_top    =
-        h->sps.crop_right  =
-        h->sps.crop_left   =
-        h->sps.crop        = 0;
+        sps->crop_bottom =
+        sps->crop_top    =
+        sps->crop_right  =
+        sps->crop_left   =
+        sps->crop        = 0;
 
         width  = h->width;
         height = h->height;
@@ -980,21 +902,22 @@ static int init_dimensions(H264Context *h)
 
 static int h264_slice_header_init(H264Context *h)
 {
+    const SPS *sps = h->ps.sps;
     int nb_slices = (HAVE_THREADS &&
                      h->avctx->active_thread_type & FF_THREAD_SLICE) ?
                     h->avctx->thread_count : 1;
     int i, ret;
 
-    ff_set_sar(h->avctx, h->sps.sar);
+    ff_set_sar(h->avctx, sps->sar);
     av_pix_fmt_get_chroma_sub_sample(h->avctx->pix_fmt,
                                      &h->chroma_x_shift, &h->chroma_y_shift);
 
-    if (h->sps.timing_info_present_flag) {
-        int64_t den = h->sps.time_scale;
+    if (sps->timing_info_present_flag) {
+        int64_t den = sps->time_scale;
         if (h->x264_build < 44U)
             den *= 2;
         av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
-                  h->sps.num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
+                  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
     }
 
     ff_h264_free_tables(h);
@@ -1012,7 +935,7 @@ static int h264_slice_header_init(H264Context *h)
 #if FF_API_CAP_VDPAU
     if (h->avctx->codec &&
         h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU &&
-        (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
+        (sps->bit_depth_luma != 8 || sps->chroma_format_idc > 1)) {
         av_log(h->avctx, AV_LOG_ERROR,
                 "VDPAU decoding does not support video colorspace.\n");
         ret = AVERROR_INVALIDDATA;
@@ -1020,29 +943,29 @@ static int h264_slice_header_init(H264Context *h)
     }
 #endif
 
-    if (h->sps.bit_depth_luma < 8 || h->sps.bit_depth_luma > 14 ||
-        h->sps.bit_depth_luma == 11 || h->sps.bit_depth_luma == 13
+    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
+        sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
     ) {
         av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
-               h->sps.bit_depth_luma);
+               sps->bit_depth_luma);
         ret = AVERROR_INVALIDDATA;
         goto fail;
     }
 
     h->cur_bit_depth_luma         =
-    h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
-    h->cur_chroma_format_idc      = h->sps.chroma_format_idc;
-    h->pixel_shift                = h->sps.bit_depth_luma > 8;
-    h->chroma_format_idc          = h->sps.chroma_format_idc;
-    h->bit_depth_luma             = h->sps.bit_depth_luma;
-
-    ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
-                    h->sps.chroma_format_idc);
-    ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
-    ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma);
-    ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma,
-                      h->sps.chroma_format_idc);
-    ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma);
+    h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
+    h->cur_chroma_format_idc      = sps->chroma_format_idc;
+    h->pixel_shift                = sps->bit_depth_luma > 8;
+    h->chroma_format_idc          = sps->chroma_format_idc;
+    h->bit_depth_luma             = sps->bit_depth_luma;
+
+    ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
+                    sps->chroma_format_idc);
+    ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
+    ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
+    ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
+                      sps->chroma_format_idc);
+    ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
 
     if (nb_slices > H264_MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) {
         int max_slices;
@@ -1109,6 +1032,8 @@ static enum AVPixelFormat non_j_pixfmt(enum AVPixelFormat a)
  */
 int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
 {
+    const SPS *sps;
+    const PPS *pps;
     unsigned int first_mb_in_slice;
     unsigned int pps_id;
     int ret;
@@ -1120,7 +1045,6 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
     int first_slice = sl == h->slice_ctx && !h->current_slice;
     int frame_num, droppable, picture_structure;
     int mb_aff_frame, last_mb_aff_frame;
-    PPS *pps;
 
     if (first_slice)
         av_assert0(!h->setup_finished);
@@ -1218,7 +1142,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
         av_log(h->avctx, AV_LOG_ERROR, "pps_id %u out of range\n", pps_id);
         return AVERROR_INVALIDDATA;
     }
-    if (!h->pps_buffers[pps_id]) {
+    if (!h->ps.pps_list[pps_id]) {
         av_log(h->avctx, AV_LOG_ERROR,
                "non-existing PPS %u referenced\n",
                pps_id);
@@ -1231,29 +1155,33 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
         return AVERROR_INVALIDDATA;
     }
 
-    pps = h->pps_buffers[pps_id];
+    pps = (const PPS*)h->ps.pps_list[pps_id]->data;
 
-    if (!h->sps_buffers[pps->sps_id]) {
+    if (!h->ps.sps_list[pps->sps_id]) {
         av_log(h->avctx, AV_LOG_ERROR,
                "non-existing SPS %u referenced\n",
-               h->pps.sps_id);
+               pps->sps_id);
         return AVERROR_INVALIDDATA;
     }
 
     if (first_slice) {
-        h->pps = *h->pps_buffers[pps_id];
+        av_buffer_unref(&h->ps.pps_ref);
+        h->ps.pps = NULL;
+        h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[pps_id]);
+        if (!h->ps.pps_ref)
+            return AVERROR(ENOMEM);
+        h->ps.pps = (const PPS*)h->ps.pps_ref->data;
     } else {
-        if (h->pps.sps_id != pps->sps_id ||
-            h->pps.transform_8x8_mode != pps->transform_8x8_mode ||
-            (h->setup_finished && h->dequant_coeff_pps != pps_id)) {
+        if (h->ps.pps->sps_id != pps->sps_id ||
+            h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
+            (h->setup_finished && h->ps.pps != pps)*/) {
             av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
             return AVERROR_INVALIDDATA;
         }
     }
 
-    if (pps->sps_id != h->sps.sps_id ||
-        pps->sps_id != h->current_sps_id ||
-        h->sps_buffers[pps->sps_id]->new) {
+    if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data ||
+        pps->sps_id != h->current_sps_id) {
 
         if (!first_slice) {
             av_log(h->avctx, AV_LOG_ERROR,
@@ -1261,22 +1189,27 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
             return AVERROR_INVALIDDATA;
         }
 
-        h->sps = *h->sps_buffers[h->pps.sps_id];
-
-        if (h->mb_width  != h->sps.mb_width ||
-            h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) ||
-            h->cur_bit_depth_luma    != h->sps.bit_depth_luma ||
-            h->cur_chroma_format_idc != h->sps.chroma_format_idc
+        av_buffer_unref(&h->ps.sps_ref);
+        h->ps.sps = NULL;
+        h->ps.sps_ref = av_buffer_ref(h->ps.sps_list[h->ps.pps->sps_id]);
+        if (!h->ps.sps_ref)
+            return AVERROR(ENOMEM);
+        h->ps.sps = (const SPS*)h->ps.sps_ref->data;
+
+        if (h->mb_width  != h->ps.sps->mb_width ||
+            h->mb_height != h->ps.sps->mb_height * (2 - h->ps.sps->frame_mbs_only_flag) ||
+            h->cur_bit_depth_luma    != h->ps.sps->bit_depth_luma ||
+            h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
         )
             needs_reinit = 1;
 
-        if (h->bit_depth_luma    != h->sps.bit_depth_luma ||
-            h->chroma_format_idc != h->sps.chroma_format_idc)
+        if (h->bit_depth_luma    != h->ps.sps->bit_depth_luma ||
+            h->chroma_format_idc != h->ps.sps->chroma_format_idc)
             needs_reinit         = 1;
 
         if (h->flags & AV_CODEC_FLAG_LOW_DELAY ||
-            (h->sps.bitstream_restriction_flag &&
-             !h->sps.num_reorder_frames)) {
+            (h->ps.sps->bitstream_restriction_flag &&
+             !h->ps.sps->num_reorder_frames)) {
             if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
                 av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
                        "Reenabling low delay requires a codec flush.\n");
@@ -1289,34 +1222,37 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
 
     }
 
+    pps = h->ps.pps;
+    sps = h->ps.sps;
+
     must_reinit = (h->context_initialized &&
-                    (   16*h->sps.mb_width != h->avctx->coded_width
-                     || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
-                     || h->cur_bit_depth_luma    != h->sps.bit_depth_luma
-                     || h->cur_chroma_format_idc != h->sps.chroma_format_idc
-                     || h->mb_width  != h->sps.mb_width
-                     || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
+                    (   16*sps->mb_width != h->avctx->coded_width
+                     || 16*sps->mb_height * (2 - sps->frame_mbs_only_flag) != h->avctx->coded_height
+                     || h->cur_bit_depth_luma    != sps->bit_depth_luma
+                     || h->cur_chroma_format_idc != sps->chroma_format_idc
+                     || h->mb_width  != sps->mb_width
+                     || h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag)
                     ));
     if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
         || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
         must_reinit = 1;
 
-    if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio))
+    if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
         must_reinit = 1;
 
     if (!h->setup_finished) {
-        h->avctx->profile = ff_h264_get_profile(&h->sps);
-        h->avctx->level   = h->sps.level_idc;
-        h->avctx->refs    = h->sps.ref_frame_count;
+        h->avctx->profile = ff_h264_get_profile(sps);
+        h->avctx->level   = sps->level_idc;
+        h->avctx->refs    = sps->ref_frame_count;
 
-        h->mb_width  = h->sps.mb_width;
-        h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
+        h->mb_width  = sps->mb_width;
+        h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag);
         h->mb_num    = h->mb_width * h->mb_height;
         h->mb_stride = h->mb_width + 1;
 
         h->b_stride = h->mb_width * 4;
 
-        h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
+        h->chroma_y_shift = sps->chroma_format_idc <= 1; // 400 uses yuv420p
 
         h->width  = 16 * h->mb_width;
         h->height = 16 * h->mb_height;
@@ -1325,15 +1261,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
         if (ret < 0)
             return ret;
 
-        if (h->sps.video_signal_type_present_flag) {
-            h->avctx->color_range = h->sps.full_range>0 ? AVCOL_RANGE_JPEG
+        if (sps->video_signal_type_present_flag) {
+            h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
                                                         : AVCOL_RANGE_MPEG;
-            if (h->sps.colour_description_present_flag) {
-                if (h->avctx->colorspace != h->sps.colorspace)
+            if (sps->colour_description_present_flag) {
+                if (h->avctx->colorspace != sps->colorspace)
                     needs_reinit = 1;
-                h->avctx->color_primaries = h->sps.color_primaries;
-                h->avctx->color_trc       = h->sps.color_trc;
-                h->avctx->colorspace      = h->sps.colorspace;
+                h->avctx->color_primaries = sps->color_primaries;
+                h->avctx->color_trc       = sps->color_trc;
+                h->avctx->colorspace      = sps->colorspace;
             }
         }
     }
@@ -1386,12 +1322,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
         }
     }
 
-    if (!h->current_slice && h->dequant_coeff_pps != pps_id) {
-        h->dequant_coeff_pps = pps_id;
-        ff_h264_init_dequant_tables(h);
-    }
-
-    frame_num = get_bits(&sl->gb, h->sps.log2_max_frame_num);
+    frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
     if (!first_slice) {
         if (h->frame_num != frame_num) {
             av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
@@ -1410,10 +1341,10 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
     last_pic_droppable = h->droppable;
 
     droppable = h->nal_ref_idc == 0;
-    if (h->sps.frame_mbs_only_flag) {
+    if (sps->frame_mbs_only_flag) {
         picture_structure = PICT_FRAME;
     } else {
-        if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
+        if (!h->ps.sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
             av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
             return -1;
         }
@@ -1424,7 +1355,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
             picture_structure = PICT_TOP_FIELD + bottom_field_flag;
         } else {
             picture_structure = PICT_FRAME;
-            mb_aff_frame      = h->sps.mb_aff;
+            mb_aff_frame      = sps->mb_aff;
         }
     }
 
@@ -1456,13 +1387,13 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
          * frames just to throw them away */
         if (h->frame_num != h->prev_frame_num) {
             int unwrap_prev_frame_num = h->prev_frame_num;
-            int max_frame_num         = 1 << h->sps.log2_max_frame_num;
+            int max_frame_num         = 1 << sps->log2_max_frame_num;
 
             if (unwrap_prev_frame_num > h->frame_num)
                 unwrap_prev_frame_num -= max_frame_num;
 
-            if ((h->frame_num - unwrap_prev_frame_num) > h->sps.ref_frame_count) {
-                unwrap_prev_frame_num = (h->frame_num - h->sps.ref_frame_count) - 1;
+            if ((h->frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
+                unwrap_prev_frame_num = (h->frame_num - sps->ref_frame_count) - 1;
                 if (unwrap_prev_frame_num < 0)
                     unwrap_prev_frame_num += max_frame_num;
 
@@ -1528,11 +1459,11 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
         }
 
         while (h->frame_num != h->prev_frame_num && !h->first_field &&
-               h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
+               h->frame_num != (h->prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
             H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
             av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
                    h->frame_num, h->prev_frame_num);
-            if (!h->sps.gaps_in_frame_num_allowed_flag)
+            if (!sps->gaps_in_frame_num_allowed_flag)
                 for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
                     h->last_pocs[i] = INT_MIN;
             ret = h264_frame_start(h);
@@ -1542,9 +1473,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
             }
 
             h->prev_frame_num++;
-            h->prev_frame_num        %= 1 << h->sps.log2_max_frame_num;
+            h->prev_frame_num        %= 1 << sps->log2_max_frame_num;
             h->cur_pic_ptr->frame_num = h->prev_frame_num;
-            h->cur_pic_ptr->invalid_gap = !h->sps.gaps_in_frame_num_allowed_flag;
+            h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
             ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
             ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
             ret = ff_generate_sliding_window_mmcos(h, 1);
@@ -1647,35 +1578,35 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
 
     if (h->picture_structure == PICT_FRAME) {
         h->curr_pic_num = h->frame_num;
-        h->max_pic_num  = 1 << h->sps.log2_max_frame_num;
+        h->max_pic_num  = 1 << sps->log2_max_frame_num;
     } else {
         h->curr_pic_num = 2 * h->frame_num + 1;
-        h->max_pic_num  = 1 << (h->sps.log2_max_frame_num + 1);
+        h->max_pic_num  = 1 << (sps->log2_max_frame_num + 1);
     }
 
     if (h->nal_unit_type == NAL_IDR_SLICE)
         get_ue_golomb_long(&sl->gb); /* idr_pic_id */
 
-    if (h->sps.poc_type == 0) {
-        int poc_lsb = get_bits(&sl->gb, h->sps.log2_max_poc_lsb);
+    if (sps->poc_type == 0) {
+        int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
 
         if (!h->setup_finished)
             h->poc_lsb = poc_lsb;
 
-        if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
+        if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
             int delta_poc_bottom = get_se_golomb(&sl->gb);
             if (!h->setup_finished)
                 h->delta_poc_bottom = delta_poc_bottom;
         }
     }
 
-    if (h->sps.poc_type == 1 && !h->sps.delta_pic_order_always_zero_flag) {
+    if (sps->poc_type == 1 && !sps->delta_pic_order_always_zero_flag) {
         int delta_poc = get_se_golomb(&sl->gb);
 
         if (!h->setup_finished)
             h->delta_poc[0] = delta_poc;
 
-        if (h->pps.pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
+        if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
             delta_poc = get_se_golomb(&sl->gb);
 
             if (!h->setup_finished)
@@ -1686,14 +1617,14 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
     if (!h->setup_finished)
         ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc);
 
-    if (h->pps.redundant_pic_cnt_present)
+    if (pps->redundant_pic_cnt_present)
         sl->redundant_pic_count = get_ue_golomb(&sl->gb);
 
     if (sl->slice_type_nos == AV_PICTURE_TYPE_B)
         sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
 
     ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count,
-                                  &sl->gb, &h->pps, sl->slice_type_nos,
+                                  &sl->gb, pps, sl->slice_type_nos,
                                   h->picture_structure, h->avctx);
     if (ret < 0)
         return ret;
@@ -1706,12 +1637,12 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
        }
     }
 
-    if ((h->pps.weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
-        (h->pps.weighted_bipred_idc == 1 &&
+    if ((pps->weighted_pred && sl->slice_type_nos == AV_PICTURE_TYPE_P) ||
+        (pps->weighted_bipred_idc == 1 &&
          sl->slice_type_nos == AV_PICTURE_TYPE_B))
-        ff_h264_pred_weight_table(&sl->gb, &h->sps, sl->ref_count,
+        ff_h264_pred_weight_table(&sl->gb, sps, sl->ref_count,
                                   sl->slice_type_nos, &sl->pwt);
-    else if (h->pps.weighted_bipred_idc == 2 &&
+    else if (pps->weighted_bipred_idc == 2 &&
              sl->slice_type_nos == AV_PICTURE_TYPE_B) {
         implicit_weight_table(h, sl, -1);
     } else {
@@ -1738,7 +1669,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
     if (FRAME_MBAFF(h)) {
         ff_h264_fill_mbaff_ref_list(h, sl);
 
-        if (h->pps.weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) {
+        if (pps->weighted_bipred_idc == 2 && sl->slice_type_nos == AV_PICTURE_TYPE_B) {
             implicit_weight_table(h, sl, 0);
             implicit_weight_table(h, sl, 1);
         }
@@ -1748,7 +1679,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
         ff_h264_direct_dist_scale_factor(h, sl);
     ff_h264_direct_ref_list_init(h, sl);
 
-    if (sl->slice_type_nos != AV_PICTURE_TYPE_I && h->pps.cabac) {
+    if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
         tmp = get_ue_golomb_31(&sl->gb);
         if (tmp > 2) {
             av_log(h->avctx, AV_LOG_ERROR, "cabac_init_idc %u overflow\n", tmp);
@@ -1758,8 +1689,8 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
     }
 
     sl->last_qscale_diff = 0;
-    tmp = h->pps.init_qp + get_se_golomb(&sl->gb);
-    if (tmp > 51 + 6 * (h->sps.bit_depth_luma - 8)) {
+    tmp = pps->init_qp + get_se_golomb(&sl->gb);
+    if (tmp > 51 + 6 * (sps->bit_depth_luma - 8)) {
         av_log(h->avctx, AV_LOG_ERROR, "QP %u out of range\n", tmp);
         return AVERROR_INVALIDDATA;
     }
@@ -1776,7 +1707,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
     sl->deblocking_filter     = 1;
     sl->slice_alpha_c0_offset = 0;
     sl->slice_beta_offset     = 0;
-    if (h->pps.deblocking_filter_parameters_present) {
+    if (pps->deblocking_filter_parameters_present) {
         tmp = get_ue_golomb_31(&sl->gb);
         if (tmp > 2) {
             av_log(h->avctx, AV_LOG_ERROR,
@@ -1838,9 +1769,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
     sl->qp_thresh = 15 -
                    FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
                    FFMAX3(0,
-                          h->pps.chroma_qp_index_offset[0],
-                          h->pps.chroma_qp_index_offset[1]) +
-                   6 * (h->sps.bit_depth_luma - 8);
+                          pps->chroma_qp_index_offset[0],
+                          pps->chroma_qp_index_offset[1]) +
+                   6 * (sps->bit_depth_luma - 8);
 
     sl->slice_num       = ++h->current_slice;
 
@@ -1887,9 +1818,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
     }
 
     h->au_pps_id = pps_id;
-    h->sps.new =
-    h->sps_buffers[h->pps.sps_id]->new = 0;
-    h->current_sps_id = h->pps.sps_id;
+    h->current_sps_id = h->ps.pps->sps_id;
 
     if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
         av_log(h->avctx, AV_LOG_DEBUG,
@@ -2120,7 +2049,7 @@ static int fill_filter_caches(const H264Context *h, H264SliceContext *sl, int mb
 
     /* CAVLC 8x8dct requires NNZ values for residual decoding that differ
      * from what the loop filter needs */
-    if (!CABAC(h) && h->pps.transform_8x8_mode) {
+    if (!CABAC(h) && h->ps.pps->transform_8x8_mode) {
         if (IS_8x8DCT(top_type)) {
             nnz_cache[4 + 8 * 0] =
             nnz_cache[5 + 8 * 0] = (h->cbp_table[top_xy] & 0x4000) >> 12;
@@ -2321,7 +2250,7 @@ static int decode_slice(struct AVCodecContext *avctx, void *arg)
         }
     }
 
-    if (h->pps.cabac) {
+    if (h->ps.pps->cabac) {
         /* realign */
         align_get_bits(&sl->gb);
 
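
For readers following the h264_slice.c changes above: the active SPS/PPS now live behind AVBufferRef reference counts, so the pointers stored in h->ps.sps / h->ps.pps stay valid even if the corresponding list slot is later replaced or removed. A minimal standalone sketch of that ownership pattern (not the decoder's code; DummyPS and the variable names are stand-ins):

    /* Minimal sketch of the AVBufferRef ownership pattern used above; this is
     * not the decoder's code, DummyPS and the variable names are stand-ins.
     * Compile e.g. with: gcc sketch.c $(pkg-config --cflags --libs libavutil) */
    #include <stdio.h>
    #include <libavutil/buffer.h>

    typedef struct DummyPS { int id; } DummyPS;   /* stand-in for SPS/PPS */

    int main(void)
    {
        AVBufferRef *list_slot  = NULL;  /* plays the role of ps.pps_list[pps_id] */
        AVBufferRef *active_ref = NULL;  /* plays the role of ps.pps_ref          */
        const DummyPS *active;           /* plays the role of ps.pps              */

        /* a parameter set arrives and is stored in the list */
        list_slot = av_buffer_alloc(sizeof(DummyPS));
        if (!list_slot)
            return 1;
        ((DummyPS *)list_slot->data)->id = 1;

        /* the slice header activates it: take an extra reference */
        active_ref = av_buffer_ref(list_slot);
        if (!active_ref) {
            av_buffer_unref(&list_slot);
            return 1;
        }
        active = (const DummyPS *)active_ref->data;

        /* the list slot is dropped (e.g. replaced by a new PPS with the same id)... */
        av_buffer_unref(&list_slot);

        /* ...but the active pointer stays valid through active_ref */
        printf("active pps id: %d\n", active->id);

        av_buffer_unref(&active_ref);
        return 0;
    }

Holding that extra reference is what lets a parameter set be dropped from the lists without invalidating the slice currently being decoded, and ff_h264_update_thread_context() propagates the same references between thread contexts with av_buffer_ref(), as seen at the top of this section.
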
diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
index 7470a16..9b13fa9 100644
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -228,6 +228,8 @@ static int vaapi_h264_start_frame(AVCodecContext          *avctx,
 {
     H264Context * const h = avctx->priv_data;
     FFVAContext * const vactx = ff_vaapi_get_context(avctx);
+    const PPS *pps = h->ps.pps;
+    const SPS *sps = h->ps.sps;
     VAPictureParameterBufferH264 *pic_param;
     VAIQMatrixBufferH264 *iq_matrix;
 
@@ -244,38 +246,38 @@ static int vaapi_h264_start_frame(AVCodecContext          *avctx,
         return -1;
     pic_param->picture_width_in_mbs_minus1                      = h->mb_width - 1;
     pic_param->picture_height_in_mbs_minus1                     = h->mb_height - 1;
-    pic_param->bit_depth_luma_minus8                            = h->sps.bit_depth_luma - 8;
-    pic_param->bit_depth_chroma_minus8                          = h->sps.bit_depth_chroma - 8;
-    pic_param->num_ref_frames                                   = h->sps.ref_frame_count;
+    pic_param->bit_depth_luma_minus8                            = sps->bit_depth_luma - 8;
+    pic_param->bit_depth_chroma_minus8                          = sps->bit_depth_chroma - 8;
+    pic_param->num_ref_frames                                   = sps->ref_frame_count;
     pic_param->seq_fields.value                                 = 0; /* reset all bits */
-    pic_param->seq_fields.bits.chroma_format_idc                = h->sps.chroma_format_idc;
-    pic_param->seq_fields.bits.residual_colour_transform_flag   = h->sps.residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
-    pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag;
-    pic_param->seq_fields.bits.frame_mbs_only_flag              = h->sps.frame_mbs_only_flag;
-    pic_param->seq_fields.bits.mb_adaptive_frame_field_flag     = h->sps.mb_aff;
-    pic_param->seq_fields.bits.direct_8x8_inference_flag        = h->sps.direct_8x8_inference_flag;
-    pic_param->seq_fields.bits.MinLumaBiPredSize8x8             = h->sps.level_idc >= 31; /* A.3.3.2 */
-    pic_param->seq_fields.bits.log2_max_frame_num_minus4        = h->sps.log2_max_frame_num - 4;
-    pic_param->seq_fields.bits.pic_order_cnt_type               = h->sps.poc_type;
-    pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
-    pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
-    pic_param->num_slice_groups_minus1                          = h->pps.slice_group_count - 1;
-    pic_param->slice_group_map_type                             = h->pps.mb_slice_group_map_type;
+    pic_param->seq_fields.bits.chroma_format_idc                = sps->chroma_format_idc;
+    pic_param->seq_fields.bits.residual_colour_transform_flag   = sps->residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
+    pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = sps->gaps_in_frame_num_allowed_flag;
+    pic_param->seq_fields.bits.frame_mbs_only_flag              = sps->frame_mbs_only_flag;
+    pic_param->seq_fields.bits.mb_adaptive_frame_field_flag     = sps->mb_aff;
+    pic_param->seq_fields.bits.direct_8x8_inference_flag        = sps->direct_8x8_inference_flag;
+    pic_param->seq_fields.bits.MinLumaBiPredSize8x8             = sps->level_idc >= 31; /* A.3.3.2 */
+    pic_param->seq_fields.bits.log2_max_frame_num_minus4        = sps->log2_max_frame_num - 4;
+    pic_param->seq_fields.bits.pic_order_cnt_type               = sps->poc_type;
+    pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4;
+    pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
+    pic_param->num_slice_groups_minus1                          = pps->slice_group_count - 1;
+    pic_param->slice_group_map_type                             = pps->mb_slice_group_map_type;
     pic_param->slice_group_change_rate_minus1                   = 0; /* XXX: unimplemented in FFmpeg */
-    pic_param->pic_init_qp_minus26                              = h->pps.init_qp - 26;
-    pic_param->pic_init_qs_minus26                              = h->pps.init_qs - 26;
-    pic_param->chroma_qp_index_offset                           = h->pps.chroma_qp_index_offset[0];
-    pic_param->second_chroma_qp_index_offset                    = h->pps.chroma_qp_index_offset[1];
+    pic_param->pic_init_qp_minus26                              = pps->init_qp - 26;
+    pic_param->pic_init_qs_minus26                              = pps->init_qs - 26;
+    pic_param->chroma_qp_index_offset                           = pps->chroma_qp_index_offset[0];
+    pic_param->second_chroma_qp_index_offset                    = pps->chroma_qp_index_offset[1];
     pic_param->pic_fields.value                                 = 0; /* reset all bits */
-    pic_param->pic_fields.bits.entropy_coding_mode_flag         = h->pps.cabac;
-    pic_param->pic_fields.bits.weighted_pred_flag               = h->pps.weighted_pred;
-    pic_param->pic_fields.bits.weighted_bipred_idc              = h->pps.weighted_bipred_idc;
-    pic_param->pic_fields.bits.transform_8x8_mode_flag          = h->pps.transform_8x8_mode;
+    pic_param->pic_fields.bits.entropy_coding_mode_flag         = pps->cabac;
+    pic_param->pic_fields.bits.weighted_pred_flag               = pps->weighted_pred;
+    pic_param->pic_fields.bits.weighted_bipred_idc              = pps->weighted_bipred_idc;
+    pic_param->pic_fields.bits.transform_8x8_mode_flag          = pps->transform_8x8_mode;
     pic_param->pic_fields.bits.field_pic_flag                   = h->picture_structure != PICT_FRAME;
-    pic_param->pic_fields.bits.constrained_intra_pred_flag      = h->pps.constrained_intra_pred;
-    pic_param->pic_fields.bits.pic_order_present_flag           = h->pps.pic_order_present;
-    pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-    pic_param->pic_fields.bits.redundant_pic_cnt_present_flag   = h->pps.redundant_pic_cnt_present;
+    pic_param->pic_fields.bits.constrained_intra_pred_flag      = pps->constrained_intra_pred;
+    pic_param->pic_fields.bits.pic_order_present_flag           = pps->pic_order_present;
+    pic_param->pic_fields.bits.deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
+    pic_param->pic_fields.bits.redundant_pic_cnt_present_flag   = pps->redundant_pic_cnt_present;
     pic_param->pic_fields.bits.reference_pic_flag               = h->nal_ref_idc != 0;
     pic_param->frame_num                                        = h->frame_num;
 
@@ -283,9 +285,9 @@ static int vaapi_h264_start_frame(AVCodecContext          *avctx,
     iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264));
     if (!iq_matrix)
         return -1;
-    memcpy(iq_matrix->ScalingList4x4, h->pps.scaling_matrix4, sizeof(iq_matrix->ScalingList4x4));
-    memcpy(iq_matrix->ScalingList8x8[0], h->pps.scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0]));
-    memcpy(iq_matrix->ScalingList8x8[1], h->pps.scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0]));
+    memcpy(iq_matrix->ScalingList4x4, pps->scaling_matrix4, sizeof(iq_matrix->ScalingList4x4));
+    memcpy(iq_matrix->ScalingList8x8[0], pps->scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0]));
+    memcpy(iq_matrix->ScalingList8x8[1], pps->scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0]));
     return 0;
 }
 
@@ -337,7 +339,7 @@ static int vaapi_h264_decode_slice(AVCodecContext *avctx,
     slice_param->num_ref_idx_l0_active_minus1   = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0;
     slice_param->num_ref_idx_l1_active_minus1   = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0;
     slice_param->cabac_init_idc                 = sl->cabac_init_idc;
-    slice_param->slice_qp_delta                 = sl->qscale - h->pps.init_qp;
+    slice_param->slice_qp_delta                 = sl->qscale - h->ps.pps->init_qp;
     slice_param->disable_deblocking_filter_idc  = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter;
     slice_param->slice_alpha_c0_offset_div2     = sl->slice_alpha_c0_offset / 2;
     slice_param->slice_beta_offset_div2         = sl->slice_beta_offset     / 2;
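
The hwaccel fill functions (dxva2, vaapi, vdpau) hand the driver raw syntax-element values, while the parser stores the already-offset values, which is why nearly every assignment above carries a -1, -4 or -26. A hedged sketch of that convention (MySPS/MyPPS/DriverPicParams are illustrative stand-ins, not the real FFmpeg or driver structs):

    /* Sketch of the "minusN" convention visible in the hwaccel fill code above:
     * the parser stores the real values, the driver wants the bitstream's
     * offset form back.  All names here are illustrative stand-ins. */
    typedef struct MySPS { int log2_max_frame_num; int ref_frame_count; } MySPS;
    typedef struct MyPPS { int init_qp; int ref_count[2]; } MyPPS;

    typedef struct DriverPicParams {
        int num_ref_frames;
        int log2_max_frame_num_minus4;
        int pic_init_qp_minus26;
        int num_ref_idx_l0_active_minus1;
        int num_ref_idx_l1_active_minus1;
    } DriverPicParams;

    void fill_driver_params(DriverPicParams *pp, const MySPS *sps, const MyPPS *pps)
    {
        /* stored as the real value, sent back as the bitstream's offset form */
        pp->num_ref_frames               = sps->ref_frame_count;
        pp->log2_max_frame_num_minus4    = sps->log2_max_frame_num - 4;
        pp->pic_init_qp_minus26          = pps->init_qp - 26;
        pp->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1;
        pp->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1;
    }
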
diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c
index 7f8690e..d791d15 100644
--- a/libavcodec/vdpau.c
+++ b/libavcodec/vdpau.c
@@ -476,30 +476,30 @@ void ff_vdpau_h264_picture_complete(H264Context *h)
     render->info.h264.is_reference                           = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
     render->info.h264.field_pic_flag                         = h->picture_structure != PICT_FRAME;
     render->info.h264.bottom_field_flag                      = h->picture_structure == PICT_BOTTOM_FIELD;
-    render->info.h264.num_ref_frames                         = h->sps.ref_frame_count;
-    render->info.h264.mb_adaptive_frame_field_flag           = h->sps.mb_aff && !render->info.h264.field_pic_flag;
-    render->info.h264.constrained_intra_pred_flag            = h->pps.constrained_intra_pred;
-    render->info.h264.weighted_pred_flag                     = h->pps.weighted_pred;
-    render->info.h264.weighted_bipred_idc                    = h->pps.weighted_bipred_idc;
-    render->info.h264.frame_mbs_only_flag                    = h->sps.frame_mbs_only_flag;
-    render->info.h264.transform_8x8_mode_flag                = h->pps.transform_8x8_mode;
-    render->info.h264.chroma_qp_index_offset                 = h->pps.chroma_qp_index_offset[0];
-    render->info.h264.second_chroma_qp_index_offset          = h->pps.chroma_qp_index_offset[1];
-    render->info.h264.pic_init_qp_minus26                    = h->pps.init_qp - 26;
-    render->info.h264.num_ref_idx_l0_active_minus1           = h->pps.ref_count[0] - 1;
-    render->info.h264.num_ref_idx_l1_active_minus1           = h->pps.ref_count[1] - 1;
-    render->info.h264.log2_max_frame_num_minus4              = h->sps.log2_max_frame_num - 4;
-    render->info.h264.pic_order_cnt_type                     = h->sps.poc_type;
-    render->info.h264.log2_max_pic_order_cnt_lsb_minus4      = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
-    render->info.h264.delta_pic_order_always_zero_flag       = h->sps.delta_pic_order_always_zero_flag;
-    render->info.h264.direct_8x8_inference_flag              = h->sps.direct_8x8_inference_flag;
-    render->info.h264.entropy_coding_mode_flag               = h->pps.cabac;
-    render->info.h264.pic_order_present_flag                 = h->pps.pic_order_present;
-    render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-    render->info.h264.redundant_pic_cnt_present_flag         = h->pps.redundant_pic_cnt_present;
-    memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
-    memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
-    memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
+    render->info.h264.num_ref_frames                         = h->ps.sps->ref_frame_count;
+    render->info.h264.mb_adaptive_frame_field_flag           = h->ps.sps->mb_aff && !render->info.h264.field_pic_flag;
+    render->info.h264.constrained_intra_pred_flag            = h->ps.pps->constrained_intra_pred;
+    render->info.h264.weighted_pred_flag                     = h->ps.pps->weighted_pred;
+    render->info.h264.weighted_bipred_idc                    = h->ps.pps->weighted_bipred_idc;
+    render->info.h264.frame_mbs_only_flag                    = h->ps.sps->frame_mbs_only_flag;
+    render->info.h264.transform_8x8_mode_flag                = h->ps.pps->transform_8x8_mode;
+    render->info.h264.chroma_qp_index_offset                 = h->ps.pps->chroma_qp_index_offset[0];
+    render->info.h264.second_chroma_qp_index_offset          = h->ps.pps->chroma_qp_index_offset[1];
+    render->info.h264.pic_init_qp_minus26                    = h->ps.pps->init_qp - 26;
+    render->info.h264.num_ref_idx_l0_active_minus1           = h->ps.pps->ref_count[0] - 1;
+    render->info.h264.num_ref_idx_l1_active_minus1           = h->ps.pps->ref_count[1] - 1;
+    render->info.h264.log2_max_frame_num_minus4              = h->ps.sps->log2_max_frame_num - 4;
+    render->info.h264.pic_order_cnt_type                     = h->ps.sps->poc_type;
+    render->info.h264.log2_max_pic_order_cnt_lsb_minus4      = h->ps.sps->poc_type ? 0 : h->ps.sps->log2_max_poc_lsb - 4;
+    render->info.h264.delta_pic_order_always_zero_flag       = h->ps.sps->delta_pic_order_always_zero_flag;
+    render->info.h264.direct_8x8_inference_flag              = h->ps.sps->direct_8x8_inference_flag;
+    render->info.h264.entropy_coding_mode_flag               = h->ps.pps->cabac;
+    render->info.h264.pic_order_present_flag                 = h->ps.pps->pic_order_present;
+    render->info.h264.deblocking_filter_control_present_flag = h->ps.pps->deblocking_filter_parameters_present;
+    render->info.h264.redundant_pic_cnt_present_flag         = h->ps.pps->redundant_pic_cnt_present;
+    memcpy(render->info.h264.scaling_lists_4x4, h->ps.pps->scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
+    memcpy(render->info.h264.scaling_lists_8x8[0], h->ps.pps->scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
+    memcpy(render->info.h264.scaling_lists_8x8[1], h->ps.pps->scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
 
     ff_h264_draw_horiz_band(h, &h->slice_ctx[0], 0, h->avctx->height);
     render->bitstream_buffers_used = 0;
diff --git a/libavcodec/vdpau_h264.c b/libavcodec/vdpau_h264.c
index 10376ac..124fc98 100644
--- a/libavcodec/vdpau_h264.c
+++ b/libavcodec/vdpau_h264.c
@@ -120,6 +120,8 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
 {
     H264Context * const h = avctx->priv_data;
+    const PPS *pps = h->ps.pps;
+    const SPS *sps = h->ps.sps;
     H264Picture *pic = h->cur_pic_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     VdpPictureInfoH264 *info = &pic_ctx->info.h264;
@@ -135,37 +137,37 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx,
     info->frame_num                              = h->frame_num;
     info->field_pic_flag                         = h->picture_structure != PICT_FRAME;
     info->bottom_field_flag                      = h->picture_structure == PICT_BOTTOM_FIELD;
-    info->num_ref_frames                         = h->sps.ref_frame_count;
-    info->mb_adaptive_frame_field_flag           = h->sps.mb_aff && !info->field_pic_flag;
-    info->constrained_intra_pred_flag            = h->pps.constrained_intra_pred;
-    info->weighted_pred_flag                     = h->pps.weighted_pred;
-    info->weighted_bipred_idc                    = h->pps.weighted_bipred_idc;
-    info->frame_mbs_only_flag                    = h->sps.frame_mbs_only_flag;
-    info->transform_8x8_mode_flag                = h->pps.transform_8x8_mode;
-    info->chroma_qp_index_offset                 = h->pps.chroma_qp_index_offset[0];
-    info->second_chroma_qp_index_offset          = h->pps.chroma_qp_index_offset[1];
-    info->pic_init_qp_minus26                    = h->pps.init_qp - 26;
-    info->num_ref_idx_l0_active_minus1           = h->pps.ref_count[0] - 1;
-    info->num_ref_idx_l1_active_minus1           = h->pps.ref_count[1] - 1;
-    info->log2_max_frame_num_minus4              = h->sps.log2_max_frame_num - 4;
-    info->pic_order_cnt_type                     = h->sps.poc_type;
-    info->log2_max_pic_order_cnt_lsb_minus4      = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
-    info->delta_pic_order_always_zero_flag       = h->sps.delta_pic_order_always_zero_flag;
-    info->direct_8x8_inference_flag              = h->sps.direct_8x8_inference_flag;
+    info->num_ref_frames                         = sps->ref_frame_count;
+    info->mb_adaptive_frame_field_flag           = sps->mb_aff && !info->field_pic_flag;
+    info->constrained_intra_pred_flag            = pps->constrained_intra_pred;
+    info->weighted_pred_flag                     = pps->weighted_pred;
+    info->weighted_bipred_idc                    = pps->weighted_bipred_idc;
+    info->frame_mbs_only_flag                    = sps->frame_mbs_only_flag;
+    info->transform_8x8_mode_flag                = pps->transform_8x8_mode;
+    info->chroma_qp_index_offset                 = pps->chroma_qp_index_offset[0];
+    info->second_chroma_qp_index_offset          = pps->chroma_qp_index_offset[1];
+    info->pic_init_qp_minus26                    = pps->init_qp - 26;
+    info->num_ref_idx_l0_active_minus1           = pps->ref_count[0] - 1;
+    info->num_ref_idx_l1_active_minus1           = pps->ref_count[1] - 1;
+    info->log2_max_frame_num_minus4              = sps->log2_max_frame_num - 4;
+    info->pic_order_cnt_type                     = sps->poc_type;
+    info->log2_max_pic_order_cnt_lsb_minus4      = sps->poc_type ? 0 : sps->log2_max_poc_lsb - 4;
+    info->delta_pic_order_always_zero_flag       = sps->delta_pic_order_always_zero_flag;
+    info->direct_8x8_inference_flag              = sps->direct_8x8_inference_flag;
 #ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
-    info2->qpprime_y_zero_transform_bypass_flag  = h->sps.transform_bypass;
-    info2->separate_colour_plane_flag            = h->sps.residual_color_transform_flag;
+    info2->qpprime_y_zero_transform_bypass_flag  = sps->transform_bypass;
+    info2->separate_colour_plane_flag            = sps->residual_color_transform_flag;
 #endif
-    info->entropy_coding_mode_flag               = h->pps.cabac;
-    info->pic_order_present_flag                 = h->pps.pic_order_present;
-    info->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-    info->redundant_pic_cnt_present_flag         = h->pps.redundant_pic_cnt_present;
+    info->entropy_coding_mode_flag               = pps->cabac;
+    info->pic_order_present_flag                 = pps->pic_order_present;
+    info->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
+    info->redundant_pic_cnt_present_flag         = pps->redundant_pic_cnt_present;
 
-    memcpy(info->scaling_lists_4x4, h->pps.scaling_matrix4,
+    memcpy(info->scaling_lists_4x4, pps->scaling_matrix4,
            sizeof(info->scaling_lists_4x4));
-    memcpy(info->scaling_lists_8x8[0], h->pps.scaling_matrix8[0],
+    memcpy(info->scaling_lists_8x8[0], pps->scaling_matrix8[0],
            sizeof(info->scaling_lists_8x8[0]));
-    memcpy(info->scaling_lists_8x8[1], h->pps.scaling_matrix8[3],
+    memcpy(info->scaling_lists_8x8[1], pps->scaling_matrix8[3],
            sizeof(info->scaling_lists_8x8[1]));
 
     vdpau_h264_set_reference_frames(avctx);
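
The 8x8 scaling-list copies above pick scaling_matrix8[0] and scaling_matrix8[3] because the driver structures only carry two 8x8 lists; assuming FFmpeg's layout where index 0 is the intra-luma list and index 3 the inter-luma list, the mapping reduces to the following sketch (struct and function names are illustrative, not real API):

    /* Sketch of the 8x8 scaling-list mapping above, assuming scaling_matrix8[0]
     * holds the intra-luma list and scaling_matrix8[3] the inter-luma list. */
    #include <string.h>
    #include <stdint.h>

    struct driver_iq_matrix {
        uint8_t list4x4[6][16];
        uint8_t list8x8[2][64];
    };

    void fill_iq_matrix(struct driver_iq_matrix *m,
                        const uint8_t mat4[6][16], const uint8_t mat8[6][64])
    {
        memcpy(m->list4x4,    mat4,    sizeof(m->list4x4));
        memcpy(m->list8x8[0], mat8[0], sizeof(m->list8x8[0])); /* intra luma */
        memcpy(m->list8x8[1], mat8[3], sizeof(m->list8x8[1])); /* inter luma */
    }
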
diff --git a/libavcodec/videotoolbox.c b/libavcodec/videotoolbox.c
index 2f4d531..4dc843d 100644
--- a/libavcodec/videotoolbox.c
+++ b/libavcodec/videotoolbox.c
@@ -84,7 +84,7 @@ CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
     H264Context *h     = avctx->priv_data;
     CFDataRef data = NULL;
     uint8_t *p;
-    int vt_extradata_size = 6 + 3 + h->sps.data_size + 4 + h->pps.data_size;
+    int vt_extradata_size = 6 + 3 + h->ps.sps->data_size + 4 + h->ps.pps->data_size;
     uint8_t *vt_extradata = av_malloc(vt_extradata_size);
     if (!vt_extradata)
         return NULL;
@@ -92,15 +92,15 @@ CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
     p = vt_extradata;
 
     AV_W8(p + 0, 1); /* version */
-    AV_W8(p + 1, h->sps.data[0]); /* profile */
-    AV_W8(p + 2, h->sps.data[1]); /* profile compat */
-    AV_W8(p + 3, h->sps.data[2]); /* level */
+    AV_W8(p + 1, h->ps.sps->data[0]); /* profile */
+    AV_W8(p + 2, h->ps.sps->data[1]); /* profile compat */
+    AV_W8(p + 3, h->ps.sps->data[2]); /* level */
     AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
     AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
-    AV_WB16(p + 6, h->sps.data_size + 1);
+    AV_WB16(p + 6, h->ps.sps->data_size + 1);
     AV_W8(p + 8, NAL_SPS | (3 << 5)); // NAL unit header
-    memcpy(p + 9, h->sps.data, h->sps.data_size);
-    p += 9 + h->sps.data_size;
+    memcpy(p + 9, h->ps.sps->data, h->ps.sps->data_size);
+    p += 9 + h->ps.sps->data_size;
     AV_W8(p + 0, 1); /* number of pps */
     AV_WB16(p + 1, h->pps.data_size + 1);
     AV_W8(p + 3, NAL_PPS | (3 << 5)); // NAL unit header
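
For context on the vt_extradata_size arithmetic above: the avcC blob is 6 header bytes, then for the single SPS a 2-byte length field plus a re-inserted 1-byte NAL header plus the payload, then a 1-byte PPS count and the same 2+1+payload layout for the PPS, i.e. 6 + 3 + sps_size + 4 + pps_size. A small sketch of that bookkeeping (illustrative only, assuming one SPS and one PPS whose stored payloads exclude the NAL header byte):

    /* Back-of-the-envelope sketch of the avcC sizing used above, assuming one
     * SPS and one PPS whose stored payloads exclude the 1-byte NAL header
     * (which the writer re-inserts).  Illustrative only. */
    #include <stddef.h>

    size_t avcc_size(size_t sps_payload, size_t pps_payload)
    {
        size_t size = 0;
        size += 5;                    /* version, profile, compat, level, length-size byte */
        size += 1;                    /* number of SPS */
        size += 2 + 1 + sps_payload;  /* SPS length field, NAL header, payload */
        size += 1;                    /* number of PPS */
        size += 2 + 1 + pps_payload;  /* PPS length field, NAL header, payload */
        return size;                  /* == 6 + 3 + sps_payload + 4 + pps_payload */
    }
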


======================================================================

diff --cc libavcodec/dxva2_h264.c
index bcba875,cd13486..bd1fa1e
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@@ -96,24 -96,24 +98,24 @@@ static void fill_picture_parameters(con
  
      pp->wFrameWidthInMbsMinus1        = h->mb_width  - 1;
      pp->wFrameHeightInMbsMinus1       = h->mb_height - 1;
-     pp->num_ref_frames                = h->sps.ref_frame_count;
+     pp->num_ref_frames                = sps->ref_frame_count;
  
      pp->wBitFields                    = ((h->picture_structure != PICT_FRAME) <<  0) |
-                                         ((h->sps.mb_aff &&
+                                         ((sps->mb_aff &&
                                          (h->picture_structure == PICT_FRAME)) <<  1) |
-                                         (h->sps.residual_color_transform_flag <<  2) |
+                                         (sps->residual_color_transform_flag   <<  2) |
 -                                        /* sp_for_switch_flag (not implemented by Libav) */
 +                                        /* sp_for_switch_flag (not implemented by FFmpeg) */
                                          (0                                    <<  3) |
-                                         (h->sps.chroma_format_idc             <<  4) |
+                                         (sps->chroma_format_idc               <<  4) |
                                          ((h->nal_ref_idc != 0)                <<  6) |
-                                         (h->pps.constrained_intra_pred        <<  7) |
-                                         (h->pps.weighted_pred                 <<  8) |
-                                         (h->pps.weighted_bipred_idc           <<  9) |
+                                         (pps->constrained_intra_pred          <<  7) |
+                                         (pps->weighted_pred                   <<  8) |
+                                         (pps->weighted_bipred_idc             <<  9) |
                                          /* MbsConsecutiveFlag */
                                          (1                                    << 11) |
-                                         (h->sps.frame_mbs_only_flag           << 12) |
-                                         (h->pps.transform_8x8_mode            << 13) |
-                                         ((h->sps.level_idc >= 31)             << 14) |
+                                         (sps->frame_mbs_only_flag             << 12) |
+                                         (pps->transform_8x8_mode              << 13) |
+                                         ((sps->level_idc >= 31)               << 14) |
                                          /* IntraPicFlag (Modified if we detect a non
                                           * intra slice in dxva2_h264_decode_slice) */
                                          (1                                    << 15);
@@@ -135,35 -135,35 +137,36 @@@
      if ((h->picture_structure & PICT_BOTTOM_FIELD) &&
          current_picture->field_poc[1] != INT_MAX)
          pp->CurrFieldOrderCnt[1] = current_picture->field_poc[1];
-     pp->pic_init_qs_minus26           = h->pps.init_qs - 26;
-     pp->chroma_qp_index_offset        = h->pps.chroma_qp_index_offset[0];
-     pp->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
+     pp->pic_init_qs_minus26           = pps->init_qs - 26;
+     pp->chroma_qp_index_offset        = pps->chroma_qp_index_offset[0];
+     pp->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1];
      pp->ContinuationFlag              = 1;
-     pp->pic_init_qp_minus26           = h->pps.init_qp - 26;
-     pp->num_ref_idx_l0_active_minus1  = h->pps.ref_count[0] - 1;
-     pp->num_ref_idx_l1_active_minus1  = h->pps.ref_count[1] - 1;
+     pp->pic_init_qp_minus26           = pps->init_qp - 26;
+     pp->num_ref_idx_l0_active_minus1  = pps->ref_count[0] - 1;
+     pp->num_ref_idx_l1_active_minus1  = pps->ref_count[1] - 1;
      pp->Reserved8BitsA                = 0;
      pp->frame_num                     = h->frame_num;
-     pp->log2_max_frame_num_minus4     = h->sps.log2_max_frame_num - 4;
-     pp->pic_order_cnt_type            = h->sps.poc_type;
-     if (h->sps.poc_type == 0)
-         pp->log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
-     else if (h->sps.poc_type == 1)
-         pp->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
-     pp->direct_8x8_inference_flag     = h->sps.direct_8x8_inference_flag;
-     pp->entropy_coding_mode_flag      = h->pps.cabac;
-     pp->pic_order_present_flag        = h->pps.pic_order_present;
-     pp->num_slice_groups_minus1       = h->pps.slice_group_count - 1;
-     pp->slice_group_map_type          = h->pps.mb_slice_group_map_type;
-     pp->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-     pp->redundant_pic_cnt_present_flag= h->pps.redundant_pic_cnt_present;
+     pp->log2_max_frame_num_minus4     = sps->log2_max_frame_num - 4;
+     pp->pic_order_cnt_type            = sps->poc_type;
+     if (sps->poc_type == 0)
+         pp->log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4;
+     else if (sps->poc_type == 1)
+         pp->delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
+     pp->direct_8x8_inference_flag     = sps->direct_8x8_inference_flag;
+     pp->entropy_coding_mode_flag      = pps->cabac;
+     pp->pic_order_present_flag        = pps->pic_order_present;
+     pp->num_slice_groups_minus1       = pps->slice_group_count - 1;
+     pp->slice_group_map_type          = pps->mb_slice_group_map_type;
+     pp->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
+     pp->redundant_pic_cnt_present_flag= pps->redundant_pic_cnt_present;
      pp->Reserved8BitsB                = 0;
 -    pp->slice_group_change_rate_minus1= 0;  /* XXX not implemented by Libav */
 -    //pp->SliceGroupMap[810];               /* XXX not implemented by Libav */
 +    pp->slice_group_change_rate_minus1= 0;  /* XXX not implemented by FFmpeg */
 +    //pp->SliceGroupMap[810];               /* XXX not implemented by FFmpeg */
  }
  
  static void fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx, const H264Context *h, DXVA_Qmatrix_H264 *qm)
  {
++    const PPS *pps = h->ps.pps;
      unsigned i, j;
      memset(qm, 0, sizeof(*qm));
      if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) {
@@@ -281,8 -281,8 +284,8 @@@ static void fill_slice_long(AVCodecCont
              }
          }
      }
 -    slice->slice_qs_delta    = 0; /* XXX not implemented by Libav */
 +    slice->slice_qs_delta    = 0; /* XXX not implemented by FFmpeg */
-     slice->slice_qp_delta    = sl->qscale - h->pps.init_qp;
+     slice->slice_qp_delta    = sl->qscale - h->ps.pps->init_qp;
      slice->redundant_pic_cnt = sl->redundant_pic_count;
      if (sl->slice_type == AV_PICTURE_TYPE_B)
          slice->direct_spatial_mv_pred_flag = sl->direct_spatial_mv_pred;
diff --cc libavcodec/h264.c
index c011527,81c1e81..0de6d91
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@@ -57,12 -54,6 +57,12 @@@ static int h264_decode_end(AVCodecConte
  
  const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
  
 +int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
 +{
 +    H264Context *h = avctx->priv_data;
-     return h ? h->sps.num_reorder_frames : 0;
++    return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
 +}
 +
  static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                                int (*mv)[2][4][2],
                                int mb_x, int mb_y, int mb_intra, int mb_skipped)
@@@ -422,12 -394,6 +419,11 @@@ static int h264_init_context(AVCodecCon
      int i;
  
      h->avctx                 = avctx;
 +    h->backup_width          = -1;
 +    h->backup_height         = -1;
 +    h->backup_pix_fmt        = AV_PIX_FMT_NONE;
-     h->dequant_coeff_pps     = -1;
 +    h->current_sps_id        = -1;
 +    h->cur_chroma_format_idc = -1;
  
      h->picture_structure     = PICT_FRAME;
      h->slice_context_count   = 1;
@@@ -507,16 -463,16 +503,16 @@@ av_cold int ff_h264_decode_init(AVCodec
      }
  
      if (avctx->extradata_size > 0 && avctx->extradata) {
 -       ret = ff_h264_decode_extradata(h);
 -       if (ret < 0) {
 -           ff_h264_free_context(h);
 -           return ret;
 -       }
 +        ret = ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
 +        if (ret < 0) {
 +            h264_decode_end(avctx);
 +            return ret;
 +        }
      }
  
-     if (h->sps.bitstream_restriction_flag &&
-         h->avctx->has_b_frames < h->sps.num_reorder_frames) {
-         h->avctx->has_b_frames = h->sps.num_reorder_frames;
+     if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
+         h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
+         h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
          h->low_delay           = 0;
      }
  
@@@ -640,7 -588,7 +637,7 @@@ static void decode_postinit(H264Contex
          /* Derive top_field_first from field pocs. */
          cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
      } else {
-         if (h->sps.pic_struct_present_flag) {
 -        if (cur->f->interlaced_frame || sps->pic_struct_present_flag) {
++        if (sps->pic_struct_present_flag) {
              /* Use picture timing SEI information. Even if it is a
               * information of a past frame, better than nothing. */
              if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
@@@ -737,37 -681,12 +734,37 @@@
      // FIXME do something with unavailable reference frames
  
      /* Sort B-frames into display order */
-     if (h->sps.bitstream_restriction_flag ||
+     if (sps->bitstream_restriction_flag ||
 -        h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
 +        h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
-         h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, h->sps.num_reorder_frames);
+         h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
      }
      h->low_delay = !h->avctx->has_b_frames;
  
 +    for (i = 0; 1; i++) {
 +        if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
 +            if(i)
 +                h->last_pocs[i-1] = cur->poc;
 +            break;
 +        } else if(i) {
 +            h->last_pocs[i-1]= h->last_pocs[i];
 +        }
 +    }
 +    out_of_order = MAX_DELAYED_PIC_COUNT - i;
 +    if(   cur->f->pict_type == AV_PICTURE_TYPE_B
 +       || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
 +        out_of_order = FFMAX(out_of_order, 1);
 +    if (out_of_order == MAX_DELAYED_PIC_COUNT) {
 +        av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
 +        for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
 +            h->last_pocs[i] = INT_MIN;
 +        h->last_pocs[0] = cur->poc;
 +        cur->mmco_reset = 1;
-     } else if(h->avctx->has_b_frames < out_of_order && !h->sps.bitstream_restriction_flag){
++    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
 +        av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
 +        h->avctx->has_b_frames = out_of_order;
 +        h->low_delay = 0;
 +    }
 +
      pics = 0;
      while (h->delayed_pic[pics])
          pics++;
@@@ -1149,23 -1063,15 +1147,23 @@@ again
              if ((err = ff_h264_decode_slice_header(h, sl)))
                  break;
  
 -            if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
 -                h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) &
 -                                    ((1 << h->ps.sps->log2_max_frame_num) - 1);
 +            if (h->sei_recovery_frame_cnt >= 0) {
 +                if (h->frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
 +                    h->valid_recovery_point = 1;
 +
 +                if (   h->recovery_frame < 0
-                     || av_mod_uintp2(h->recovery_frame - h->frame_num, h->sps.log2_max_frame_num) > h->sei_recovery_frame_cnt) {
-                     h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->sps.log2_max_frame_num);
++                    || av_mod_uintp2(h->recovery_frame - h->frame_num, h->ps.sps->log2_max_frame_num) > h->sei_recovery_frame_cnt) {
++                    h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
 +
 +                    if (!h->valid_recovery_point)
 +                        h->recovery_frame = h->frame_num;
 +                }
              }
  
 -            h->cur_pic_ptr->f->key_frame |=
 -                (nal->type == NAL_IDR_SLICE) || (h->sei_recovery_frame_cnt >= 0);
 +            h->cur_pic_ptr->f->key_frame |= (nal->type == NAL_IDR_SLICE);
  
 -            if (nal->type == NAL_IDR_SLICE || h->recovery_frame == h->frame_num) {
 +            if (nal->type == NAL_IDR_SLICE ||
 +                (h->recovery_frame == h->frame_num && nal->ref_idc)) {
                  h->recovery_frame         = -1;
                  h->cur_pic_ptr->recovered = 1;
              }
@@@ -1225,22 -1118,14 +1223,21 @@@
              if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                  goto end;
              break;
--        case NAL_SPS:
-             h->gb = nal->gb;
-             if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
 -            ret = ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps);
 -            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
 -                goto end;
++        case NAL_SPS: {
++            GetBitContext tmp_gb = nal->gb;
++            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
 +                break;
 +            av_log(h->avctx, AV_LOG_DEBUG,
 +                   "SPS decoding failure, trying again with the complete NAL\n");
-             init_get_bits8(&h->gb, nal->raw_data + 1, nal->raw_size - 1);
-             if (ff_h264_decode_seq_parameter_set(h, 0) >= 0)
++            init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
++            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
 +                break;
-             h->gb = nal->gb;
-             ff_h264_decode_seq_parameter_set(h, 1);
- 
++            ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
              break;
++        }
          case NAL_PPS:
-             h->gb = nal->gb;
-             ret = ff_h264_decode_picture_parameter_set(h, nal->size_bits);
+             ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
+                                                        nal->size_bits);
              if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                  goto end;
              break;
@@@ -1291,47 -1169,6 +1288,50 @@@
  
      ret = 0;
  end:
 +
 +#if CONFIG_ERROR_RESILIENCE
 +    /*
 +     * FIXME: Error handling code does not seem to support interlaced
 +     * when slices span multiple rows
 +     * The ff_er_add_slice calls don't work right for bottom
 +     * fields; they cause massive erroneous error concealing
 +     * Error marking covers both fields (top and bottom).
 +     * This causes a mismatched s->error_count
 +     * and a bad error table. Further, the error count goes to
 +     * INT_MAX when called for bottom field, because mb_y is
 +     * past end by one (callers fault) and resync_mb_y != 0
 +     * causes problems for the first MB line, too.
 +     */
-     if (!FIELD_PICTURE(h) && h->current_slice && !h->sps.new && h->enable_er) {
++    if (!FIELD_PICTURE(h) && h->current_slice &&
++        h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
++        h->enable_er) {
++
 +        H264SliceContext *sl = h->slice_ctx;
 +        int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
 +
 +        ff_h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr);
 +
 +        if (use_last_pic) {
 +            ff_h264_set_erpic(&sl->er.last_pic, &h->last_pic_for_ec);
 +            sl->ref_list[0][0].parent = &h->last_pic_for_ec;
 +            memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
 +            memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
 +            sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
 +        } else if (sl->ref_count[0]) {
 +            ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
 +        } else
 +            ff_h264_set_erpic(&sl->er.last_pic, NULL);
 +
 +        if (sl->ref_count[1])
 +            ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);
 +
 +        sl->er.ref_count = sl->ref_count[0];
 +
 +        ff_er_frame_end(&sl->er);
 +        if (use_last_pic)
 +            memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
 +    }
 +#endif /* CONFIG_ERROR_RESILIENCE */
      /* clean up */
      if (h->cur_pic_ptr && !h->droppable) {
          ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
@@@ -1581,15 -1317,12 +1581,18 @@@ av_cold void ff_h264_free_context(H264C
      av_freep(&h->slice_ctx);
      h->nb_slice_ctx = 0;
  
 +    h->a53_caption_size = 0;
 +    av_freep(&h->a53_caption);
 +
      for (i = 0; i < MAX_SPS_COUNT; i++)
-         av_freep(h->sps_buffers + i);
+         av_buffer_unref(&h->ps.sps_list[i]);
  
      for (i = 0; i < MAX_PPS_COUNT; i++)
-         av_freep(h->pps_buffers + i);
+         av_buffer_unref(&h->ps.pps_list[i]);
+ 
++    av_buffer_unref(&h->ps.sps_ref);
++    av_buffer_unref(&h->ps.pps_ref);
 +
      ff_h2645_packet_uninit(&h->pkt);
  }
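
  [Illustration, not part of the commit] The NAL_SPS case above shows the new calling convention: the parameter-set
  decoder now reads from a caller-owned GetBitContext and writes into an explicit H264ParamSets. A minimal sketch of
  that retry pattern follows; the helper name sketch_parse_sps is purely illustrative, and it assumes the internal
  libavcodec headers (h264.h, h2645_parse.h, get_bits.h) are available.

      /* Illustrative sketch only: mirrors the retry pattern of the NAL_SPS case
       * above -- try the unescaped bitstream, fall back to the raw NAL payload,
       * and finally accept a truncated SPS. */
      static int sketch_parse_sps(AVCodecContext *avctx, H264ParamSets *ps,
                                  H2645NAL *nal)
      {
          GetBitContext tmp_gb = nal->gb;  /* local copy, leaves nal->gb intact */

          if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, ps, 0) >= 0)
              return 0;
          if (init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1) >= 0 &&
              ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, ps, 0) >= 0)
              return 0;
          /* last attempt: ignore truncation, as the decoder path above does */
          return ff_h264_decode_seq_parameter_set(&nal->gb, avctx, ps, 1);
      }
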
  
diff --cc libavcodec/h264.h
index 33bc509,6a026aa..264b447
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@@ -94,12 -93,11 +95,12 @@@
  #define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
  
  #ifndef CABAC
- #define CABAC(h) (h)->pps.cabac
 -#define CABAC(h) h->ps.pps->cabac
++#define CABAC(h) (h)->ps.pps->cabac
  #endif
  
- #define CHROMA(h)    ((h)->sps.chroma_format_idc)
- #define CHROMA422(h) ((h)->sps.chroma_format_idc == 2)
- #define CHROMA444(h) ((h)->sps.chroma_format_idc == 3)
 -#define CHROMA422(h) (h->ps.sps->chroma_format_idc == 2)
 -#define CHROMA444(h) (h->ps.sps->chroma_format_idc == 3)
++#define CHROMA(h)    ((h)->ps.sps->chroma_format_idc)
++#define CHROMA422(h) ((h)->ps.sps->chroma_format_idc == 2)
++#define CHROMA444(h) ((h)->ps.sps->chroma_format_idc == 3)
  
  #define EXTENDED_SAR       255
  
@@@ -231,9 -215,6 +232,8 @@@ typedef struct SPS 
      int bit_depth_chroma;                 ///< bit_depth_chroma_minus8 + 8
      int residual_color_transform_flag;    ///< residual_colour_transform_flag
      int constraint_set_flags;             ///< constraint_set[0-3]_flag
-     int new;                              ///< flag to keep track if the decoder context needs re-init due to changed SPS
 +    uint8_t data[4096];
 +    size_t data_size;
  } SPS;
  
  /**
@@@ -257,41 -238,26 +257,58 @@@ typedef struct PPS 
      int transform_8x8_mode;         ///< transform_8x8_mode_flag
      uint8_t scaling_matrix4[6][16];
      uint8_t scaling_matrix8[6][64];
 -    uint8_t chroma_qp_table[2][64]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
 +    uint8_t chroma_qp_table[2][QP_MAX_NUM+1];  ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
      int chroma_qp_diff;
 +    uint8_t data[4096];
 +    size_t data_size;
+ 
+     uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
+     uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
+     uint32_t(*dequant4_coeff[6])[16];
+     uint32_t(*dequant8_coeff[6])[64];
  } PPS;
  
+ typedef struct H264ParamSets {
+     AVBufferRef *sps_list[MAX_SPS_COUNT];
+     AVBufferRef *pps_list[MAX_PPS_COUNT];
+ 
++    AVBufferRef *pps_ref;
++    AVBufferRef *sps_ref;
+     /* currently active parameters sets */
+     const PPS *pps;
+     // FIXME this should properly be const
+     SPS *sps;
+ } H264ParamSets;
+ 
  /**
 + * Frame Packing Arrangement Type
 + */
 +typedef struct FPA {
 +    int         frame_packing_arrangement_id;
 +    int         frame_packing_arrangement_cancel_flag; ///< is previous arrangement canceled, -1 if never received
 +    SEI_FpaType frame_packing_arrangement_type;
 +    int         frame_packing_arrangement_repetition_period;
 +    int         content_interpretation_type;
 +    int         quincunx_sampling_flag;
 +} FPA;
 +
 +/**
 + *     Green MetaData Information Type
 + */
 +typedef struct GreenMetaData {
 +    uint8_t  green_metadata_type;
 +    uint8_t  period_type;
 +    uint16_t  num_seconds;
 +    uint16_t  num_pictures;
 +    uint8_t percent_non_zero_macroblocks;
 +    uint8_t percent_intra_coded_macroblocks;
 +    uint8_t percent_six_tap_filtering;
 +    uint8_t percent_alpha_point_deblocking_instance;
 +    uint8_t xsd_metric_type;
 +    uint16_t xsd_metric_value;
 +} GreenMetaData;
 +
 +/**
   * Memory management control operation opcode.
   */
  typedef enum MMCOOpcode {
@@@ -570,18 -521,6 +587,11 @@@ typedef struct H264Context 
      uint32_t *mb2br_xy;
      int b_stride;       // FIXME use s->b4_stride
  
 +
 +    unsigned current_sps_id; ///< id of the current SPS
-     SPS sps; ///< current sps
-     PPS pps; ///< current pps
 +
 +    int au_pps_id; ///< pps_id of current access unit
 +
-     uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16]; // FIXME should these be moved down?
-     uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
-     uint32_t(*dequant4_coeff[6])[16];
-     uint32_t(*dequant8_coeff[6])[64];
- 
      uint16_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1
  
      // interlacing specific flags
@@@ -848,7 -760,8 +855,8 @@@ int ff_h264_decode_sei(H264Context *h)
  /**
   * Decode SPS
   */
- int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation);
+ int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
 -                                     H264ParamSets *ps);
++                                     H264ParamSets *ps, int ignore_truncation);
  
  /**
   * compute profile from sps
@@@ -910,7 -824,7 +919,7 @@@ int ff_h264_decode_mb_cabac(const H264C
  
  void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);
  
--void ff_h264_init_dequant_tables(H264Context *h);
++void ff_h264_init_dequant_tables(PPS *pps, const SPS *sps);
  
  void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
  void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
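
  [Illustration, not part of the commit] The sps_ref/pps_ref buffers added to H264ParamSets above exist so the
  currently active parameter sets can outlive their slot in sps_list/pps_list. A hedged sketch of that ownership
  pattern, assuming only the libavutil AVBuffer API; activate_pps is an illustrative name, not a function from
  this merge.

      /* Illustrative only: keep a private reference on the activated PPS so
       * ps->pps stays valid even if pps_list[pps_id] is later replaced or
       * dropped by remove_pps(). */
      static int activate_pps(H264ParamSets *ps, unsigned int pps_id)
      {
          av_buffer_unref(&ps->pps_ref);
          ps->pps_ref = av_buffer_ref(ps->pps_list[pps_id]);
          if (!ps->pps_ref)
              return AVERROR(ENOMEM);
          ps->pps = (const PPS *)ps->pps_ref->data;
          return 0;
      }
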
diff --cc libavcodec/h264_cabac.c
index 3df0f70,f831a68..ddabe3b
--- a/libavcodec/h264_cabac.c
+++ b/libavcodec/h264_cabac.c
@@@ -2027,10 -2022,9 +2028,10 @@@ decode_intra_mb
      h->slice_table[mb_xy] = sl->slice_num;
  
      if(IS_INTRA_PCM(mb_type)) {
-         const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
-                             h->sps.bit_depth_luma >> 3;
+         const int mb_size = ff_h264_mb_sizes[sps->chroma_format_idc] *
+                             sps->bit_depth_luma >> 3;
          const uint8_t *ptr;
 +        int ret;
  
          // We assume these blocks are very rare so we do not optimize it.
          // FIXME The two following lines get the bitstream position in the cabac
diff --cc libavcodec/h264_cavlc.c
index be53914,10511fb..95b3778
--- a/libavcodec/h264_cavlc.c
+++ b/libavcodec/h264_cavlc.c
@@@ -1141,9 -1129,7 +1141,9 @@@ decode_intra_mb
              if (decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 2) < 0 ) {
                  return -1;
              }
 -        } else if (CHROMA422(h)) {
 +        } else {
-             const int num_c8x8 = h->sps.chroma_format_idc;
++            const int num_c8x8 = h->ps.sps->chroma_format_idc;
 +
              if(cbp&0x30){
                  for(chroma_idx=0; chroma_idx<2; chroma_idx++)
                      if (decode_residual(h, sl, gb, sl->mb + ((256 + 16*16*chroma_idx) << pixel_shift),
@@@ -1156,9 -1141,9 +1156,9 @@@
  
              if(cbp&0x20){
                  for(chroma_idx=0; chroma_idx<2; chroma_idx++){
-                     const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
+                     const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
                      int16_t *mb = sl->mb + (16*(16 + 16*chroma_idx) << pixel_shift);
 -                    for (i8x8 = 0; i8x8 < 2; i8x8++) {
 +                    for (i8x8 = 0; i8x8<num_c8x8; i8x8++) {
                          for (i4x4 = 0; i4x4 < 4; i4x4++) {
                              const int index = 16 + 16*chroma_idx + 8*i8x8 + i4x4;
                              if (decode_residual(h, sl, gb, mb, index, scan + 1, qmul, 15) < 0)
diff --cc libavcodec/h264_loopfilter.c
index 0014927,fccfc66..7431b5e
--- a/libavcodec/h264_loopfilter.c
+++ b/libavcodec/h264_loopfilter.c
@@@ -419,8 -421,8 +419,8 @@@ void ff_h264_filter_mb_fast(const H264C
                              uint8_t *img_cb, uint8_t *img_cr,
                              unsigned int linesize, unsigned int uvlinesize)
  {
 -    assert(!FRAME_MBAFF(h));
 +    av_assert2(!FRAME_MBAFF(h));
-     if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) {
+     if(!h->h264dsp.h264_loop_filter_strength || h->ps.pps->chroma_qp_diff) {
          ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
          return;
      }
@@@ -723,8 -721,8 +723,8 @@@ void ff_h264_filter_mb(const H264Contex
      const int mb_type = h->cur_pic.mb_type[mb_xy];
      const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
      int first_vertical_edge_done = 0;
 -    int chroma = !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
 +    int chroma = CHROMA(h) && !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
-     int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
+     int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
      int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
      int b = 52 + sl->slice_beta_offset - qp_bd_offset;
  
diff --cc libavcodec/h264_mb.c
index 3c5c932,b6773e6..75535ad
--- a/libavcodec/h264_mb.c
+++ b/libavcodec/h264_mb.c
@@@ -635,13 -634,8 +635,13 @@@ static av_always_inline void hl_decode_
              for (i = 0; i < 16; i += 4) {
                  uint8_t *const ptr = dest_y + block_offset[i];
                  const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];
-                 if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
+                 if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
 -                    h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
 +                    if (h->x264_build != -1) {
 +                        h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
 +                    } else
 +                        h->hpc.pred8x8l_filter_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift),
 +                                                        (sl-> topleft_samples_available << i) & 0x8000,
 +                                                        (sl->topright_samples_available << i) & 0x4000, linesize);
                  } else {
                      const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                      h->hpc.pred8x8l[dir](ptr, (sl->topleft_samples_available << i) & 0x8000,
diff --cc libavcodec/h264_mb_template.c
index f7f8a93,7da4f41..f582b00
--- a/libavcodec/h264_mb_template.c
+++ b/libavcodec/h264_mb_template.c
@@@ -96,8 -96,8 +96,8 @@@ static av_noinline void FUNC(hl_decode_
      }
  
      if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
-         const int bit_depth = h->sps.bit_depth_luma;
++        const int bit_depth = h->ps.sps->bit_depth_luma;
          if (PIXEL_SHIFT) {
 -            const int bit_depth = h->ps.sps->bit_depth_luma;
              int j;
              GetBitContext gb;
              init_get_bits(&gb, sl->intra_pcm_ptr,
@@@ -109,13 -109,16 +109,13 @@@
                      tmp_y[j] = get_bits(&gb, bit_depth);
              }
              if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
-                 if (!h->sps.chroma_format_idc) {
+                 if (!h->ps.sps->chroma_format_idc) {
                      for (i = 0; i < block_h; i++) {
                          uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
 -                        for (j = 0; j < 8; j++)
 -                            tmp_cb[j] = 1 << (bit_depth - 1);
 -                    }
 -                    for (i = 0; i < block_h; i++) {
                          uint16_t *tmp_cr = (uint16_t *)(dest_cr + i * uvlinesize);
 -                        for (j = 0; j < 8; j++)
 -                            tmp_cr[j] = 1 << (bit_depth - 1);
 +                        for (j = 0; j < 8; j++) {
 +                            tmp_cb[j] = tmp_cr[j] = 1 << (bit_depth - 1);
 +                        }
                      }
                  } else {
                      for (i = 0; i < block_h; i++) {
@@@ -134,10 -137,10 +134,10 @@@
              for (i = 0; i < 16; i++)
                  memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16);
              if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
-                 if (!h->sps.chroma_format_idc) {
+                 if (!h->ps.sps->chroma_format_idc) {
 -                    for (i = 0; i < block_h; i++) {
 -                        memset(dest_cb + i * uvlinesize, 128, 8);
 -                        memset(dest_cr + i * uvlinesize, 128, 8);
 +                    for (i = 0; i < 8; i++) {
 +                        memset(dest_cb + i * uvlinesize, 1 << (bit_depth - 1), 8);
 +                        memset(dest_cr + i * uvlinesize, 1 << (bit_depth - 1), 8);
                      }
                  } else {
                      const uint8_t *src_cb = sl->intra_pcm_ptr + 256;
diff --cc libavcodec/h264_parser.c
index 493ed19,cd37d31..0913452
--- a/libavcodec/h264_parser.c
+++ b/libavcodec/h264_parser.c
@@@ -154,8 -122,8 +155,8 @@@ static int scan_mmco_reset(AVCodecParse
      if (slice_type_nos == AV_PICTURE_TYPE_B)
          get_bits1(gb); // direct_spatial_mv_pred
  
-     if (ff_h264_parse_ref_count(&list_count, ref_count, gb, &h->pps,
+     if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps,
 -                                slice_type_nos, h->picture_structure) < 0)
 +                                slice_type_nos, h->picture_structure, h->avctx) < 0)
          return AVERROR_INVALIDDATA;
  
      if (slice_type_nos != AV_PICTURE_TYPE_I) {
@@@ -252,26 -220,14 +253,27 @@@ static inline int parse_nal_units(AVCod
      if (!buf_size)
          return 0;
  
 +    buf_index     = 0;
 +    next_avc      = h->is_avc ? 0 : buf_size;
      for (;;) {
+         const SPS *sps;
 -        int src_length, consumed;
 -        buf = avpriv_find_start_code(buf, buf_end, &state);
 -        if (buf >= buf_end)
 -            break;
 -        --buf;
 -        src_length = buf_end - buf;
 +        int src_length, consumed, nalsize = 0;
 +
 +        if (buf_index >= next_avc) {
 +            nalsize = get_avc_nalsize(h, buf, buf_size, &buf_index);
 +            if (nalsize < 0)
 +                break;
 +            next_avc = buf_index + nalsize;
 +        } else {
 +            buf_index = find_start_code(buf, buf_size, buf_index, next_avc);
 +            if (buf_index >= buf_size)
 +                break;
 +            if (buf_index >= next_avc)
 +                continue;
 +        }
 +        src_length = next_avc - buf_index;
 +
 +        state = buf[buf_index];
          switch (state & 0x1f) {
          case NAL_SLICE:
          case NAL_IDR_SLICE:
@@@ -307,13 -262,14 +309,19 @@@
  
          switch (h->nal_unit_type) {
          case NAL_SPS:
-             ff_h264_decode_seq_parameter_set(h, 0);
 -            ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps);
++            ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps, 0);
              break;
          case NAL_PPS:
-             ff_h264_decode_picture_parameter_set(h, h->gb.size_in_bits);
+             ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps,
+                                                  nal.size_bits);
              break;
          case NAL_SEI:
--            ff_h264_decode_sei(h);
++            {
++                H264ParamSets ps = h->ps;
++                h->ps = p->ps;
++                ff_h264_decode_sei(h);
++                h->ps = ps;
++            }
              break;
          case NAL_IDR_SLICE:
              s->key_frame = 1;
@@@ -342,22 -298,24 +350,28 @@@
                         "non-existing PPS %u referenced\n", pps_id);
                  goto fail;
              }
-             h->pps = *h->pps_buffers[pps_id];
-             if (!h->sps_buffers[h->pps.sps_id]) {
+             p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data;
+             if (!p->ps.sps_list[p->ps.pps->sps_id]) {
                  av_log(h->avctx, AV_LOG_ERROR,
-                        "non-existing SPS %u referenced\n", h->pps.sps_id);
+                        "non-existing SPS %u referenced\n", p->ps.pps->sps_id);
                  goto fail;
              }
-             h->sps       = *h->sps_buffers[h->pps.sps_id];
-             h->frame_num = get_bits(&nal.gb, h->sps.log2_max_frame_num);
+             p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data;
+ 
+             h->ps.sps = p->ps.sps;
+             h->ps.pps = p->ps.pps;
+             sps = p->ps.sps;
  
-             if(h->sps.ref_frame_count <= 1 && h->pps.ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
++            // heuristic to detect non marked keyframes
++            if (h->ps.sps->ref_frame_count <= 1 && h->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
 +                s->key_frame = 1;
 +
-             s->coded_width  = 16 * h->sps.mb_width;
-             s->coded_height = 16 * h->sps.mb_height;
-             s->width        = s->coded_width  - (h->sps.crop_right + h->sps.crop_left);
-             s->height       = s->coded_height - (h->sps.crop_top   + h->sps.crop_bottom);
+             h->frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
+ 
+             s->coded_width  = 16 * sps->mb_width;
+             s->coded_height = 16 * sps->mb_height;
+             s->width        = s->coded_width  - (sps->crop_right + sps->crop_left);
+             s->height       = s->coded_height - (sps->crop_top   + sps->crop_bottom);
              if (s->width <= 0 || s->height <= 0) {
                  s->width  = s->coded_width;
                  s->height = s->coded_height;
@@@ -397,11 -355,11 +411,11 @@@
              }
  
              if (h->nal_unit_type == NAL_IDR_SLICE)
 -                get_ue_golomb(&nal.gb); /* idr_pic_id */
 +                get_ue_golomb_long(&nal.gb); /* idr_pic_id */
-             if (h->sps.poc_type == 0) {
-                 h->poc_lsb = get_bits(&nal.gb, h->sps.log2_max_poc_lsb);
+             if (sps->poc_type == 0) {
+                 h->poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
  
-                 if (h->pps.pic_order_present == 1 &&
+                 if (p->ps.pps->pic_order_present == 1 &&
                      h->picture_structure == PICT_FRAME)
                      h->delta_poc_bottom = get_se_golomb(&nal.gb);
              }
@@@ -533,14 -488,15 +547,35 @@@ static int h264_parse(AVCodecParserCont
      if (!p->got_first) {
          p->got_first = 1;
          if (avctx->extradata_size) {
++            int i;
++
              h->avctx = avctx;
 -            // must be done like in the decoder.
 -            // otherwise opening the parser, creating extradata,
 -            // and then closing and opening again
 +            // must be done like in decoder, otherwise opening the parser,
 +            // letting it create extradata and then closing and opening again
              // will cause has_b_frames to be always set.
 -            // NB: estimate_timings_from_pts behaves exactly like this.
 +            // Note that estimate_timings_from_pts does exactly this.
              if (!avctx->has_b_frames)
                  h->low_delay = 1;
 -            ff_h264_decode_extradata(h);
 +            ff_h264_decode_extradata(h, avctx->extradata, avctx->extradata_size);
++
++            for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++) {
++                av_buffer_unref(&p->ps.sps_list[i]);
++                if (h->ps.sps_list[i]) {
++                    p->ps.sps_list[i] = av_buffer_ref(h->ps.sps_list[i]);
++                    if (!p->ps.sps_list[i])
++                        return AVERROR(ENOMEM);
++                }
++            }
++            for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++) {
++                av_buffer_unref(&p->ps.pps_list[i]);
++                if (h->ps.pps_list[i]) {
++                    p->ps.pps_list[i] = av_buffer_ref(h->ps.pps_list[i]);
++                    if (!p->ps.pps_list[i])
++                        return AVERROR(ENOMEM);
++                }
++            }
++
++            p->ps.sps = h->ps.sps;
          }
      }
  
@@@ -626,9 -574,16 +661,16 @@@ static void h264_close(AVCodecParserCon
      H264ParseContext *p = s->priv_data;
      H264Context      *h = &p->h;
      ParseContext *pc = &p->pc;
+     int i;
  
 -    av_free(pc->buffer);
 +    av_freep(&pc->buffer);
      ff_h264_free_context(h);
+ 
+     for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++)
+         av_buffer_unref(&p->ps.sps_list[i]);
+ 
+     for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++)
+         av_buffer_unref(&p->ps.pps_list[i]);
  }
  
  static av_cold int init(AVCodecParserContext *s)
diff --cc libavcodec/h264_ps.c
index e0f0946,99ede76..521f560
--- a/libavcodec/h264_ps.c
+++ b/libavcodec/h264_ps.c
@@@ -84,13 -104,36 +84,33 @@@ static const int level_max_dpb_mbs[][2
      { 52, 184320    },
  };
  
- static inline int decode_hrd_parameters(H264Context *h, SPS *sps)
+ static void remove_pps(H264ParamSets *s, int id)
+ {
 -    if (s->pps_list[id] && s->pps == (const PPS*)s->pps_list[id]->data)
 -        s->pps = NULL;
+     av_buffer_unref(&s->pps_list[id]);
+ }
+ 
+ static void remove_sps(H264ParamSets *s, int id)
+ {
++#if 0
+     int i;
+     if (s->sps_list[id]) {
 -        if (s->sps == (SPS*)s->sps_list[id]->data)
 -            s->sps = NULL;
 -
+         /* drop all PPS that depend on this SPS */
+         for (i = 0; i < FF_ARRAY_ELEMS(s->pps_list); i++)
+             if (s->pps_list[i] && ((PPS*)s->pps_list[i]->data)->sps_id == id)
+                 remove_pps(s, i);
+     }
++#endif
+     av_buffer_unref(&s->sps_list[id]);
+ }
+ 
+ static inline int decode_hrd_parameters(GetBitContext *gb, AVCodecContext *avctx,
+                                         SPS *sps)
  {
      int cpb_count, i;
-     cpb_count = get_ue_golomb_31(&h->gb) + 1;
+     cpb_count = get_ue_golomb_31(gb) + 1;
  
      if (cpb_count > 32U) {
-         av_log(h->avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count);
+         av_log(avctx, AV_LOG_ERROR, "cpb_count %d invalid\n", cpb_count);
          return AVERROR_INVALIDDATA;
      }
  
@@@ -114,17 -158,17 +135,17 @@@ static inline int decode_vui_parameters
      int aspect_ratio_info_present_flag;
      unsigned int aspect_ratio_idc;
  
-     aspect_ratio_info_present_flag = get_bits1(&h->gb);
+     aspect_ratio_info_present_flag = get_bits1(gb);
  
      if (aspect_ratio_info_present_flag) {
-         aspect_ratio_idc = get_bits(&h->gb, 8);
+         aspect_ratio_idc = get_bits(gb, 8);
          if (aspect_ratio_idc == EXTENDED_SAR) {
-             sps->sar.num = get_bits(&h->gb, 16);
-             sps->sar.den = get_bits(&h->gb, 16);
+             sps->sar.num = get_bits(gb, 16);
+             sps->sar.den = get_bits(gb, 16);
 -        } else if (aspect_ratio_idc < FF_ARRAY_ELEMS(pixel_aspect)) {
 -            sps->sar = pixel_aspect[aspect_ratio_idc];
 +        } else if (aspect_ratio_idc < FF_ARRAY_ELEMS(ff_h264_pixel_aspect)) {
 +            sps->sar = ff_h264_pixel_aspect[aspect_ratio_idc];
          } else {
-             av_log(h->avctx, AV_LOG_ERROR, "illegal aspect ratio\n");
+             av_log(avctx, AV_LOG_ERROR, "illegal aspect ratio\n");
              return AVERROR_INVALIDDATA;
          }
      } else {
@@@ -155,58 -199,49 +176,58 @@@
      }
  
      /* chroma_location_info_present_flag */
-     if (get_bits1(&h->gb)) {
+     if (get_bits1(gb)) {
          /* chroma_sample_location_type_top_field */
-         h->avctx->chroma_sample_location = get_ue_golomb(&h->gb) + 1;
-         get_ue_golomb(&h->gb);  /* chroma_sample_location_type_bottom_field */
+         avctx->chroma_sample_location = get_ue_golomb(gb) + 1;
+         get_ue_golomb(gb);  /* chroma_sample_location_type_bottom_field */
      }
  
-     if (show_bits1(&h->gb) && get_bits_left(&h->gb) < 10) {
-         av_log(h->avctx, AV_LOG_WARNING, "Truncated VUI\n");
++    if (show_bits1(gb) && get_bits_left(gb) < 10) {
++        av_log(avctx, AV_LOG_WARNING, "Truncated VUI\n");
 +        return 0;
 +    }
 +
-     sps->timing_info_present_flag = get_bits1(&h->gb);
+     sps->timing_info_present_flag = get_bits1(gb);
      if (sps->timing_info_present_flag) {
-         unsigned num_units_in_tick = get_bits_long(&h->gb, 32);
-         unsigned time_scale        = get_bits_long(&h->gb, 32);
 -        sps->num_units_in_tick = get_bits_long(gb, 32);
 -        sps->time_scale        = get_bits_long(gb, 32);
 -        if (!sps->num_units_in_tick || !sps->time_scale) {
++        unsigned num_units_in_tick = get_bits_long(gb, 32);
++        unsigned time_scale        = get_bits_long(gb, 32);
 +        if (!num_units_in_tick || !time_scale) {
-             av_log(h->avctx, AV_LOG_ERROR,
+             av_log(avctx, AV_LOG_ERROR,
 -                   "time_scale/num_units_in_tick invalid or unsupported (%"PRIu32"/%"PRIu32")\n",
 -                   sps->time_scale, sps->num_units_in_tick);
 -            return AVERROR_INVALIDDATA;
 +                   "time_scale/num_units_in_tick invalid or unsupported (%u/%u)\n",
 +                   time_scale, num_units_in_tick);
 +            sps->timing_info_present_flag = 0;
 +        } else {
 +            sps->num_units_in_tick = num_units_in_tick;
 +            sps->time_scale = time_scale;
          }
-         sps->fixed_frame_rate_flag = get_bits1(&h->gb);
+         sps->fixed_frame_rate_flag = get_bits1(gb);
      }
  
-     sps->nal_hrd_parameters_present_flag = get_bits1(&h->gb);
+     sps->nal_hrd_parameters_present_flag = get_bits1(gb);
      if (sps->nal_hrd_parameters_present_flag)
-         if (decode_hrd_parameters(h, sps) < 0)
+         if (decode_hrd_parameters(gb, avctx, sps) < 0)
              return AVERROR_INVALIDDATA;
-     sps->vcl_hrd_parameters_present_flag = get_bits1(&h->gb);
+     sps->vcl_hrd_parameters_present_flag = get_bits1(gb);
      if (sps->vcl_hrd_parameters_present_flag)
-         if (decode_hrd_parameters(h, sps) < 0)
+         if (decode_hrd_parameters(gb, avctx, sps) < 0)
              return AVERROR_INVALIDDATA;
      if (sps->nal_hrd_parameters_present_flag ||
          sps->vcl_hrd_parameters_present_flag)
-         get_bits1(&h->gb);     /* low_delay_hrd_flag */
-     sps->pic_struct_present_flag = get_bits1(&h->gb);
-     if (!get_bits_left(&h->gb))
+         get_bits1(gb);     /* low_delay_hrd_flag */
+     sps->pic_struct_present_flag = get_bits1(gb);
 -
++    if (!get_bits_left(gb))
 +        return 0;
-     sps->bitstream_restriction_flag = get_bits1(&h->gb);
+     sps->bitstream_restriction_flag = get_bits1(gb);
      if (sps->bitstream_restriction_flag) {
-         get_bits1(&h->gb);     /* motion_vectors_over_pic_boundaries_flag */
-         get_ue_golomb(&h->gb); /* max_bytes_per_pic_denom */
-         get_ue_golomb(&h->gb); /* max_bits_per_mb_denom */
-         get_ue_golomb(&h->gb); /* log2_max_mv_length_horizontal */
-         get_ue_golomb(&h->gb); /* log2_max_mv_length_vertical */
-         sps->num_reorder_frames = get_ue_golomb(&h->gb);
-         get_ue_golomb(&h->gb); /*max_dec_frame_buffering*/
- 
-         if (get_bits_left(&h->gb) < 0) {
+         get_bits1(gb);     /* motion_vectors_over_pic_boundaries_flag */
+         get_ue_golomb(gb); /* max_bytes_per_pic_denom */
+         get_ue_golomb(gb); /* max_bits_per_mb_denom */
+         get_ue_golomb(gb); /* log2_max_mv_length_horizontal */
+         get_ue_golomb(gb); /* log2_max_mv_length_vertical */
+         sps->num_reorder_frames = get_ue_golomb(gb);
+         get_ue_golomb(gb); /*max_dec_frame_buffering*/
+ 
+         if (get_bits_left(gb) < 0) {
              sps->num_reorder_frames         = 0;
              sps->bitstream_restriction_flag = 0;
          }
@@@ -256,61 -296,59 +277,64 @@@ static void decode_scaling_matrices(Get
          fallback_sps ? sps->scaling_matrix8[0] : default_scaling8[0],
          fallback_sps ? sps->scaling_matrix8[3] : default_scaling8[1]
      };
-     if (get_bits1(&h->gb)) {
+     if (get_bits1(gb)) {
          sps->scaling_matrix_present |= is_sps;
-         decode_scaling_list(h, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]);        // Intra, Y
-         decode_scaling_list(h, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr
-         decode_scaling_list(h, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb
-         decode_scaling_list(h, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]);        // Inter, Y
-         decode_scaling_list(h, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr
-         decode_scaling_list(h, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb
+         decode_scaling_list(gb, scaling_matrix4[0], 16, default_scaling4[0], fallback[0]);        // Intra, Y
+         decode_scaling_list(gb, scaling_matrix4[1], 16, default_scaling4[0], scaling_matrix4[0]); // Intra, Cr
+         decode_scaling_list(gb, scaling_matrix4[2], 16, default_scaling4[0], scaling_matrix4[1]); // Intra, Cb
+         decode_scaling_list(gb, scaling_matrix4[3], 16, default_scaling4[1], fallback[1]);        // Inter, Y
+         decode_scaling_list(gb, scaling_matrix4[4], 16, default_scaling4[1], scaling_matrix4[3]); // Inter, Cr
+         decode_scaling_list(gb, scaling_matrix4[5], 16, default_scaling4[1], scaling_matrix4[4]); // Inter, Cb
          if (is_sps || pps->transform_8x8_mode) {
-             decode_scaling_list(h, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y
-             decode_scaling_list(h, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y
+             decode_scaling_list(gb, scaling_matrix8[0], 64, default_scaling8[0], fallback[2]); // Intra, Y
 -            if (sps->chroma_format_idc == 3) {
 -                decode_scaling_list(gb, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr
 -                decode_scaling_list(gb, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb
 -            }
+             decode_scaling_list(gb, scaling_matrix8[3], 64, default_scaling8[1], fallback[3]); // Inter, Y
              if (sps->chroma_format_idc == 3) {
-                 decode_scaling_list(h, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr
-                 decode_scaling_list(h, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr
-                 decode_scaling_list(h, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb
-                 decode_scaling_list(h, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb
++                decode_scaling_list(gb, scaling_matrix8[1], 64, default_scaling8[0], scaling_matrix8[0]); // Intra, Cr
+                 decode_scaling_list(gb, scaling_matrix8[4], 64, default_scaling8[1], scaling_matrix8[3]); // Inter, Cr
++                decode_scaling_list(gb, scaling_matrix8[2], 64, default_scaling8[0], scaling_matrix8[1]); // Intra, Cb
+                 decode_scaling_list(gb, scaling_matrix8[5], 64, default_scaling8[1], scaling_matrix8[4]); // Inter, Cb
              }
          }
      }
  }
  
- int ff_h264_decode_seq_parameter_set(H264Context *h, int ignore_truncation)
+ int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
 -                                     H264ParamSets *ps)
++                                     H264ParamSets *ps, int ignore_truncation)
  {
+     AVBufferRef *sps_buf;
      int profile_idc, level_idc, constraint_set_flags = 0;
      unsigned int sps_id;
      int i, log2_max_frame_num_minus4;
      SPS *sps;
  
-     sps = av_mallocz(sizeof(SPS));
-     if (!sps)
++    sps_buf = av_buffer_allocz(sizeof(*sps));
++    if (!sps_buf)
 +        return AVERROR(ENOMEM);
++    sps = (SPS*)sps_buf->data;
 +
-     sps->data_size = h->gb.buffer_end - h->gb.buffer;
++    sps->data_size = gb->buffer_end - gb->buffer;
 +    if (sps->data_size > sizeof(sps->data)) {
-         av_log(h->avctx, AV_LOG_WARNING, "Truncating likely oversized SPS\n");
++        av_log(avctx, AV_LOG_WARNING, "Truncating likely oversized SPS\n");
 +        sps->data_size = sizeof(sps->data);
 +    }
-     memcpy(sps->data, h->gb.buffer, sps->data_size);
- 
-     profile_idc           = get_bits(&h->gb, 8);
-     constraint_set_flags |= get_bits1(&h->gb) << 0;   // constraint_set0_flag
-     constraint_set_flags |= get_bits1(&h->gb) << 1;   // constraint_set1_flag
-     constraint_set_flags |= get_bits1(&h->gb) << 2;   // constraint_set2_flag
-     constraint_set_flags |= get_bits1(&h->gb) << 3;   // constraint_set3_flag
-     constraint_set_flags |= get_bits1(&h->gb) << 4;   // constraint_set4_flag
-     constraint_set_flags |= get_bits1(&h->gb) << 5;   // constraint_set5_flag
-     skip_bits(&h->gb, 2);                             // reserved_zero_2bits
-     level_idc = get_bits(&h->gb, 8);
-     sps_id    = get_ue_golomb_31(&h->gb);
++    memcpy(sps->data, gb->buffer, sps->data_size);
++
+     profile_idc           = get_bits(gb, 8);
+     constraint_set_flags |= get_bits1(gb) << 0;   // constraint_set0_flag
+     constraint_set_flags |= get_bits1(gb) << 1;   // constraint_set1_flag
+     constraint_set_flags |= get_bits1(gb) << 2;   // constraint_set2_flag
+     constraint_set_flags |= get_bits1(gb) << 3;   // constraint_set3_flag
+     constraint_set_flags |= get_bits1(gb) << 4;   // constraint_set4_flag
+     constraint_set_flags |= get_bits1(gb) << 5;   // constraint_set5_flag
+     skip_bits(gb, 2);                             // reserved_zero_2bits
+     level_idc = get_bits(gb, 8);
+     sps_id    = get_ue_golomb_31(gb);
  
      if (sps_id >= MAX_SPS_COUNT) {
-         av_log(h->avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id);
+         av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", sps_id);
 -        return AVERROR_INVALIDDATA;
 +        goto fail;
      }
  
 -    sps_buf = av_buffer_allocz(sizeof(*sps));
 -    if (!sps_buf)
 -        return AVERROR(ENOMEM);
 -    sps = (SPS*)sps_buf->data;
 -
      sps->sps_id               = sps_id;
      sps->time_offset_length   = 24;
      sps->profile_idc          = profile_idc;
@@@ -334,33 -370,23 +358,33 @@@
          sps->profile_idc == 128 ||  // Multiview High profile (MVC)
          sps->profile_idc == 138 ||  // Multiview Depth High profile (MVCD)
          sps->profile_idc == 144) {  // old High444 profile
-         sps->chroma_format_idc = get_ue_golomb_31(&h->gb);
+         sps->chroma_format_idc = get_ue_golomb_31(gb);
 -        if (sps->chroma_format_idc > 3) {
 +        if (sps->chroma_format_idc > 3U) {
-             avpriv_request_sample(h->avctx, "chroma_format_idc %u",
+             avpriv_request_sample(avctx, "chroma_format_idc %u",
                                    sps->chroma_format_idc);
              goto fail;
          } else if (sps->chroma_format_idc == 3) {
-             sps->residual_color_transform_flag = get_bits1(&h->gb);
+             sps->residual_color_transform_flag = get_bits1(gb);
 +            if (sps->residual_color_transform_flag) {
-                 av_log(h->avctx, AV_LOG_ERROR, "separate color planes are not supported\n");
++                av_log(avctx, AV_LOG_ERROR, "separate color planes are not supported\n");
 +                goto fail;
 +            }
          }
-         sps->bit_depth_luma   = get_ue_golomb(&h->gb) + 8;
-         sps->bit_depth_chroma = get_ue_golomb(&h->gb) + 8;
+         sps->bit_depth_luma   = get_ue_golomb(gb) + 8;
+         sps->bit_depth_chroma = get_ue_golomb(gb) + 8;
          if (sps->bit_depth_chroma != sps->bit_depth_luma) {
-             avpriv_request_sample(h->avctx,
+             avpriv_request_sample(avctx,
                                    "Different chroma and luma bit depth");
              goto fail;
          }
 +        if (sps->bit_depth_luma   < 8 || sps->bit_depth_luma   > 14 ||
 +            sps->bit_depth_chroma < 8 || sps->bit_depth_chroma > 14) {
-             av_log(h->avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
++            av_log(avctx, AV_LOG_ERROR, "illegal bit depth value (%d, %d)\n",
 +                   sps->bit_depth_luma, sps->bit_depth_chroma);
 +            goto fail;
 +        }
-         sps->transform_bypass = get_bits1(&h->gb);
-         decode_scaling_matrices(h, sps, NULL, 1,
+         sps->transform_bypass = get_bits1(gb);
+         decode_scaling_matrices(gb, sps, NULL, 1,
                                  sps->scaling_matrix4, sps->scaling_matrix8);
      } else {
          sps->chroma_format_idc = 1;
@@@ -378,20 -404,15 +402,20 @@@
      }
      sps->log2_max_frame_num = log2_max_frame_num_minus4 + 4;
  
-     sps->poc_type = get_ue_golomb_31(&h->gb);
+     sps->poc_type = get_ue_golomb_31(gb);
  
      if (sps->poc_type == 0) { // FIXME #define
-         unsigned t = get_ue_golomb(&h->gb);
 -        sps->log2_max_poc_lsb = get_ue_golomb(gb) + 4;
++        unsigned t = get_ue_golomb(gb);
 +        if (t>12) {
-             av_log(h->avctx, AV_LOG_ERROR, "log2_max_poc_lsb (%d) is out of range\n", t);
++            av_log(avctx, AV_LOG_ERROR, "log2_max_poc_lsb (%d) is out of range\n", t);
 +            goto fail;
 +        }
 +        sps->log2_max_poc_lsb = t + 4;
      } else if (sps->poc_type == 1) { // FIXME #define
-         sps->delta_pic_order_always_zero_flag = get_bits1(&h->gb);
-         sps->offset_for_non_ref_pic           = get_se_golomb(&h->gb);
-         sps->offset_for_top_to_bottom_field   = get_se_golomb(&h->gb);
-         sps->poc_cycle_length                 = get_ue_golomb(&h->gb);
+         sps->delta_pic_order_always_zero_flag = get_bits1(gb);
+         sps->offset_for_non_ref_pic           = get_se_golomb(gb);
+         sps->offset_for_top_to_bottom_field   = get_se_golomb(gb);
+         sps->poc_cycle_length                 = get_ue_golomb(gb);
  
          if ((unsigned)sps->poc_cycle_length >=
              FF_ARRAY_ELEMS(sps->offset_for_ref_frame)) {
@@@ -407,12 -428,10 +431,12 @@@
          goto fail;
      }
  
-     sps->ref_frame_count = get_ue_golomb_31(&h->gb);
-     if (h->avctx->codec_tag == MKTAG('S', 'M', 'V', '2'))
+     sps->ref_frame_count = get_ue_golomb_31(gb);
++    if (avctx->codec_tag == MKTAG('S', 'M', 'V', '2'))
 +        sps->ref_frame_count = FFMAX(2, sps->ref_frame_count);
      if (sps->ref_frame_count > H264_MAX_PICTURE_COUNT - 2 ||
 -        sps->ref_frame_count >= 32U) {
 +        sps->ref_frame_count > 16U) {
-         av_log(h->avctx, AV_LOG_ERROR,
+         av_log(avctx, AV_LOG_ERROR,
                 "too many reference frames %d\n", sps->ref_frame_count);
          goto fail;
      }
@@@ -433,24 -452,27 +457,24 @@@
      else
          sps->mb_aff = 0;
  
-     sps->direct_8x8_inference_flag = get_bits1(&h->gb);
+     sps->direct_8x8_inference_flag = get_bits1(gb);
  
  #ifndef ALLOW_INTERLACE
      if (sps->mb_aff)
-         av_log(h->avctx, AV_LOG_ERROR,
+         av_log(avctx, AV_LOG_ERROR,
                 "MBAFF support not included; enable it at compile-time.\n");
  #endif
-     sps->crop = get_bits1(&h->gb);
+     sps->crop = get_bits1(gb);
      if (sps->crop) {
-         unsigned int crop_left   = get_ue_golomb(&h->gb);
-         unsigned int crop_right  = get_ue_golomb(&h->gb);
-         unsigned int crop_top    = get_ue_golomb(&h->gb);
-         unsigned int crop_bottom = get_ue_golomb(&h->gb);
+         unsigned int crop_left   = get_ue_golomb(gb);
+         unsigned int crop_right  = get_ue_golomb(gb);
+         unsigned int crop_top    = get_ue_golomb(gb);
+         unsigned int crop_bottom = get_ue_golomb(gb);
 +        int width  = 16 * sps->mb_width;
 +        int height = 16 * sps->mb_height * (2 - sps->frame_mbs_only_flag);
  
-         if (h->avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) {
-             av_log(h->avctx, AV_LOG_DEBUG, "discarding sps cropping, original "
+         if (avctx->flags2 & AV_CODEC_FLAG2_IGNORE_CROP) {
+             av_log(avctx, AV_LOG_DEBUG, "discarding sps cropping, original "
                                             "values are l:%d r:%d t:%d b:%d\n",
                     crop_left, crop_right, crop_top, crop_bottom);
  
@@@ -474,15 -496,16 +498,15 @@@
                         crop_left);
              }
  
 -            if (INT_MAX / step_x             <= crop_left               ||
 -                INT_MAX / step_x - crop_left <= crop_right              ||
 -                16 * sps->mb_width <= step_x * (crop_left + crop_right) ||
 -                INT_MAX / step_y             <= crop_top                ||
 -                INT_MAX / step_y - crop_top  <= crop_bottom             ||
 -                16 * sps->mb_height <= step_y * (crop_top + crop_bottom)) {
 -                av_log(avctx, AV_LOG_WARNING, "Invalid crop parameters\n");
 -                if (avctx->err_recognition & AV_EF_EXPLODE)
 -                    goto fail;
 -                crop_left = crop_right = crop_top = crop_bottom = 0;
 +            if (crop_left  > (unsigned)INT_MAX / 4 / step_x ||
 +                crop_right > (unsigned)INT_MAX / 4 / step_x ||
 +                crop_top   > (unsigned)INT_MAX / 4 / step_y ||
 +                crop_bottom> (unsigned)INT_MAX / 4 / step_y ||
 +                (crop_left + crop_right ) * step_x >= width ||
 +                (crop_top  + crop_bottom) * step_y >= height
 +            ) {
-                 av_log(h->avctx, AV_LOG_ERROR, "crop values invalid %d %d %d %d / %d %d\n", crop_left, crop_right, crop_top, crop_bottom, width, height);
++                av_log(avctx, AV_LOG_ERROR, "crop values invalid %d %d %d %d / %d %d\n", crop_left, crop_right, crop_top, crop_bottom, width, height);
 +                goto fail;
              }
  
              sps->crop_left   = crop_left   * step_x;
@@@ -498,17 -521,10 +522,17 @@@
          sps->crop        = 0;
      }
  
-     sps->vui_parameters_present_flag = get_bits1(&h->gb);
+     sps->vui_parameters_present_flag = get_bits1(gb);
      if (sps->vui_parameters_present_flag) {
-         int ret = decode_vui_parameters(h, sps);
+         int ret = decode_vui_parameters(gb, avctx, sps);
 -        if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE)
 +        if (ret < 0)
 +            goto fail;
 +    }
 +
-     if (get_bits_left(&h->gb) < 0) {
-         av_log(h->avctx, ignore_truncation ? AV_LOG_WARNING : AV_LOG_ERROR,
-                "Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(&h->gb));
++    if (get_bits_left(gb) < 0) {
++        av_log(avctx, ignore_truncation ? AV_LOG_WARNING : AV_LOG_ERROR,
++               "Overread %s by %d bits\n", sps->vui_parameters_present_flag ? "VUI" : "SPS", -get_bits_left(gb));
 +        if (!ignore_truncation)
              goto fail;
      }
  
@@@ -528,10 -544,10 +552,10 @@@
      if (!sps->sar.den)
          sps->sar.den = 1;
  
-     if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
+     if (avctx->debug & FF_DEBUG_PICT_INFO) {
          static const char csp[4][5] = { "Gray", "420", "422", "444" };
-         av_log(h->avctx, AV_LOG_DEBUG,
+         av_log(avctx, AV_LOG_DEBUG,
 -               "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %"PRId32"/%"PRId32"\n",
 +               "sps:%u profile:%d/%d poc:%d ref:%d %dx%d %s %s crop:%u/%u/%u/%u %s %s %"PRId32"/%"PRId32" b%d reo:%d\n",
                 sps_id, sps->profile_idc, sps->level_idc,
                 sps->poc_type,
                 sps->ref_frame_count,
@@@ -543,15 -559,19 +567,22 @@@
                 sps->vui_parameters_present_flag ? "VUI" : "",
                 csp[sps->chroma_format_idc],
                 sps->timing_info_present_flag ? sps->num_units_in_tick : 0,
 -               sps->timing_info_present_flag ? sps->time_scale : 0);
 +               sps->timing_info_present_flag ? sps->time_scale : 0,
 +               sps->bit_depth_luma,
 +               sps->bitstream_restriction_flag ? sps->num_reorder_frames : -1
 +               );
      }
-     sps->new = 1;
  
-     av_free(h->sps_buffers[sps_id]);
-     h->sps_buffers[sps_id] = sps;
+     /* check if this is a repeat of an already parsed SPS, then keep the
+      * original one.
+      * otherwise drop all PPSes that depend on it */
+     if (ps->sps_list[sps_id] &&
+         !memcmp(ps->sps_list[sps_id]->data, sps_buf->data, sps_buf->size)) {
+         av_buffer_unref(&sps_buf);
+     } else {
+         remove_sps(ps, sps_id);
+         ps->sps_list[sps_id] = sps_buf;
+     }
  
      return 0;
  
@@@ -560,6 -580,76 +591,78 @@@ fail
      return AVERROR_INVALIDDATA;
  }
  
+ static void init_dequant8_coeff_table(PPS *pps, const SPS *sps)
+ {
+     int i, j, q, x;
+     const int max_qp = 51 + 6 * (sps->bit_depth_luma - 8);
+ 
+     for (i = 0; i < 6; i++) {
+         pps->dequant8_coeff[i] = pps->dequant8_buffer[i];
+         for (j = 0; j < i; j++)
+             if (!memcmp(pps->scaling_matrix8[j], pps->scaling_matrix8[i],
+                         64 * sizeof(uint8_t))) {
+                 pps->dequant8_coeff[i] = pps->dequant8_buffer[j];
+                 break;
+             }
+         if (j < i)
+             continue;
+ 
+         for (q = 0; q < max_qp + 1; q++) {
+             int shift = ff_h264_quant_div6[q];
+             int idx   = ff_h264_quant_rem6[q];
+             for (x = 0; x < 64; x++)
+                 pps->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
+                     ((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
+                      pps->scaling_matrix8[i][x]) << shift;
+         }
+     }
+ }
+ 
+ static void init_dequant4_coeff_table(PPS *pps, const SPS *sps)
+ {
+     int i, j, q, x;
+     const int max_qp = 51 + 6 * (sps->bit_depth_luma - 8);
+     for (i = 0; i < 6; i++) {
+         pps->dequant4_coeff[i] = pps->dequant4_buffer[i];
+         for (j = 0; j < i; j++)
+             if (!memcmp(pps->scaling_matrix4[j], pps->scaling_matrix4[i],
+                         16 * sizeof(uint8_t))) {
+                 pps->dequant4_coeff[i] = pps->dequant4_buffer[j];
+                 break;
+             }
+         if (j < i)
+             continue;
+ 
+         for (q = 0; q < max_qp + 1; q++) {
+             int shift = ff_h264_quant_div6[q] + 2;
+             int idx   = ff_h264_quant_rem6[q];
+             for (x = 0; x < 16; x++)
+                 pps->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
+                     ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
+                      pps->scaling_matrix4[i][x]) << shift;
+         }
+     }
+ }
+ 
+ static void init_dequant_tables(PPS *pps, const SPS *sps)
+ {
+     int i, x;
+     init_dequant4_coeff_table(pps, sps);
++    memset(pps->dequant8_coeff, 0, sizeof(pps->dequant8_coeff));
++
+     if (pps->transform_8x8_mode)
+         init_dequant8_coeff_table(pps, sps);
+     if (sps->transform_bypass) {
+         for (i = 0; i < 6; i++)
+             for (x = 0; x < 16; x++)
+                 pps->dequant4_coeff[i][0][x] = 1 << 6;
+         if (pps->transform_8x8_mode)
+             for (i = 0; i < 6; i++)
+                 for (x = 0; x < 64; x++)
+                     pps->dequant8_coeff[i][0][x] = 1 << 6;
+     }
+ }
+ 
  static void build_qp_table(PPS *pps, int t, int index, const int depth)
  {
      int i;
@@@ -569,25 -659,12 +672,26 @@@
              ff_h264_chroma_qp[depth - 8][av_clip(i + index, 0, max_qp)];
  }
  
- static int more_rbsp_data_in_pps(H264Context *h, PPS *pps)
++static int more_rbsp_data_in_pps(const SPS *sps, void *logctx)
 +{
-     const SPS *sps = h->sps_buffers[pps->sps_id];
 +    int profile_idc = sps->profile_idc;
 +
 +    if ((profile_idc == 66 || profile_idc == 77 ||
 +         profile_idc == 88) && (sps->constraint_set_flags & 7)) {
-         av_log(h->avctx, AV_LOG_VERBOSE,
++        av_log(logctx, AV_LOG_VERBOSE,
 +               "Current profile doesn't provide more RBSP data in PPS, skipping\n");
 +        return 0;
 +    }
 +
 +    return 1;
 +}
 +
- int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length)
+ int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
+                                          H264ParamSets *ps, int bit_length)
  {
-     const SPS *sps;
-     unsigned int pps_id = get_ue_golomb(&h->gb);
+     AVBufferRef *pps_buf;
+     SPS *sps;
+     unsigned int pps_id = get_ue_golomb(gb);
      PPS *pps;
      int qp_bd_offset;
      int bits_left;
@@@ -598,32 -675,23 +702,35 @@@
          return AVERROR_INVALIDDATA;
      }
  
-     pps = av_mallocz(sizeof(PPS));
-     if (!pps)
+     pps_buf = av_buffer_allocz(sizeof(*pps));
+     if (!pps_buf)
          return AVERROR(ENOMEM);
-     pps->data_size = h->gb.buffer_end - h->gb.buffer;
+     pps = (PPS*)pps_buf->data;
+ 
++    pps->data_size = gb->buffer_end - gb->buffer;
 +    if (pps->data_size > sizeof(pps->data)) {
-         av_log(h->avctx, AV_LOG_WARNING, "Truncating likely oversized PPS\n");
++        av_log(avctx, AV_LOG_WARNING, "Truncating likely oversized PPS\n");
 +        pps->data_size = sizeof(pps->data);
 +    }
-     memcpy(pps->data, h->gb.buffer, pps->data_size);
-     pps->sps_id = get_ue_golomb_31(&h->gb);
++    memcpy(pps->data, gb->buffer, pps->data_size);
++
+     pps->sps_id = get_ue_golomb_31(gb);
      if ((unsigned)pps->sps_id >= MAX_SPS_COUNT ||
-         !h->sps_buffers[pps->sps_id]) {
-         av_log(h->avctx, AV_LOG_ERROR, "sps_id %u out of range\n", pps->sps_id);
+         !ps->sps_list[pps->sps_id]) {
+         av_log(avctx, AV_LOG_ERROR, "sps_id %u out of range\n", pps->sps_id);
          ret = AVERROR_INVALIDDATA;
          goto fail;
      }
-     sps = h->sps_buffers[pps->sps_id];
+     sps = (SPS*)ps->sps_list[pps->sps_id]->data;
 -
 -    if (sps->bit_depth_luma > 10) {
 +    if (sps->bit_depth_luma > 14) {
-         av_log(h->avctx, AV_LOG_ERROR,
++        av_log(avctx, AV_LOG_ERROR,
 +               "Invalid luma bit depth=%d\n",
 +               sps->bit_depth_luma);
 +        ret = AVERROR_INVALIDDATA;
 +        goto fail;
 +    } else if (sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13) {
-         av_log(h->avctx, AV_LOG_ERROR,
+         av_log(avctx, AV_LOG_ERROR,
 -               "Unimplemented luma bit depth=%d (max=10)\n",
 +               "Unimplemented luma bit depth=%d\n",
                 sps->bit_depth_luma);
          ret = AVERROR_PATCHWELCOME;
          goto fail;
@@@ -677,30 -745,29 +784,28 @@@
  
      qp_bd_offset = 6 * (sps->bit_depth_luma - 8);
  
-     pps->weighted_pred                        = get_bits1(&h->gb);
-     pps->weighted_bipred_idc                  = get_bits(&h->gb, 2);
-     pps->init_qp                              = get_se_golomb(&h->gb) + 26 + qp_bd_offset;
-     pps->init_qs                              = get_se_golomb(&h->gb) + 26 + qp_bd_offset;
-     pps->chroma_qp_index_offset[0]            = get_se_golomb(&h->gb);
-     pps->deblocking_filter_parameters_present = get_bits1(&h->gb);
-     pps->constrained_intra_pred               = get_bits1(&h->gb);
-     pps->redundant_pic_cnt_present            = get_bits1(&h->gb);
+     pps->weighted_pred                        = get_bits1(gb);
+     pps->weighted_bipred_idc                  = get_bits(gb, 2);
+     pps->init_qp                              = get_se_golomb(gb) + 26 + qp_bd_offset;
+     pps->init_qs                              = get_se_golomb(gb) + 26 + qp_bd_offset;
+     pps->chroma_qp_index_offset[0]            = get_se_golomb(gb);
+     pps->deblocking_filter_parameters_present = get_bits1(gb);
+     pps->constrained_intra_pred               = get_bits1(gb);
+     pps->redundant_pic_cnt_present            = get_bits1(gb);
  
      pps->transform_8x8_mode = 0;
-     // contents of sps/pps can change even if id doesn't, so reinit
-     h->dequant_coeff_pps = -1;
-     memcpy(pps->scaling_matrix4, h->sps_buffers[pps->sps_id]->scaling_matrix4,
+     memcpy(pps->scaling_matrix4, sps->scaling_matrix4,
             sizeof(pps->scaling_matrix4));
-     memcpy(pps->scaling_matrix8, h->sps_buffers[pps->sps_id]->scaling_matrix8,
+     memcpy(pps->scaling_matrix8, sps->scaling_matrix8,
             sizeof(pps->scaling_matrix8));
  
-     bits_left = bit_length - get_bits_count(&h->gb);
-     if (bits_left > 0 && more_rbsp_data_in_pps(h, pps)) {
-         pps->transform_8x8_mode = get_bits1(&h->gb);
-         decode_scaling_matrices(h, h->sps_buffers[pps->sps_id], pps, 0,
+     bits_left = bit_length - get_bits_count(gb);
 -    if (bits_left && (bits_left > 8 ||
 -                      show_bits(gb, bits_left) != 1 << (bits_left - 1))) {
++    if (bits_left > 0 && more_rbsp_data_in_pps(sps, avctx)) {
+         pps->transform_8x8_mode = get_bits1(gb);
+         decode_scaling_matrices(gb, sps, pps, 0,
                                  pps->scaling_matrix4, pps->scaling_matrix8);
          // second_chroma_qp_index_offset
-         pps->chroma_qp_index_offset[1] = get_se_golomb(&h->gb);
+         pps->chroma_qp_index_offset[1] = get_se_golomb(gb);
      } else {
          pps->chroma_qp_index_offset[1] = pps->chroma_qp_index_offset[0];
      }
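
With parameter-set parsing decoupled from the decoder, ff_h264_decode_picture_parameter_set() takes an explicit GetBitContext, AVCodecContext and H264ParamSets, and the resulting PPS is held as a reference-counted AVBufferRef in the parameter-set lists. A minimal caller sketch of the new signature, not part of the patch; decode_pps_standalone(), rbsp and rbsp_size are hypothetical, and the buffer is assumed to already hold the unescaped RBSP payload:

    #include "get_bits.h"
    #include "h264.h"   /* H264ParamSets, ff_h264_decode_picture_parameter_set() */

    /* Hypothetical helper: parse one PPS payload into a standalone
     * H264ParamSets, with no H264Context involved. */
    static int decode_pps_standalone(AVCodecContext *avctx, H264ParamSets *ps,
                                     const uint8_t *rbsp, int rbsp_size)
    {
        GetBitContext gb;
        int ret = init_get_bits8(&gb, rbsp, rbsp_size);
        if (ret < 0)
            return ret;
        /* On success the PPS is stored in ps->pps_list[pps_id] as an
         * AVBufferRef, so it stays valid for as long as a reference is held. */
        return ff_h264_decode_picture_parameter_set(&gb, avctx, ps, 8 * rbsp_size);
    }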
diff --cc libavcodec/h264_refs.c
index 279dbcf,e0a9334..02c7867
--- a/libavcodec/h264_refs.c
+++ b/libavcodec/h264_refs.c
@@@ -591,8 -526,10 +591,8 @@@ int ff_generate_sliding_window_mmcos(H2
      MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
      int mmco_index = 0, i = 0;
  
 -    assert(h->long_ref_count + h->short_ref_count <= h->ps.sps->ref_frame_count);
 -
      if (h->short_ref_count &&
-         h->long_ref_count + h->short_ref_count >= h->sps.ref_frame_count &&
 -        h->long_ref_count + h->short_ref_count == h->ps.sps->ref_frame_count &&
++        h->long_ref_count + h->short_ref_count >= h->ps.sps->ref_frame_count &&
          !(FIELD_PICTURE(h) && !h->first_field && h->cur_pic_ptr->reference)) {
          mmco[0].opcode        = MMCO_SHORT2UNUSED;
          mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
@@@ -768,7 -697,8 +768,7 @@@ int ff_h264_execute_ref_pic_marking(H26
          }
      }
  
-     if (h->long_ref_count + h->short_ref_count > FFMAX(h->sps.ref_frame_count, 1)) {
 -    if (h->long_ref_count + h->short_ref_count -
 -        (h->short_ref[0] == h->cur_pic_ptr) > h->ps.sps->ref_frame_count) {
++    if (h->long_ref_count + h->short_ref_count > FFMAX(h->ps.sps->ref_frame_count, 1)) {
  
          /* We have too many reference frames, probably due to corrupted
           * stream. Need to discard one frame. Prevents overrun of the
@@@ -793,36 -723,8 +793,37 @@@
          }
      }
  
 +    for (i = 0; i<h->short_ref_count; i++) {
 +        pic = h->short_ref[i];
 +        if (pic->invalid_gap) {
-             int d = av_mod_uintp2(h->cur_pic_ptr->frame_num - pic->frame_num, h->sps.log2_max_frame_num);
-             if (d > h->sps.ref_frame_count)
++            int d = av_mod_uintp2(h->cur_pic_ptr->frame_num - pic->frame_num, h->ps.sps->log2_max_frame_num);
++            if (d > h->ps.sps->ref_frame_count)
 +                remove_short(h, pic->frame_num, 0);
 +        }
 +    }
 +
      print_short_term(h);
      print_long_term(h);
 +
-     for (i = 0; i < FF_ARRAY_ELEMS(h->pps_buffers); i++) {
-         if (h->pps_buffers[i]) {
-             pps_ref_count[0] = FFMAX(pps_ref_count[0], h->pps_buffers[i]->ref_count[0]);
-             pps_ref_count[1] = FFMAX(pps_ref_count[1], h->pps_buffers[i]->ref_count[1]);
++    for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
++        if (h->ps.pps_list[i]) {
++            const PPS *pps = (const PPS *)h->ps.pps_list[i]->data;
++            pps_ref_count[0] = FFMAX(pps_ref_count[0], pps->ref_count[0]);
++            pps_ref_count[1] = FFMAX(pps_ref_count[1], pps->ref_count[1]);
 +        }
 +    }
 +
 +    if (   err >= 0
 +        && h->long_ref_count==0
 +        && (   h->short_ref_count<=2
 +            || pps_ref_count[0] <= 1 + (h->picture_structure != PICT_FRAME) && pps_ref_count[1] <= 1)
 +        && pps_ref_count[0]<=2 + (h->picture_structure != PICT_FRAME) + (2*!h->has_recovery_point)
 +        && h->cur_pic_ptr->f->pict_type == AV_PICTURE_TYPE_I){
 +        h->cur_pic_ptr->recovered |= 1;
 +        if(!h->avctx->has_b_frames)
 +            h->frame_recovered |= FRAME_RECOVERED_SEI;
 +    }
 +
      return (h->avctx->err_recognition & AV_EF_EXPLODE) ? err : 0;
  }
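
The invalid_gap cleanup above relies on frame_num distances being computed modulo 2^log2_max_frame_num via av_mod_uintp2(). A worked illustration with made-up numbers, not taken from the patch:

    #include <assert.h>
    #include "libavutil/common.h"   /* av_mod_uintp2() */

    /* With log2_max_frame_num = 4 the counter wraps at 16, so the distance
     * from frame_num 14 forward to frame_num 1 is (1 - 14) & 15 = 3, not -13.
     * With ref_frame_count = 2 that distance exceeds the DPB window and the
     * gap-filler picture would be removed from the short-term list. */
    static void frame_num_distance_example(void)
    {
        unsigned d = av_mod_uintp2(1 - 14, 4);
        assert(d == 3);
    }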
  
diff --cc libavcodec/h264_sei.c
index 77dd7b2,aedb295..bdc5c9f
--- a/libavcodec/h264_sei.c
+++ b/libavcodec/h264_sei.c
@@@ -50,18 -50,17 +50,23 @@@ void ff_h264_reset_sei(H264Context *h
  
  static int decode_picture_timing(H264Context *h)
  {
-     SPS *sps = &h->sps;
+     const SPS *sps = h->ps.sps;
 +    int i;
  
 -    if (!sps)
 -        return AVERROR_INVALIDDATA;
 +    for (i = 0; i<MAX_SPS_COUNT; i++)
-         if (!sps->log2_max_frame_num && h->sps_buffers[i])
-             sps = h->sps_buffers[i];
++        if ((!sps || !sps->log2_max_frame_num) && h->ps.sps_list[i])
++            sps = (const SPS *)h->ps.sps_list[i]->data;
++
++    if (!sps) {
++        av_log(h->avctx, AV_LOG_ERROR, "SPS unavailable in decode_picture_timing\n");
++        return 0;
++    }
  
 -    if (sps->nal_hrd_parameters_present_flag ||
 -        sps->vcl_hrd_parameters_present_flag) {
 -        h->sei_cpb_removal_delay = get_bits(&h->gb,
 -                                            sps->cpb_removal_delay_length);
 -        h->sei_dpb_output_delay  = get_bits(&h->gb,
 -                                            sps->dpb_output_delay_length);
 +    if (sps->nal_hrd_parameters_present_flag || sps->vcl_hrd_parameters_present_flag) {
 +        h->sei_cpb_removal_delay = get_bits_long(&h->gb,
 +                                                 sps->cpb_removal_delay_length);
 +        h->sei_dpb_output_delay  = get_bits_long(&h->gb,
 +                                                 sps->dpb_output_delay_length);
      }
      if (sps->pic_struct_present_flag) {
          unsigned int i, num_clock_ts;
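
The switch from get_bits() to get_bits_long() for the HRD delays is needed because get_bits() reads at most 25 bits per call, while an SPS may declare cpb_removal_delay_length and dpb_output_delay_length as wide as 32 bits (the length_minus1 fields are 5 bits). A small sketch of that assumption; read_wide_delay() is a hypothetical helper:

    #include "get_bits.h"

    /* get_bits_long() covers the full 0-32 bit range, so it is safe for any
     * delay length the SPS HRD parameters can declare. */
    static unsigned read_wide_delay(GetBitContext *gb, int delay_length /* 1..32 */)
    {
        return get_bits_long(gb, delay_length);
    }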
diff --cc libavcodec/h264_slice.c
index 6f9a041,e2617e2..394a0c4
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@@ -192,10 -192,6 +192,10 @@@ static int alloc_picture(H264Context *h
      if (ret < 0)
          goto fail;
  
-     pic->crop     = h->sps.crop;
-     pic->crop_top = h->sps.crop_top;
-     pic->crop_left= h->sps.crop_left;
++    pic->crop     = h->ps.sps->crop;
++    pic->crop_top = h->ps.sps->crop_top;
++    pic->crop_left= h->ps.sps->crop_left;
 +
      if (h->avctx->hwaccel) {
          const AVHWAccel *hwaccel = h->avctx->hwaccel;
          av_assert0(!pic->hwaccel_picture_private);
@@@ -270,85 -254,40 +270,13 @@@ static int find_unused_picture(H264Cont
      return i;
  }
  
 -static int initialize_cur_frame(H264Context *h)
 -{
 -    H264Picture *cur;
 -    int ret;
 -
 -    release_unused_pictures(h, 1);
 -    ff_h264_unref_picture(h, &h->cur_pic);
 -    h->cur_pic_ptr = NULL;
 -
 -    ret = find_unused_picture(h);
 -    if (ret < 0) {
 -        av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
 -        return ret;
 -    }
 -    cur = &h->DPB[ret];
  
- static void init_dequant8_coeff_table(H264Context *h)
- {
-     int i, j, q, x;
-     const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
- 
-     for (i = 0; i < 6; i++) {
-         h->dequant8_coeff[i] = h->dequant8_buffer[i];
-         for (j = 0; j < i; j++)
-             if (!memcmp(h->pps.scaling_matrix8[j], h->pps.scaling_matrix8[i],
-                         64 * sizeof(uint8_t))) {
-                 h->dequant8_coeff[i] = h->dequant8_buffer[j];
-                 break;
-             }
-         if (j < i)
-             continue;
- 
-         for (q = 0; q < max_qp + 1; q++) {
-             int shift = ff_h264_quant_div6[q];
-             int idx   = ff_h264_quant_rem6[q];
-             for (x = 0; x < 64; x++)
-                 h->dequant8_coeff[i][q][(x >> 3) | ((x & 7) << 3)] =
-                     ((uint32_t)ff_h264_dequant8_coeff_init[idx][ff_h264_dequant8_coeff_init_scan[((x >> 1) & 12) | (x & 3)]] *
-                      h->pps.scaling_matrix8[i][x]) << shift;
-         }
-     }
- }
- 
- static void init_dequant4_coeff_table(H264Context *h)
- {
-     int i, j, q, x;
-     const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);
-     for (i = 0; i < 6; i++) {
-         h->dequant4_coeff[i] = h->dequant4_buffer[i];
-         for (j = 0; j < i; j++)
-             if (!memcmp(h->pps.scaling_matrix4[j], h->pps.scaling_matrix4[i],
-                         16 * sizeof(uint8_t))) {
-                 h->dequant4_coeff[i] = h->dequant4_buffer[j];
-                 break;
-             }
-         if (j < i)
-             continue;
- 
-         for (q = 0; q < max_qp + 1; q++) {
-             int shift = ff_h264_quant_div6[q] + 2;
-             int idx   = ff_h264_quant_rem6[q];
-             for (x = 0; x < 16; x++)
-                 h->dequant4_coeff[i][q][(x >> 2) | ((x << 2) & 0xF)] =
-                     ((uint32_t)ff_h264_dequant4_coeff_init[idx][(x & 1) + ((x >> 2) & 1)] *
-                      h->pps.scaling_matrix4[i][x]) << shift;
-         }
-     }
- }
- 
- void ff_h264_init_dequant_tables(H264Context *h)
- {
-     int i, x;
-     init_dequant4_coeff_table(h);
-     memset(h->dequant8_coeff, 0, sizeof(h->dequant8_coeff));
- 
-     if (h->pps.transform_8x8_mode)
-         init_dequant8_coeff_table(h);
-     if (h->sps.transform_bypass) {
-         for (i = 0; i < 6; i++)
-             for (x = 0; x < 16; x++)
-                 h->dequant4_coeff[i][0][x] = 1 << 6;
-         if (h->pps.transform_8x8_mode)
-             for (i = 0; i < 6; i++)
-                 for (x = 0; x < 64; x++)
-                     h->dequant8_coeff[i][0][x] = 1 << 6;
-     }
- }
- 
 -    ret = alloc_picture(h, cur);
 -    if (ret < 0)
 -        return ret;
 -
 -    ret = ff_h264_ref_picture(h, &h->cur_pic, cur);
 -    if (ret < 0)
 -        return ret;
 -    h->cur_pic_ptr = cur;
 -
 -    return 0;
 -}
 -
 -#define IN_RANGE(a, b, size) (((a) >= (b)) && ((a) < ((b) + (size))))
 +#define IN_RANGE(a, b, size) (((void*)(a) >= (void*)(b)) && ((void*)(a) < (void*)((b) + (size))))
  
  #define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
 -    ((pic && pic >= old_ctx->DPB &&                       \
 -      pic < old_ctx->DPB + H264_MAX_PICTURE_COUNT) ?          \
 -     &new_ctx->DPB[pic - old_ctx->DPB] : NULL)
 +    (((pic) && (pic) >= (old_ctx)->DPB &&                       \
 +      (pic) < (old_ctx)->DPB + H264_MAX_PICTURE_COUNT) ?          \
 +     &(new_ctx)->DPB[(pic) - (old_ctx)->DPB] : NULL)
  
  static void copy_picture_range(H264Picture **to, H264Picture **from, int count,
                                 H264Context *new_base,
@@@ -364,29 -304,9 +292,9 @@@
      }
  }
  
- static int copy_parameter_set(void **to, void **from, int count, int size)
- {
-     int i;
- 
-     for (i = 0; i < count; i++) {
-         if (to[i] && !from[i]) {
-             av_freep(&to[i]);
-         } else if (from[i] && !to[i]) {
-             to[i] = av_malloc(size);
-             if (!to[i])
-                 return AVERROR(ENOMEM);
-         }
- 
-         if (from[i])
-             memcpy(to[i], from[i], size);
-     }
- 
-     return 0;
- }
- 
  #define copy_fields(to, from, start_field, end_field)                   \
 -    memcpy(&to->start_field, &from->start_field,                        \
 -           (char *)&to->end_field - (char *)&to->start_field)
 +    memcpy(&(to)->start_field, &(from)->start_field,                        \
 +           (char *)&(to)->end_field - (char *)&(to)->start_field)
  
  static int h264_slice_header_init(H264Context *h);
  
@@@ -398,9 -318,12 +306,13 @@@ int ff_h264_update_thread_context(AVCod
      int need_reinit = 0;
      int i, ret;
  
 -    if (dst == src || !h1->context_initialized)
 +    if (dst == src)
          return 0;
  
 -    if (!h1->ps.sps)
 -        return AVERROR_INVALIDDATA;
++    // We can't fail if SPS isn't set as it breaks current skip_frame code
++    //if (!h1->ps.sps)
++    //    return AVERROR_INVALIDDATA;
+ 
      if (inited &&
          (h->width                 != h1->width                 ||
           h->height                != h1->height                ||
@@@ -413,20 -336,25 +325,43 @@@
          need_reinit = 1;
      }
  
 +    /* copy block_offset since frame_start may not be called */
 +    memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
 +
      // SPS/PPS
-     if ((ret = copy_parameter_set((void **)h->sps_buffers,
-                                   (void **)h1->sps_buffers,
-                                   MAX_SPS_COUNT, sizeof(SPS))) < 0)
-         return ret;
-     h->sps = h1->sps;
-     if ((ret = copy_parameter_set((void **)h->pps_buffers,
-                                   (void **)h1->pps_buffers,
-                                   MAX_PPS_COUNT, sizeof(PPS))) < 0)
-         return ret;
-     h->pps = h1->pps;
+     for (i = 0; i < FF_ARRAY_ELEMS(h->ps.sps_list); i++) {
+         av_buffer_unref(&h->ps.sps_list[i]);
+         if (h1->ps.sps_list[i]) {
+             h->ps.sps_list[i] = av_buffer_ref(h1->ps.sps_list[i]);
+             if (!h->ps.sps_list[i])
+                 return AVERROR(ENOMEM);
+         }
+     }
+     for (i = 0; i < FF_ARRAY_ELEMS(h->ps.pps_list); i++) {
+         av_buffer_unref(&h->ps.pps_list[i]);
+         if (h1->ps.pps_list[i]) {
+             h->ps.pps_list[i] = av_buffer_ref(h1->ps.pps_list[i]);
+             if (!h->ps.pps_list[i])
+                 return AVERROR(ENOMEM);
+         }
+     }
+ 
 -    h->ps.sps = h1->ps.sps;
++    av_buffer_unref(&h->ps.pps_ref);
++    av_buffer_unref(&h->ps.sps_ref);
++    h->ps.pps = NULL;
++    h->ps.sps = NULL;
++    if (h1->ps.pps_ref) {
++        h->ps.pps_ref = av_buffer_ref(h1->ps.pps_ref);
++        if (!h->ps.pps_ref)
++            return AVERROR(ENOMEM);
++        h->ps.pps = h->ps.pps_ref->data;
++    }
++    if (h1->ps.sps_ref) {
++        h->ps.sps_ref = av_buffer_ref(h1->ps.sps_ref);
++        if (!h->ps.sps_ref)
++            return AVERROR(ENOMEM);
++        h->ps.sps = h->ps.sps_ref->data;
++    }
  
      if (need_reinit || !inited) {
          h->width     = h1->width;
@@@ -483,22 -407,7 +418,8 @@@
      // extradata/NAL handling
      h->is_avc = h1->is_avc;
      h->nal_length_size = h1->nal_length_size;
 +    h->x264_build      = h1->x264_build;
  
-     // Dequantization matrices
-     // FIXME these are big - can they be only copied when PPS changes?
-     copy_fields(h, h1, dequant4_buffer, dequant4_coeff);
- 
-     for (i = 0; i < 6; i++)
-         h->dequant4_coeff[i] = h->dequant4_buffer[0] +
-                                (h1->dequant4_coeff[i] - h1->dequant4_buffer[0]);
- 
-     for (i = 0; i < 6; i++)
-         h->dequant8_coeff[i] = h->dequant8_buffer[0] +
-                                (h1->dequant8_coeff[i] - h1->dequant8_buffer[0]);
- 
-     h->dequant_coeff_pps = h1->dequant_coeff_pps;
- 
      // POC timing
      copy_fields(h, h1, poc_lsb, current_slice);
  
@@@ -530,28 -438,12 +451,28 @@@ static int h264_frame_start(H264Contex
      H264Picture *pic;
      int i, ret;
      const int pixel_shift = h->pixel_shift;
 +    int c[4] = {
-         1<<(h->sps.bit_depth_luma-1),
-         1<<(h->sps.bit_depth_chroma-1),
-         1<<(h->sps.bit_depth_chroma-1),
++        1<<(h->ps.sps->bit_depth_luma-1),
++        1<<(h->ps.sps->bit_depth_chroma-1),
++        1<<(h->ps.sps->bit_depth_chroma-1),
 +        -1
 +    };
  
 -    ret = initialize_cur_frame(h);
 -    if (ret < 0)
 -        return ret;
 +    if (!ff_thread_can_start_frame(h->avctx)) {
 +        av_log(h->avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
 +        return -1;
 +    }
 +
 +    release_unused_pictures(h, 1);
 +    h->cur_pic_ptr = NULL;
 +
 +    i = find_unused_picture(h);
 +    if (i < 0) {
 +        av_log(h->avctx, AV_LOG_ERROR, "no frame buffer available\n");
 +        return i;
 +    }
 +    pic = &h->DPB[i];
  
 -    pic = h->cur_pic_ptr;
      pic->reference              = h->droppable ? 0 : h->picture_structure;
      pic->f->coded_picture_number = h->coded_picture_number++;
      pic->field_picture          = h->picture_structure != PICT_FRAME;
@@@ -806,20 -674,20 +727,20 @@@ static void init_scan_tables(H264Contex
          h->field_scan8x8_cavlc[i]  = TRANSPOSE(field_scan8x8_cavlc[i]);
  #undef TRANSPOSE
      }
-     if (h->sps.transform_bypass) { // FIXME same ugly
+     if (h->ps.sps->transform_bypass) { // FIXME same ugly
 -        h->zigzag_scan_q0          = ff_zigzag_scan;
 -        h->zigzag_scan8x8_q0       = ff_zigzag_direct;
 -        h->zigzag_scan8x8_cavlc_q0 = zigzag_scan8x8_cavlc;
 -        h->field_scan_q0           = field_scan;
 -        h->field_scan8x8_q0        = field_scan8x8;
 -        h->field_scan8x8_cavlc_q0  = field_scan8x8_cavlc;
 +        memcpy(h->zigzag_scan_q0          , ff_zigzag_scan          , sizeof(h->zigzag_scan_q0         ));
 +        memcpy(h->zigzag_scan8x8_q0       , ff_zigzag_direct        , sizeof(h->zigzag_scan8x8_q0      ));
 +        memcpy(h->zigzag_scan8x8_cavlc_q0 , zigzag_scan8x8_cavlc    , sizeof(h->zigzag_scan8x8_cavlc_q0));
 +        memcpy(h->field_scan_q0           , field_scan              , sizeof(h->field_scan_q0          ));
 +        memcpy(h->field_scan8x8_q0        , field_scan8x8           , sizeof(h->field_scan8x8_q0       ));
 +        memcpy(h->field_scan8x8_cavlc_q0  , field_scan8x8_cavlc     , sizeof(h->field_scan8x8_cavlc_q0 ));
      } else {
 -        h->zigzag_scan_q0          = h->zigzag_scan;
 -        h->zigzag_scan8x8_q0       = h->zigzag_scan8x8;
 -        h->zigzag_scan8x8_cavlc_q0 = h->zigzag_scan8x8_cavlc;
 -        h->field_scan_q0           = h->field_scan;
 -        h->field_scan8x8_q0        = h->field_scan8x8;
 -        h->field_scan8x8_cavlc_q0  = h->field_scan8x8_cavlc;
 +        memcpy(h->zigzag_scan_q0          , h->zigzag_scan          , sizeof(h->zigzag_scan_q0         ));
 +        memcpy(h->zigzag_scan8x8_q0       , h->zigzag_scan8x8       , sizeof(h->zigzag_scan8x8_q0      ));
 +        memcpy(h->zigzag_scan8x8_cavlc_q0 , h->zigzag_scan8x8_cavlc , sizeof(h->zigzag_scan8x8_cavlc_q0));
 +        memcpy(h->field_scan_q0           , h->field_scan           , sizeof(h->field_scan_q0          ));
 +        memcpy(h->field_scan8x8_q0        , h->field_scan8x8        , sizeof(h->field_scan8x8_q0       ));
 +        memcpy(h->field_scan8x8_cavlc_q0  , h->field_scan8x8_cavlc  , sizeof(h->field_scan8x8_cavlc_q0 ));
      }
  }
  
@@@ -833,9 -700,8 +754,9 @@@ static enum AVPixelFormat get_pixel_for
                       CONFIG_H264_VDPAU_HWACCEL)
      enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
      const enum AVPixelFormat *choices = pix_fmts;
 +    int i;
  
-     switch (h->sps.bit_depth_luma) {
+     switch (h->ps.sps->bit_depth_luma) {
      case 9:
          if (CHROMA444(h)) {
              if (h->avctx->colorspace == AVCOL_SPC_RGB) {
@@@ -938,10 -776,9 +859,11 @@@
  /* export coded and cropped frame dimensions to AVCodecContext */
  static int init_dimensions(H264Context *h)
  {
-     int width  = h->width  - (h->sps.crop_right + h->sps.crop_left);
-     int height = h->height - (h->sps.crop_top   + h->sps.crop_bottom);
-     av_assert0(h->sps.crop_right + h->sps.crop_left < (unsigned)h->width);
-     av_assert0(h->sps.crop_top + h->sps.crop_bottom < (unsigned)h->height);
+     SPS *sps = h->ps.sps;
+     int width  = h->width  - (sps->crop_right + sps->crop_left);
+     int height = h->height - (sps->crop_top   + sps->crop_bottom);
++    av_assert0(sps->crop_right + sps->crop_left < (unsigned)h->width);
++    av_assert0(sps->crop_top + sps->crop_bottom < (unsigned)h->height);
  
      /* handle container cropping */
      if (FFALIGN(h->avctx->width,  16) == FFALIGN(width,  16) &&
@@@ -994,7 -829,7 +917,7 @@@ static int h264_slice_header_init(H264C
          if (h->x264_build < 44U)
              den *= 2;
          av_reduce(&h->avctx->framerate.den, &h->avctx->framerate.num,
-                   h->sps.num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
 -                  sps->num_units_in_tick, den, 1 << 30);
++                  sps->num_units_in_tick * h->avctx->ticks_per_frame, den, 1 << 30);
      }
  
      ff_h264_free_tables(h);
@@@ -1006,43 -841,27 +929,43 @@@
      ret = ff_h264_alloc_tables(h);
      if (ret < 0) {
          av_log(h->avctx, AV_LOG_ERROR, "Could not allocate memory\n");
 -        return ret;
 +        goto fail;
      }
  
 -    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 10) {
 +#if FF_API_CAP_VDPAU
 +    if (h->avctx->codec &&
 +        h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU &&
-         (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
++        (sps->bit_depth_luma != 8 || sps->chroma_format_idc > 1)) {
 +        av_log(h->avctx, AV_LOG_ERROR,
 +                "VDPAU decoding does not support video colorspace.\n");
 +        ret = AVERROR_INVALIDDATA;
 +        goto fail;
 +    }
 +#endif
 +
-     if (h->sps.bit_depth_luma < 8 || h->sps.bit_depth_luma > 14 ||
-         h->sps.bit_depth_luma == 11 || h->sps.bit_depth_luma == 13
++    if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
++        sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
 +    ) {
          av_log(h->avctx, AV_LOG_ERROR, "Unsupported bit depth %d\n",
-                h->sps.bit_depth_luma);
+                sps->bit_depth_luma);
 -        return AVERROR_INVALIDDATA;
 +        ret = AVERROR_INVALIDDATA;
 +        goto fail;
      }
  
 +    h->cur_bit_depth_luma         =
-     h->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
-     h->cur_chroma_format_idc      = h->sps.chroma_format_idc;
-     h->pixel_shift                = h->sps.bit_depth_luma > 8;
-     h->chroma_format_idc          = h->sps.chroma_format_idc;
-     h->bit_depth_luma             = h->sps.bit_depth_luma;
- 
-     ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
-                     h->sps.chroma_format_idc);
-     ff_h264chroma_init(&h->h264chroma, h->sps.bit_depth_chroma);
-     ff_h264qpel_init(&h->h264qpel, h->sps.bit_depth_luma);
-     ff_h264_pred_init(&h->hpc, h->avctx->codec_id, h->sps.bit_depth_luma,
-                       h->sps.chroma_format_idc);
-     ff_videodsp_init(&h->vdsp, h->sps.bit_depth_luma);
+     h->avctx->bits_per_raw_sample = sps->bit_depth_luma;
++    h->cur_chroma_format_idc      = sps->chroma_format_idc;
+     h->pixel_shift                = sps->bit_depth_luma > 8;
+     h->chroma_format_idc          = sps->chroma_format_idc;
+     h->bit_depth_luma             = sps->bit_depth_luma;
+ 
+     ff_h264dsp_init(&h->h264dsp, sps->bit_depth_luma,
+                     sps->chroma_format_idc);
+     ff_h264chroma_init(&h->h264chroma, sps->bit_depth_chroma);
+     ff_h264qpel_init(&h->h264qpel, sps->bit_depth_luma);
+     ff_h264_pred_init(&h->hpc, h->avctx->codec_id, sps->bit_depth_luma,
+                       sps->chroma_format_idc);
+     ff_videodsp_init(&h->vdsp, sps->bit_depth_luma);
  
      if (nb_slices > H264_MAX_THREADS || (nb_slices > h->mb_height && h->mb_height)) {
          int max_slices;
@@@ -1114,16 -919,10 +1039,15 @@@ int ff_h264_decode_slice_header(H264Con
      int ret;
      unsigned int slice_type, tmp, i, j;
      int last_pic_structure, last_pic_droppable;
 +    int must_reinit;
      int needs_reinit = 0;
      int field_pic_flag, bottom_field_flag;
 +    int first_slice = sl == h->slice_ctx && !h->current_slice;
      int frame_num, droppable, picture_structure;
 -    int mb_aff_frame = 0;
 +    int mb_aff_frame, last_mb_aff_frame;
-     PPS *pps;
 +
 +    if (first_slice)
 +        av_assert0(!h->setup_finished);
  
      h->qpel_put = h->h264qpel.put_h264_qpel_pixels_tab;
      h->qpel_avg = h->h264qpel.avg_h264_qpel_pixels_tab;
@@@ -1231,47 -989,18 +1155,56 @@@
          return AVERROR_INVALIDDATA;
      }
  
-     pps = h->pps_buffers[pps_id];
 -    if (!h->ps.sps_list[h->ps.pps->sps_id]) {
++    pps = (const PPS*)h->ps.pps_list[pps_id]->data;
 +
-     if (!h->sps_buffers[pps->sps_id]) {
++    if (!h->ps.sps_list[pps->sps_id]) {
          av_log(h->avctx, AV_LOG_ERROR,
                 "non-existing SPS %u referenced\n",
-                h->pps.sps_id);
 -               h->ps.pps->sps_id);
++               pps->sps_id);
          return AVERROR_INVALIDDATA;
      }
  
 -    if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data) {
 -        h->ps.sps = (SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data;
 +    if (first_slice) {
-         h->pps = *h->pps_buffers[pps_id];
++        av_buffer_unref(&h->ps.pps_ref);
++        h->ps.pps = NULL;
++        h->ps.pps_ref = av_buffer_ref(h->ps.pps_list[pps_id]);
++        if (!h->ps.pps_ref)
++            return AVERROR(ENOMEM);
++        h->ps.pps = (const PPS*)h->ps.pps_ref->data;
 +    } else {
-         if (h->pps.sps_id != pps->sps_id ||
-             h->pps.transform_8x8_mode != pps->transform_8x8_mode ||
-             (h->setup_finished && h->dequant_coeff_pps != pps_id)) {
++        if (h->ps.pps->sps_id != pps->sps_id ||
++            h->ps.pps->transform_8x8_mode != pps->transform_8x8_mode /*||
++            (h->setup_finished && h->ps.pps != pps)*/) {
 +            av_log(h->avctx, AV_LOG_ERROR, "PPS changed between slices\n");
 +            return AVERROR_INVALIDDATA;
 +        }
 +    }
 +
-     if (pps->sps_id != h->sps.sps_id ||
-         pps->sps_id != h->current_sps_id ||
-         h->sps_buffers[pps->sps_id]->new) {
++    if (h->ps.sps != (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data ||
++        pps->sps_id != h->current_sps_id) {
 +
 +        if (!first_slice) {
 +            av_log(h->avctx, AV_LOG_ERROR,
 +               "SPS changed in the middle of the frame\n");
 +            return AVERROR_INVALIDDATA;
 +        }
 +
-         h->sps = *h->sps_buffers[h->pps.sps_id];
- 
-         if (h->mb_width  != h->sps.mb_width ||
-             h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) ||
-             h->cur_bit_depth_luma    != h->sps.bit_depth_luma ||
-             h->cur_chroma_format_idc != h->sps.chroma_format_idc
++        av_buffer_unref(&h->ps.sps_ref);
++        h->ps.sps = NULL;
++        h->ps.sps_ref = av_buffer_ref(h->ps.sps_list[h->ps.pps->sps_id]);
++        if (!h->ps.sps_ref)
++            return AVERROR(ENOMEM);
++        h->ps.sps = (const SPS*)h->ps.sps_ref->data;
++
++        if (h->mb_width  != h->ps.sps->mb_width ||
++            h->mb_height != h->ps.sps->mb_height * (2 - h->ps.sps->frame_mbs_only_flag) ||
++            h->cur_bit_depth_luma    != h->ps.sps->bit_depth_luma ||
++            h->cur_chroma_format_idc != h->ps.sps->chroma_format_idc
 +        )
 +            needs_reinit = 1;
  
-         if (h->bit_depth_luma    != h->sps.bit_depth_luma ||
-             h->chroma_format_idc != h->sps.chroma_format_idc)
+         if (h->bit_depth_luma    != h->ps.sps->bit_depth_luma ||
+             h->chroma_format_idc != h->ps.sps->chroma_format_idc)
              needs_reinit         = 1;
  
          if (h->flags & AV_CODEC_FLAG_LOW_DELAY ||
@@@ -1289,28 -1018,20 +1222,31 @@@
  
      }
  
+     pps = h->ps.pps;
+     sps = h->ps.sps;
+ 
 +    must_reinit = (h->context_initialized &&
-                     (   16*h->sps.mb_width != h->avctx->coded_width
-                      || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != h->avctx->coded_height
-                      || h->cur_bit_depth_luma    != h->sps.bit_depth_luma
-                      || h->cur_chroma_format_idc != h->sps.chroma_format_idc
-                      || h->mb_width  != h->sps.mb_width
-                      || h->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag)
++                    (   16*sps->mb_width != h->avctx->coded_width
++                     || 16*sps->mb_height * (2 - sps->frame_mbs_only_flag) != h->avctx->coded_height
++                     || h->cur_bit_depth_luma    != sps->bit_depth_luma
++                     || h->cur_chroma_format_idc != sps->chroma_format_idc
++                     || h->mb_width  != sps->mb_width
++                     || h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag)
 +                    ));
 +    if (h->avctx->pix_fmt == AV_PIX_FMT_NONE
 +        || (non_j_pixfmt(h->avctx->pix_fmt) != non_j_pixfmt(get_pixel_format(h, 0))))
 +        must_reinit = 1;
 +
-     if (first_slice && av_cmp_q(h->sps.sar, h->avctx->sample_aspect_ratio))
++    if (first_slice && av_cmp_q(sps->sar, h->avctx->sample_aspect_ratio))
 +        must_reinit = 1;
 +
      if (!h->setup_finished) {
-         h->avctx->profile = ff_h264_get_profile(&h->sps);
-         h->avctx->level   = h->sps.level_idc;
-         h->avctx->refs    = h->sps.ref_frame_count;
+         h->avctx->profile = ff_h264_get_profile(sps);
+         h->avctx->level   = sps->level_idc;
+         h->avctx->refs    = sps->ref_frame_count;
  
-         h->mb_width  = h->sps.mb_width;
-         h->mb_height = h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag);
 -        if (h->mb_width  != sps->mb_width ||
 -            h->mb_height != sps->mb_height * (2 - sps->frame_mbs_only_flag))
 -            needs_reinit = 1;
 -
+         h->mb_width  = sps->mb_width;
+         h->mb_height = sps->mb_height * (2 - sps->frame_mbs_only_flag);
          h->mb_num    = h->mb_width * h->mb_height;
          h->mb_stride = h->mb_width + 1;
  
@@@ -1325,15 -1046,15 +1261,15 @@@
          if (ret < 0)
              return ret;
  
-         if (h->sps.video_signal_type_present_flag) {
-             h->avctx->color_range = h->sps.full_range>0 ? AVCOL_RANGE_JPEG
+         if (sps->video_signal_type_present_flag) {
 -            h->avctx->color_range = sps->full_range ? AVCOL_RANGE_JPEG
 -                : AVCOL_RANGE_MPEG;
++            h->avctx->color_range = sps->full_range > 0 ? AVCOL_RANGE_JPEG
 +                                                        : AVCOL_RANGE_MPEG;
-             if (h->sps.colour_description_present_flag) {
-                 if (h->avctx->colorspace != h->sps.colorspace)
+             if (sps->colour_description_present_flag) {
+                 if (h->avctx->colorspace != sps->colorspace)
                      needs_reinit = 1;
-                 h->avctx->color_primaries = h->sps.color_primaries;
-                 h->avctx->color_trc       = h->sps.color_trc;
-                 h->avctx->colorspace      = h->sps.colorspace;
+                 h->avctx->color_primaries = sps->color_primaries;
+                 h->avctx->color_trc       = sps->color_trc;
+                 h->avctx->colorspace      = sps->colorspace;
              }
          }
      }
@@@ -1386,20 -1104,7 +1322,15 @@@
          }
      }
  
-     if (!h->current_slice && h->dequant_coeff_pps != pps_id) {
-         h->dequant_coeff_pps = pps_id;
-         ff_h264_init_dequant_tables(h);
-     }
- 
-     frame_num = get_bits(&sl->gb, h->sps.log2_max_frame_num);
+     frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
 +    if (!first_slice) {
 +        if (h->frame_num != frame_num) {
 +            av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
 +                   h->frame_num, frame_num);
 +            return AVERROR_INVALIDDATA;
 +        }
 +    }
 +
      if (!h->setup_finished)
          h->frame_num = frame_num;
  
@@@ -1410,28 -1114,28 +1341,28 @@@
      last_pic_droppable = h->droppable;
  
      droppable = h->nal_ref_idc == 0;
-     if (h->sps.frame_mbs_only_flag) {
+     if (sps->frame_mbs_only_flag) {
          picture_structure = PICT_FRAME;
      } else {
-         if (!h->sps.direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
++        if (!h->ps.sps->direct_8x8_inference_flag && slice_type == AV_PICTURE_TYPE_B) {
 +            av_log(h->avctx, AV_LOG_ERROR, "This stream was generated by a broken encoder, invalid 8x8 inference\n");
 +            return -1;
 +        }
          field_pic_flag = get_bits1(&sl->gb);
 +
          if (field_pic_flag) {
              bottom_field_flag = get_bits1(&sl->gb);
              picture_structure = PICT_TOP_FIELD + bottom_field_flag;
          } else {
              picture_structure = PICT_FRAME;
-             mb_aff_frame      = h->sps.mb_aff;
+             mb_aff_frame      = sps->mb_aff;
          }
      }
 -    if (!h->setup_finished) {
 -        h->droppable         = droppable;
 -        h->picture_structure = picture_structure;
 -        h->mb_aff_frame      = mb_aff_frame;
 -    }
 -    sl->mb_field_decoding_flag = h->picture_structure != PICT_FRAME;
  
 -    if (h->current_slice != 0) {
 +    if (h->current_slice) {
          if (last_pic_structure != picture_structure ||
 -            last_pic_droppable != droppable) {
 +            last_pic_droppable != droppable ||
 +            last_mb_aff_frame  != mb_aff_frame) {
              av_log(h->avctx, AV_LOG_ERROR,
                     "Changing field mode (%d -> %d) between slices is not allowed\n",
                     last_pic_structure, h->picture_structure);
@@@ -1527,24 -1216,20 +1458,24 @@@
              }
          }
  
 -        while (h->frame_num != h->prev_frame_num &&
 +        while (h->frame_num != h->prev_frame_num && !h->first_field &&
-                h->frame_num != (h->prev_frame_num + 1) % (1 << h->sps.log2_max_frame_num)) {
+                h->frame_num != (h->prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
              H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
              av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
                     h->frame_num, h->prev_frame_num);
-             if (!h->sps.gaps_in_frame_num_allowed_flag)
 -            ret = initialize_cur_frame(h);
++            if (!sps->gaps_in_frame_num_allowed_flag)
 +                for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
 +                    h->last_pocs[i] = INT_MIN;
 +            ret = h264_frame_start(h);
              if (ret < 0) {
                  h->first_field = 0;
                  return ret;
              }
  
              h->prev_frame_num++;
-             h->prev_frame_num        %= 1 << h->sps.log2_max_frame_num;
+             h->prev_frame_num        %= 1 << sps->log2_max_frame_num;
              h->cur_pic_ptr->frame_num = h->prev_frame_num;
-             h->cur_pic_ptr->invalid_gap = !h->sps.gaps_in_frame_num_allowed_flag;
++            h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
              ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
              ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
              ret = ff_generate_sliding_window_mmcos(h, 1);
@@@ -1654,10 -1326,10 +1585,10 @@@
      }
  
      if (h->nal_unit_type == NAL_IDR_SLICE)
 -        get_ue_golomb(&sl->gb); /* idr_pic_id */
 +        get_ue_golomb_long(&sl->gb); /* idr_pic_id */
  
-     if (h->sps.poc_type == 0) {
-         int poc_lsb = get_bits(&sl->gb, h->sps.log2_max_poc_lsb);
+     if (sps->poc_type == 0) {
+         int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
  
          if (!h->setup_finished)
              h->poc_lsb = poc_lsb;
@@@ -1693,8 -1365,8 +1624,8 @@@
          sl->direct_spatial_mv_pred = get_bits1(&sl->gb);
  
      ret = ff_h264_parse_ref_count(&sl->list_count, sl->ref_count,
-                                   &sl->gb, &h->pps, sl->slice_type_nos,
+                                   &sl->gb, pps, sl->slice_type_nos,
 -                                  h->picture_structure);
 +                                  h->picture_structure, h->avctx);
      if (ret < 0)
          return ret;
  
@@@ -1838,19 -1505,14 +1769,19 @@@
      sl->qp_thresh = 15 -
                     FFMIN(sl->slice_alpha_c0_offset, sl->slice_beta_offset) -
                     FFMAX3(0,
-                           h->pps.chroma_qp_index_offset[0],
-                           h->pps.chroma_qp_index_offset[1]) +
-                    6 * (h->sps.bit_depth_luma - 8);
+                           pps->chroma_qp_index_offset[0],
+                           pps->chroma_qp_index_offset[1]) +
+                    6 * (sps->bit_depth_luma - 8);
  
      sl->slice_num       = ++h->current_slice;
 -    if (sl->slice_num >= MAX_SLICES) {
 -        av_log(h->avctx, AV_LOG_ERROR,
 -               "Too many slices, increase MAX_SLICES and recompile\n");
 +
 +    if (sl->slice_num)
 +        h->slice_row[(sl->slice_num-1)&(MAX_SLICES-1)]= sl->resync_mb_y;
 +    if (   h->slice_row[sl->slice_num&(MAX_SLICES-1)] + 3 >= sl->resync_mb_y
 +        && h->slice_row[sl->slice_num&(MAX_SLICES-1)] <= sl->resync_mb_y
 +        && sl->slice_num >= MAX_SLICES) {
 +        //in case of ASO this check needs to be updated depending on how we decide to assign slice numbers in this case
 +        av_log(h->avctx, AV_LOG_WARNING, "Possibly too many slices (%d >= %d), increase MAX_SLICES and recompile if there are artifacts\n", sl->slice_num, MAX_SLICES);
      }
  
      for (j = 0; j < 2; j++) {
@@@ -1886,11 -1548,6 +1817,9 @@@
                               (sl->ref_list[j][i].reference & 3);
      }
  
 +    h->au_pps_id = pps_id;
-     h->sps.new =
-     h->sps_buffers[h->pps.sps_id]->new = 0;
-     h->current_sps_id = h->pps.sps_id;
++    h->current_sps_id = h->ps.pps->sps_id;
 +
      if (h->avctx->debug & FF_DEBUG_PICT_INFO) {
          av_log(h->avctx, AV_LOG_DEBUG,
                 "slice:%d %s mb:%d %c%s%s pps:%u frame:%d poc:%d/%d ref:%d/%d qp:%d loop:%d:%d:%d weight:%d%s %s\n",
@@@ -2311,17 -1967,7 +2240,17 @@@ static int decode_slice(struct AVCodecC
                       avctx->codec_id != AV_CODEC_ID_H264 ||
                       (CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
  
 +    if (!(h->avctx->active_thread_type & FF_THREAD_SLICE) && h->picture_structure == PICT_FRAME && h->slice_ctx[0].er.error_status_table) {
 +        const int start_i  = av_clip(sl->resync_mb_x + sl->resync_mb_y * h->mb_width, 0, h->mb_num - 1);
 +        if (start_i) {
 +            int prev_status = h->slice_ctx[0].er.error_status_table[h->slice_ctx[0].er.mb_index2xy[start_i - 1]];
 +            prev_status &= ~ VP_START;
 +            if (prev_status != (ER_MV_END | ER_DC_END | ER_AC_END))
 +                h->slice_ctx[0].er.error_occurred = 1;
 +        }
 +    }
 +
-     if (h->pps.cabac) {
+     if (h->ps.pps->cabac) {
          /* realign */
          align_get_bits(&sl->gb);
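
Throughout the slice code the active SPS/PPS are now plain pointers into reference-counted buffers kept alive by ps.sps_ref and ps.pps_ref, rather than struct copies in the context. A condensed restatement of the first-slice PPS activation pattern used above; activate_pps() is a hypothetical helper, not a function added by the patch:

    #include "h264.h"   /* H264ParamSets, PPS */

    /* Drop the previously active PPS reference, take a new reference on the
     * stored buffer, and point ps->pps at its payload. */
    static int activate_pps(H264ParamSets *ps, unsigned int pps_id)
    {
        av_buffer_unref(&ps->pps_ref);
        ps->pps = NULL;
        ps->pps_ref = av_buffer_ref(ps->pps_list[pps_id]);
        if (!ps->pps_ref)
            return AVERROR(ENOMEM);
        ps->pps = (const PPS *)ps->pps_ref->data;
        return 0;
    }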
  
diff --cc libavcodec/vaapi_h264.c
index 7470a16,9c17ac1..9b13fa9
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@@ -227,7 -226,9 +227,9 @@@ static int vaapi_h264_start_frame(AVCod
                                    av_unused uint32_t       size)
  {
      H264Context * const h = avctx->priv_data;
 -    struct vaapi_context * const vactx = avctx->hwaccel_context;
 +    FFVAContext * const vactx = ff_vaapi_get_context(avctx);
+     const PPS *pps = h->ps.pps;
+     const SPS *sps = h->ps.sps;
      VAPictureParameterBufferH264 *pic_param;
      VAIQMatrixBufferH264 *iq_matrix;
  
@@@ -244,38 -245,38 +246,38 @@@
          return -1;
      pic_param->picture_width_in_mbs_minus1                      = h->mb_width - 1;
      pic_param->picture_height_in_mbs_minus1                     = h->mb_height - 1;
-     pic_param->bit_depth_luma_minus8                            = h->sps.bit_depth_luma - 8;
-     pic_param->bit_depth_chroma_minus8                          = h->sps.bit_depth_chroma - 8;
-     pic_param->num_ref_frames                                   = h->sps.ref_frame_count;
+     pic_param->bit_depth_luma_minus8                            = sps->bit_depth_luma - 8;
+     pic_param->bit_depth_chroma_minus8                          = sps->bit_depth_chroma - 8;
+     pic_param->num_ref_frames                                   = sps->ref_frame_count;
      pic_param->seq_fields.value                                 = 0; /* reset all bits */
-     pic_param->seq_fields.bits.chroma_format_idc                = h->sps.chroma_format_idc;
-     pic_param->seq_fields.bits.residual_colour_transform_flag   = h->sps.residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
-     pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag;
-     pic_param->seq_fields.bits.frame_mbs_only_flag              = h->sps.frame_mbs_only_flag;
-     pic_param->seq_fields.bits.mb_adaptive_frame_field_flag     = h->sps.mb_aff;
-     pic_param->seq_fields.bits.direct_8x8_inference_flag        = h->sps.direct_8x8_inference_flag;
-     pic_param->seq_fields.bits.MinLumaBiPredSize8x8             = h->sps.level_idc >= 31; /* A.3.3.2 */
-     pic_param->seq_fields.bits.log2_max_frame_num_minus4        = h->sps.log2_max_frame_num - 4;
-     pic_param->seq_fields.bits.pic_order_cnt_type               = h->sps.poc_type;
-     pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
-     pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
-     pic_param->num_slice_groups_minus1                          = h->pps.slice_group_count - 1;
-     pic_param->slice_group_map_type                             = h->pps.mb_slice_group_map_type;
+     pic_param->seq_fields.bits.chroma_format_idc                = sps->chroma_format_idc;
+     pic_param->seq_fields.bits.residual_colour_transform_flag   = sps->residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
+     pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = sps->gaps_in_frame_num_allowed_flag;
+     pic_param->seq_fields.bits.frame_mbs_only_flag              = sps->frame_mbs_only_flag;
+     pic_param->seq_fields.bits.mb_adaptive_frame_field_flag     = sps->mb_aff;
+     pic_param->seq_fields.bits.direct_8x8_inference_flag        = sps->direct_8x8_inference_flag;
+     pic_param->seq_fields.bits.MinLumaBiPredSize8x8             = sps->level_idc >= 31; /* A.3.3.2 */
+     pic_param->seq_fields.bits.log2_max_frame_num_minus4        = sps->log2_max_frame_num - 4;
+     pic_param->seq_fields.bits.pic_order_cnt_type               = sps->poc_type;
+     pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4;
+     pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
+     pic_param->num_slice_groups_minus1                          = pps->slice_group_count - 1;
+     pic_param->slice_group_map_type                             = pps->mb_slice_group_map_type;
 -    pic_param->slice_group_change_rate_minus1                   = 0; /* XXX: unimplemented in Libav */
 +    pic_param->slice_group_change_rate_minus1                   = 0; /* XXX: unimplemented in FFmpeg */
-     pic_param->pic_init_qp_minus26                              = h->pps.init_qp - 26;
-     pic_param->pic_init_qs_minus26                              = h->pps.init_qs - 26;
-     pic_param->chroma_qp_index_offset                           = h->pps.chroma_qp_index_offset[0];
-     pic_param->second_chroma_qp_index_offset                    = h->pps.chroma_qp_index_offset[1];
+     pic_param->pic_init_qp_minus26                              = pps->init_qp - 26;
+     pic_param->pic_init_qs_minus26                              = pps->init_qs - 26;
+     pic_param->chroma_qp_index_offset                           = pps->chroma_qp_index_offset[0];
+     pic_param->second_chroma_qp_index_offset                    = pps->chroma_qp_index_offset[1];
      pic_param->pic_fields.value                                 = 0; /* reset all bits */
-     pic_param->pic_fields.bits.entropy_coding_mode_flag         = h->pps.cabac;
-     pic_param->pic_fields.bits.weighted_pred_flag               = h->pps.weighted_pred;
-     pic_param->pic_fields.bits.weighted_bipred_idc              = h->pps.weighted_bipred_idc;
-     pic_param->pic_fields.bits.transform_8x8_mode_flag          = h->pps.transform_8x8_mode;
+     pic_param->pic_fields.bits.entropy_coding_mode_flag         = pps->cabac;
+     pic_param->pic_fields.bits.weighted_pred_flag               = pps->weighted_pred;
+     pic_param->pic_fields.bits.weighted_bipred_idc              = pps->weighted_bipred_idc;
+     pic_param->pic_fields.bits.transform_8x8_mode_flag          = pps->transform_8x8_mode;
      pic_param->pic_fields.bits.field_pic_flag                   = h->picture_structure != PICT_FRAME;
-     pic_param->pic_fields.bits.constrained_intra_pred_flag      = h->pps.constrained_intra_pred;
-     pic_param->pic_fields.bits.pic_order_present_flag           = h->pps.pic_order_present;
-     pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-     pic_param->pic_fields.bits.redundant_pic_cnt_present_flag   = h->pps.redundant_pic_cnt_present;
+     pic_param->pic_fields.bits.constrained_intra_pred_flag      = pps->constrained_intra_pred;
+     pic_param->pic_fields.bits.pic_order_present_flag           = pps->pic_order_present;
+     pic_param->pic_fields.bits.deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
+     pic_param->pic_fields.bits.redundant_pic_cnt_present_flag   = pps->redundant_pic_cnt_present;
      pic_param->pic_fields.bits.reference_pic_flag               = h->nal_ref_idc != 0;
      pic_param->frame_num                                        = h->frame_num;
  
diff --cc libavcodec/vdpau.c
index 7f8690e,bf5f8d9..d791d15
--- a/libavcodec/vdpau.c
+++ b/libavcodec/vdpau.c
@@@ -355,345 -317,6 +355,345 @@@ int ff_vdpau_add_buffer(struct vdpau_pi
      return 0;
  }
  
 +/* Obsolete non-hwaccel VDPAU support below... */
 +
 +#if FF_API_VDPAU
 +void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
 +{
 +    struct vdpau_render_state *render = (struct vdpau_render_state*)data;
 +    assert(render);
 +
 +    render->bitstream_buffers= av_fast_realloc(
 +        render->bitstream_buffers,
 +        &render->bitstream_buffers_allocated,
 +        sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
 +    );
 +
 +    render->bitstream_buffers[render->bitstream_buffers_used].struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
 +    render->bitstream_buffers[render->bitstream_buffers_used].bitstream       = buf;
 +    render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
 +    render->bitstream_buffers_used++;
 +}
 +
 +#if CONFIG_H264_VDPAU_DECODER
 +void ff_vdpau_h264_set_reference_frames(H264Context *h)
 +{
 +    struct vdpau_render_state *render, *render_ref;
 +    VdpReferenceFrameH264 *rf, *rf2;
 +    H264Picture *pic;
 +    int i, list, pic_frame_idx;
 +
 +    render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
 +    assert(render);
 +
 +    rf = &render->info.h264.referenceFrames[0];
 +#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)
 +
 +    for (list = 0; list < 2; ++list) {
 +        H264Picture **lp = list ? h->long_ref : h->short_ref;
 +        int ls = list ? 16 : h->short_ref_count;
 +
 +        for (i = 0; i < ls; ++i) {
 +            pic = lp[i];
 +            if (!pic || !pic->reference)
 +                continue;
 +            pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
 +
 +            render_ref = (struct vdpau_render_state *)pic->f->data[0];
 +            assert(render_ref);
 +
 +            rf2 = &render->info.h264.referenceFrames[0];
 +            while (rf2 != rf) {
 +                if (
 +                    (rf2->surface == render_ref->surface)
 +                    && (rf2->is_long_term == pic->long_ref)
 +                    && (rf2->frame_idx == pic_frame_idx)
 +                )
 +                    break;
 +                ++rf2;
 +            }
 +            if (rf2 != rf) {
 +                rf2->top_is_reference    |= (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
 +                rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
 +                continue;
 +            }
 +
 +            if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
 +                continue;
 +
 +            rf->surface             = render_ref->surface;
 +            rf->is_long_term        = pic->long_ref;
 +            rf->top_is_reference    = (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
 +            rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
 +            rf->field_order_cnt[0]  = pic->field_poc[0];
 +            rf->field_order_cnt[1]  = pic->field_poc[1];
 +            rf->frame_idx           = pic_frame_idx;
 +
 +            ++rf;
 +        }
 +    }
 +
 +    for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
 +        rf->surface             = VDP_INVALID_HANDLE;
 +        rf->is_long_term        = 0;
 +        rf->top_is_reference    = 0;
 +        rf->bottom_is_reference = 0;
 +        rf->field_order_cnt[0]  = 0;
 +        rf->field_order_cnt[1]  = 0;
 +        rf->frame_idx           = 0;
 +    }
 +}
 +
 +void ff_vdpau_h264_picture_start(H264Context *h)
 +{
 +    struct vdpau_render_state *render;
 +    int i;
 +
 +    render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
 +    assert(render);
 +
 +    for (i = 0; i < 2; ++i) {
 +        int foc = h->cur_pic_ptr->field_poc[i];
 +        if (foc == INT_MAX)
 +            foc = 0;
 +        render->info.h264.field_order_cnt[i] = foc;
 +    }
 +
 +    render->info.h264.frame_num = h->frame_num;
 +}
 +
 +void ff_vdpau_h264_picture_complete(H264Context *h)
 +{
 +    struct vdpau_render_state *render;
 +
 +    render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
 +    assert(render);
 +
 +    render->info.h264.slice_count = h->current_slice;
 +    if (render->info.h264.slice_count < 1)
 +        return;
 +
 +    render->info.h264.is_reference                           = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
 +    render->info.h264.field_pic_flag                         = h->picture_structure != PICT_FRAME;
 +    render->info.h264.bottom_field_flag                      = h->picture_structure == PICT_BOTTOM_FIELD;
-     render->info.h264.num_ref_frames                         = h->sps.ref_frame_count;
-     render->info.h264.mb_adaptive_frame_field_flag           = h->sps.mb_aff && !render->info.h264.field_pic_flag;
-     render->info.h264.constrained_intra_pred_flag            = h->pps.constrained_intra_pred;
-     render->info.h264.weighted_pred_flag                     = h->pps.weighted_pred;
-     render->info.h264.weighted_bipred_idc                    = h->pps.weighted_bipred_idc;
-     render->info.h264.frame_mbs_only_flag                    = h->sps.frame_mbs_only_flag;
-     render->info.h264.transform_8x8_mode_flag                = h->pps.transform_8x8_mode;
-     render->info.h264.chroma_qp_index_offset                 = h->pps.chroma_qp_index_offset[0];
-     render->info.h264.second_chroma_qp_index_offset          = h->pps.chroma_qp_index_offset[1];
-     render->info.h264.pic_init_qp_minus26                    = h->pps.init_qp - 26;
-     render->info.h264.num_ref_idx_l0_active_minus1           = h->pps.ref_count[0] - 1;
-     render->info.h264.num_ref_idx_l1_active_minus1           = h->pps.ref_count[1] - 1;
-     render->info.h264.log2_max_frame_num_minus4              = h->sps.log2_max_frame_num - 4;
-     render->info.h264.pic_order_cnt_type                     = h->sps.poc_type;
-     render->info.h264.log2_max_pic_order_cnt_lsb_minus4      = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
-     render->info.h264.delta_pic_order_always_zero_flag       = h->sps.delta_pic_order_always_zero_flag;
-     render->info.h264.direct_8x8_inference_flag              = h->sps.direct_8x8_inference_flag;
-     render->info.h264.entropy_coding_mode_flag               = h->pps.cabac;
-     render->info.h264.pic_order_present_flag                 = h->pps.pic_order_present;
-     render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
-     render->info.h264.redundant_pic_cnt_present_flag         = h->pps.redundant_pic_cnt_present;
-     memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
-     memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
-     memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
++    render->info.h264.num_ref_frames                         = h->ps.sps->ref_frame_count;
++    render->info.h264.mb_adaptive_frame_field_flag           = h->ps.sps->mb_aff && !render->info.h264.field_pic_flag;
++    render->info.h264.constrained_intra_pred_flag            = h->ps.pps->constrained_intra_pred;
++    render->info.h264.weighted_pred_flag                     = h->ps.pps->weighted_pred;
++    render->info.h264.weighted_bipred_idc                    = h->ps.pps->weighted_bipred_idc;
++    render->info.h264.frame_mbs_only_flag                    = h->ps.sps->frame_mbs_only_flag;
++    render->info.h264.transform_8x8_mode_flag                = h->ps.pps->transform_8x8_mode;
++    render->info.h264.chroma_qp_index_offset                 = h->ps.pps->chroma_qp_index_offset[0];
++    render->info.h264.second_chroma_qp_index_offset          = h->ps.pps->chroma_qp_index_offset[1];
++    render->info.h264.pic_init_qp_minus26                    = h->ps.pps->init_qp - 26;
++    render->info.h264.num_ref_idx_l0_active_minus1           = h->ps.pps->ref_count[0] - 1;
++    render->info.h264.num_ref_idx_l1_active_minus1           = h->ps.pps->ref_count[1] - 1;
++    render->info.h264.log2_max_frame_num_minus4              = h->ps.sps->log2_max_frame_num - 4;
++    render->info.h264.pic_order_cnt_type                     = h->ps.sps->poc_type;
++    render->info.h264.log2_max_pic_order_cnt_lsb_minus4      = h->ps.sps->poc_type ? 0 : h->ps.sps->log2_max_poc_lsb - 4;
++    render->info.h264.delta_pic_order_always_zero_flag       = h->ps.sps->delta_pic_order_always_zero_flag;
++    render->info.h264.direct_8x8_inference_flag              = h->ps.sps->direct_8x8_inference_flag;
++    render->info.h264.entropy_coding_mode_flag               = h->ps.pps->cabac;
++    render->info.h264.pic_order_present_flag                 = h->ps.pps->pic_order_present;
++    render->info.h264.deblocking_filter_control_present_flag = h->ps.pps->deblocking_filter_parameters_present;
++    render->info.h264.redundant_pic_cnt_present_flag         = h->ps.pps->redundant_pic_cnt_present;
++    memcpy(render->info.h264.scaling_lists_4x4, h->ps.pps->scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
++    memcpy(render->info.h264.scaling_lists_8x8[0], h->ps.pps->scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
++    memcpy(render->info.h264.scaling_lists_8x8[1], h->ps.pps->scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
 +
 +    ff_h264_draw_horiz_band(h, &h->slice_ctx[0], 0, h->avctx->height);
 +    render->bitstream_buffers_used = 0;
 +}
 +#endif /* CONFIG_H264_VDPAU_DECODER */
 +
 +#if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER
 +void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
 +                                    int buf_size, int slice_count)
 +{
 +    struct vdpau_render_state *render, *last, *next;
 +    int i;
 +
 +    if (!s->current_picture_ptr) return;
 +
 +    render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
 +    assert(render);
 +
 +    /* fill VdpPictureInfoMPEG1Or2 struct */
 +    render->info.mpeg.picture_structure          = s->picture_structure;
 +    render->info.mpeg.picture_coding_type        = s->pict_type;
 +    render->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
 +    render->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
 +    render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
 +    render->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
 +    render->info.mpeg.alternate_scan             = s->alternate_scan;
 +    render->info.mpeg.q_scale_type               = s->q_scale_type;
 +    render->info.mpeg.top_field_first            = s->top_field_first;
 +    render->info.mpeg.full_pel_forward_vector    = s->full_pel[0]; // MPEG-1 only.  Set 0 for MPEG-2
 +    render->info.mpeg.full_pel_backward_vector   = s->full_pel[1]; // MPEG-1 only.  Set 0 for MPEG-2
 +    render->info.mpeg.f_code[0][0]               = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
 +    render->info.mpeg.f_code[0][1]               = s->mpeg_f_code[0][1];
 +    render->info.mpeg.f_code[1][0]               = s->mpeg_f_code[1][0];
 +    render->info.mpeg.f_code[1][1]               = s->mpeg_f_code[1][1];
 +    for (i = 0; i < 64; ++i) {
 +        render->info.mpeg.intra_quantizer_matrix[i]     = s->intra_matrix[i];
 +        render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
 +    }
 +
 +    render->info.mpeg.forward_reference          = VDP_INVALID_HANDLE;
 +    render->info.mpeg.backward_reference         = VDP_INVALID_HANDLE;
 +
 +    switch(s->pict_type){
 +    case  AV_PICTURE_TYPE_B:
 +        next = (struct vdpau_render_state *)s->next_picture.f->data[0];
 +        assert(next);
 +        render->info.mpeg.backward_reference     = next->surface;
 +        // no return here, going to set forward prediction
 +    case  AV_PICTURE_TYPE_P:
 +        last = (struct vdpau_render_state *)s->last_picture.f->data[0];
 +        if (!last) // FIXME: Does this test make sense?
 +            last = render; // predict second field from the first
 +        render->info.mpeg.forward_reference      = last->surface;
 +    }
 +
 +    ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
 +
 +    render->info.mpeg.slice_count                = slice_count;
 +
 +    if (slice_count)
 +        ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
 +    render->bitstream_buffers_used               = 0;
 +}
 +#endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */
 +
 +#if CONFIG_VC1_VDPAU_DECODER
 +void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
 +                                 int buf_size)
 +{
 +    VC1Context *v = s->avctx->priv_data;
 +    struct vdpau_render_state *render, *last, *next;
 +
 +    render = (struct vdpau_render_state *)s->current_picture.f->data[0];
 +    assert(render);
 +
 +    /* fill VdpPictureInfoVC1 struct */
 +    render->info.vc1.frame_coding_mode  = v->fcm ? v->fcm + 1 : 0;
 +    render->info.vc1.postprocflag       = v->postprocflag;
 +    render->info.vc1.pulldown           = v->broadcast;
 +    render->info.vc1.interlace          = v->interlace;
 +    render->info.vc1.tfcntrflag         = v->tfcntrflag;
 +    render->info.vc1.finterpflag        = v->finterpflag;
 +    render->info.vc1.psf                = v->psf;
 +    render->info.vc1.dquant             = v->dquant;
 +    render->info.vc1.panscan_flag       = v->panscanflag;
 +    render->info.vc1.refdist_flag       = v->refdist_flag;
 +    render->info.vc1.quantizer          = v->quantizer_mode;
 +    render->info.vc1.extended_mv        = v->extended_mv;
 +    render->info.vc1.extended_dmv       = v->extended_dmv;
 +    render->info.vc1.overlap            = v->overlap;
 +    render->info.vc1.vstransform        = v->vstransform;
 +    render->info.vc1.loopfilter         = v->s.loop_filter;
 +    render->info.vc1.fastuvmc           = v->fastuvmc;
 +    render->info.vc1.range_mapy_flag    = v->range_mapy_flag;
 +    render->info.vc1.range_mapy         = v->range_mapy;
 +    render->info.vc1.range_mapuv_flag   = v->range_mapuv_flag;
 +    render->info.vc1.range_mapuv        = v->range_mapuv;
 +    /* Specific to simple/main profile only */
 +    render->info.vc1.multires           = v->multires;
 +    render->info.vc1.syncmarker         = v->resync_marker;
 +    render->info.vc1.rangered           = v->rangered | (v->rangeredfrm << 1);
 +    render->info.vc1.maxbframes         = v->s.max_b_frames;
 +
 +    render->info.vc1.deblockEnable      = v->postprocflag & 1;
 +    render->info.vc1.pquant             = v->pq;
 +
 +    render->info.vc1.forward_reference  = VDP_INVALID_HANDLE;
 +    render->info.vc1.backward_reference = VDP_INVALID_HANDLE;
 +
 +    if (v->bi_type)
 +        render->info.vc1.picture_type = 4;
 +    else
 +        render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
 +
 +    switch(s->pict_type){
 +    case  AV_PICTURE_TYPE_B:
 +        next = (struct vdpau_render_state *)s->next_picture.f->data[0];
 +        assert(next);
 +        render->info.vc1.backward_reference = next->surface;
 +        // no break here, going to set forward prediction
 +    case  AV_PICTURE_TYPE_P:
 +        last = (struct vdpau_render_state *)s->last_picture.f->data[0];
 +        if (!last) // FIXME: Does this test make sense?
 +            last = render; // predict second field from the first
 +        render->info.vc1.forward_reference = last->surface;
 +    }
 +
 +    ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
 +
 +    render->info.vc1.slice_count          = 1;
 +
 +    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
 +    render->bitstream_buffers_used        = 0;
 +}
 +#endif /* CONFIG_VC1_VDPAU_DECODER */
 +
 +#if CONFIG_MPEG4_VDPAU_DECODER
 +void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *ctx, const uint8_t *buf,
 +                                   int buf_size)
 +{
 +    MpegEncContext *s = &ctx->m;
 +    struct vdpau_render_state *render, *last, *next;
 +    int i;
 +
 +    if (!s->current_picture_ptr) return;
 +
 +    render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
 +    assert(render);
 +
 +    /* fill VdpPictureInfoMPEG4Part2 struct */
 +    render->info.mpeg4.trd[0]                            = s->pp_time;
 +    render->info.mpeg4.trb[0]                            = s->pb_time;
 +    render->info.mpeg4.trd[1]                            = s->pp_field_time >> 1;
 +    render->info.mpeg4.trb[1]                            = s->pb_field_time >> 1;
 +    render->info.mpeg4.vop_time_increment_resolution     = s->avctx->time_base.den;
 +    render->info.mpeg4.vop_coding_type                   = 0;
 +    render->info.mpeg4.vop_fcode_forward                 = s->f_code;
 +    render->info.mpeg4.vop_fcode_backward                = s->b_code;
 +    render->info.mpeg4.resync_marker_disable             = !ctx->resync_marker;
 +    render->info.mpeg4.interlaced                        = !s->progressive_sequence;
 +    render->info.mpeg4.quant_type                        = s->mpeg_quant;
 +    render->info.mpeg4.quarter_sample                    = s->quarter_sample;
 +    render->info.mpeg4.short_video_header                = s->avctx->codec->id == AV_CODEC_ID_H263;
 +    render->info.mpeg4.rounding_control                  = s->no_rounding;
 +    render->info.mpeg4.alternate_vertical_scan_flag      = s->alternate_scan;
 +    render->info.mpeg4.top_field_first                   = s->top_field_first;
 +    for (i = 0; i < 64; ++i) {
 +        render->info.mpeg4.intra_quantizer_matrix[i]     = s->intra_matrix[i];
 +        render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
 +    }
 +    render->info.mpeg4.forward_reference                 = VDP_INVALID_HANDLE;
 +    render->info.mpeg4.backward_reference                = VDP_INVALID_HANDLE;
 +
 +    switch (s->pict_type) {
 +    case AV_PICTURE_TYPE_B:
 +        next = (struct vdpau_render_state *)s->next_picture.f->data[0];
 +        assert(next);
 +        render->info.mpeg4.backward_reference     = next->surface;
 +        render->info.mpeg4.vop_coding_type        = 2;
 +        // no break here, going to set forward prediction
 +    case AV_PICTURE_TYPE_P:
 +        last = (struct vdpau_render_state *)s->last_picture.f->data[0];
 +        assert(last);
 +        render->info.mpeg4.forward_reference      = last->surface;
 +    }
 +
 +    ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
 +
 +    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
 +    render->bitstream_buffers_used = 0;
 +}
 +#endif /* CONFIG_MPEG4_VDPAU_DECODER */
 +#endif /* FF_API_VDPAU */
 +
  #if FF_API_VDPAU_PROFILE
  int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
  {
diff --cc libavcodec/videotoolbox.c
index 2f4d531,0000000..4dc843d
mode 100644,000000..100644
--- a/libavcodec/videotoolbox.c
+++ b/libavcodec/videotoolbox.c
@@@ -1,701 -1,0 +1,701 @@@
 +/*
 + * Videotoolbox hardware acceleration
 + *
 + * copyright (c) 2012 Sebastien Zwickert
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#include "config.h"
 +#if CONFIG_VIDEOTOOLBOX
 +#  include "videotoolbox.h"
 +#else
 +#  include "vda.h"
 +#endif
 +#include "vda_vt_internal.h"
 +#include "libavutil/avutil.h"
 +#include "bytestream.h"
 +#include "h264.h"
 +#include "mpegvideo.h"
 +
 +#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
 +#  define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
 +#endif
 +
 +#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING  12
 +
 +static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
 +{
 +    CVPixelBufferRef cv_buffer = (CVImageBufferRef)data;
 +    CVPixelBufferRelease(cv_buffer);
 +}
 +
 +static int videotoolbox_buffer_copy(VTContext *vtctx,
 +                                    const uint8_t *buffer,
 +                                    uint32_t size)
 +{
 +    void *tmp;
 +
 +    tmp = av_fast_realloc(vtctx->bitstream,
 +                         &vtctx->allocated_size,
 +                         size);
 +
 +    if (!tmp)
 +        return AVERROR(ENOMEM);
 +
 +    vtctx->bitstream = tmp;
 +    memcpy(vtctx->bitstream, buffer, size);
 +    vtctx->bitstream_size = size;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
 +{
 +    frame->width  = avctx->width;
 +    frame->height = avctx->height;
 +    frame->format = avctx->pix_fmt;
 +    frame->buf[0] = av_buffer_alloc(1);
 +
 +    if (!frame->buf[0])
 +        return AVERROR(ENOMEM);
 +
 +    return 0;
 +}
 +
 +#define AV_W8(p, v) *(p) = (v)
 +
 +CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
 +{
 +    H264Context *h     = avctx->priv_data;
 +    CFDataRef data = NULL;
 +    uint8_t *p;
-     int vt_extradata_size = 6 + 3 + h->sps.data_size + 4 + h->pps.data_size;
++    int vt_extradata_size = 6 + 3 + h->ps.sps->data_size + 4 + h->ps.pps->data_size;
 +    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
 +    if (!vt_extradata)
 +        return NULL;
 +
 +    p = vt_extradata;
 +
 +    AV_W8(p + 0, 1); /* version */
-     AV_W8(p + 1, h->sps.data[0]); /* profile */
-     AV_W8(p + 2, h->sps.data[1]); /* profile compat */
-     AV_W8(p + 3, h->sps.data[2]); /* level */
++    AV_W8(p + 1, h->ps.sps->data[0]); /* profile */
++    AV_W8(p + 2, h->ps.sps->data[1]); /* profile compat */
++    AV_W8(p + 3, h->ps.sps->data[2]); /* level */
 +    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
 +    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
-     AV_WB16(p + 6, h->sps.data_size + 1);
++    AV_WB16(p + 6, h->ps.sps->data_size + 1);
 +    AV_W8(p + 8, NAL_SPS | (3 << 5)); // NAL unit header
-     memcpy(p + 9, h->sps.data, h->sps.data_size);
-     p += 9 + h->sps.data_size;
++    memcpy(p + 9, h->ps.sps->data, h->ps.sps->data_size);
++    p += 9 + h->ps.sps->data_size;
 +    AV_W8(p + 0, 1); /* number of pps */
-     AV_WB16(p + 1, h->pps.data_size + 1);
++    AV_WB16(p + 1, h->ps.pps->data_size + 1);
 +    AV_W8(p + 3, NAL_PPS | (3 << 5)); // NAL unit header
-     memcpy(p + 4, h->pps.data, h->pps.data_size);
++    memcpy(p + 4, h->ps.pps->data, h->ps.pps->data_size);
 +
-     p += 4 + h->pps.data_size;
++    p += 4 + h->ps.pps->data_size;
 +    av_assert0(p - vt_extradata == vt_extradata_size);
 +
 +    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
 +    av_free(vt_extradata);
 +    return data;
 +}
 +
 +int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
 +{
 +    av_buffer_unref(&frame->buf[0]);
 +
 +    frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
 +                                     sizeof(vtctx->frame),
 +                                     videotoolbox_buffer_release,
 +                                     NULL,
 +                                     AV_BUFFER_FLAG_READONLY);
 +    if (!frame->buf[0]) {
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    frame->data[3] = (uint8_t*)vtctx->frame;
 +    vtctx->frame = NULL;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
 +                                     const uint8_t *buffer,
 +                                     uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    H264Context *h  = avctx->priv_data;
 +
 +    vtctx->bitstream_size = 0;
 +
 +    if (h->is_avc == 1) {
 +        return videotoolbox_buffer_copy(vtctx, buffer, size);
 +    }
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
 +                                      const uint8_t *buffer,
 +                                      uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    H264Context *h  = avctx->priv_data;
 +    void *tmp;
 +
 +    if (h->is_avc == 1)
 +        return 0;
 +
 +    tmp = av_fast_realloc(vtctx->bitstream,
 +                          &vtctx->allocated_size,
 +                          vtctx->bitstream_size+size+4);
 +    if (!tmp)
 +        return AVERROR(ENOMEM);
 +
 +    vtctx->bitstream = tmp;
 +
 +    AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
 +    memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
 +
 +    vtctx->bitstream_size += size + 4;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_uninit(AVCodecContext *avctx)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    if (vtctx) {
 +        av_freep(&vtctx->bitstream);
 +        if (vtctx->frame)
 +            CVPixelBufferRelease(vtctx->frame);
 +    }
 +
 +    return 0;
 +}
 +
 +#if CONFIG_VIDEOTOOLBOX
 +static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
 +{
 +    int i;
 +    uint8_t b;
 +
 +    for (i = 3; i >= 0; i--) {
 +        b = (length >> (i * 7)) & 0x7F;
 +        if (i != 0)
 +            b |= 0x80;
 +
 +        bytestream2_put_byteu(pb, b);
 +    }
 +}
 +
 +static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
 +{
 +    CFDataRef data;
 +    uint8_t *rw_extradata;
 +    PutByteContext pb;
 +    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
 +    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
 +    int config_size = 13 + 5 + avctx->extradata_size;
 +    int s;
 +
 +    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
 +        return NULL;
 +
 +    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
 +    bytestream2_put_byteu(&pb, 0);        // version
 +    bytestream2_put_ne24(&pb, 0);         // flags
 +
 +    // elementary stream descriptor
 +    bytestream2_put_byteu(&pb, 0x03);     // ES_DescrTag
 +    videotoolbox_write_mp4_descr_length(&pb, full_size);
 +    bytestream2_put_ne16(&pb, 0);         // esid
 +    bytestream2_put_byteu(&pb, 0);        // stream priority (0-32)
 +
 +    // decoder configuration descriptor
 +    bytestream2_put_byteu(&pb, 0x04);     // DecoderConfigDescrTag
 +    videotoolbox_write_mp4_descr_length(&pb, config_size);
 +    bytestream2_put_byteu(&pb, 32);       // object type indication. 32 = AV_CODEC_ID_MPEG4
 +    bytestream2_put_byteu(&pb, 0x11);     // stream type
 +    bytestream2_put_ne24(&pb, 0);         // buffer size
 +    bytestream2_put_ne32(&pb, 0);         // max bitrate
 +    bytestream2_put_ne32(&pb, 0);         // avg bitrate
 +
 +    // decoder specific descriptor
 +    bytestream2_put_byteu(&pb, 0x05);     ///< DecSpecificInfoTag
 +    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
 +
 +    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
 +
 +    // SLConfigDescriptor
 +    bytestream2_put_byteu(&pb, 0x06);     // SLConfigDescrTag
 +    bytestream2_put_byteu(&pb, 0x01);     // length
 +    bytestream2_put_byteu(&pb, 0x02);     //
 +
 +    s = bytestream2_size_p(&pb);
 +
 +    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
 +
 +    av_freep(&rw_extradata);
 +    return data;
 +}
 +
 +static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
 +                                                           void *buffer,
 +                                                           int size)
 +{
 +    OSStatus status;
 +    CMBlockBufferRef  block_buf;
 +    CMSampleBufferRef sample_buf;
 +
 +    block_buf  = NULL;
 +    sample_buf = NULL;
 +
 +    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
 +                                                buffer,             // memoryBlock
 +                                                size,               // blockLength
 +                                                kCFAllocatorNull,   // blockAllocator
 +                                                NULL,               // customBlockSource
 +                                                0,                  // offsetToData
 +                                                size,               // dataLength
 +                                                0,                  // flags
 +                                                &block_buf);
 +
 +    if (!status) {
 +        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
 +                                      block_buf,            // dataBuffer
 +                                      TRUE,                 // dataReady
 +                                      0,                    // makeDataReadyCallback
 +                                      0,                    // makeDataReadyRefcon
 +                                      fmt_desc,             // formatDescription
 +                                      1,                    // numSamples
 +                                      0,                    // numSampleTimingEntries
 +                                      NULL,                 // sampleTimingArray
 +                                      0,                    // numSampleSizeEntries
 +                                      NULL,                 // sampleSizeArray
 +                                      &sample_buf);
 +    }
 +
 +    if (block_buf)
 +        CFRelease(block_buf);
 +
 +    return sample_buf;
 +}
 +
 +static void videotoolbox_decoder_callback(void *opaque,
 +                                          void *sourceFrameRefCon,
 +                                          OSStatus status,
 +                                          VTDecodeInfoFlags flags,
 +                                          CVImageBufferRef image_buffer,
 +                                          CMTime pts,
 +                                          CMTime duration)
 +{
 +    AVCodecContext *avctx = opaque;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    if (vtctx->frame) {
 +        CVPixelBufferRelease(vtctx->frame);
 +        vtctx->frame = NULL;
 +    }
 +
 +    if (!image_buffer) {
 +        av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
 +        return;
 +    }
 +
 +    vtctx->frame = CVPixelBufferRetain(image_buffer);
 +}
 +
 +static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
 +{
 +    OSStatus status;
 +    CMSampleBufferRef sample_buf;
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
 +                                                   vtctx->bitstream,
 +                                                   vtctx->bitstream_size);
 +
 +    if (!sample_buf)
 +        return -1;
 +
 +    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
 +                                               sample_buf,
 +                                               0,       // decodeFlags
 +                                               NULL,    // sourceFrameRefCon
 +                                               0);      // infoFlagsOut
 +    if (status == noErr)
 +        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
 +
 +    CFRelease(sample_buf);
 +
 +    return status;
 +}
 +
 +static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
 +{
 +    int status;
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    av_buffer_unref(&frame->buf[0]);
 +
 +    if (!videotoolbox->session || !vtctx->bitstream)
 +        return AVERROR_INVALIDDATA;
 +
 +    status = videotoolbox_session_decode_frame(avctx);
 +
 +    if (status) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
 +        return AVERROR_UNKNOWN;
 +    }
 +
 +    if (!vtctx->frame)
 +        return AVERROR_UNKNOWN;
 +
 +    return ff_videotoolbox_buffer_create(vtctx, frame);
 +}
 +
 +static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
 +{
 +    H264Context *h = avctx->priv_data;
 +    AVFrame *frame = h->cur_pic_ptr->f;
 +
 +    return videotoolbox_common_end_frame(avctx, frame);
 +}
 +
 +static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
 +                                         const uint8_t *buffer,
 +                                         uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    return videotoolbox_buffer_copy(vtctx, buffer, size);
 +}
 +
 +static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
 +                                          const uint8_t *buffer,
 +                                          uint32_t size)
 +{
 +    return 0;
 +}
 +
 +static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
 +{
 +    MpegEncContext *s = avctx->priv_data;
 +    AVFrame *frame = s->current_picture_ptr->f;
 +
 +    return videotoolbox_common_end_frame(avctx, frame);
 +}
 +
 +static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
 +                                                          AVCodecContext *avctx)
 +{
 +    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                                   1,
 +                                                                   &kCFTypeDictionaryKeyCallBacks,
 +                                                                   &kCFTypeDictionaryValueCallBacks);
 +
 +    CFDictionarySetValue(config_info,
 +                         kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
 +                         kCFBooleanTrue);
 +
 +    if (avctx->extradata_size) {
 +        CFMutableDictionaryRef avc_info;
 +        CFDataRef data = NULL;
 +
 +        avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                             1,
 +                                             &kCFTypeDictionaryKeyCallBacks,
 +                                             &kCFTypeDictionaryValueCallBacks);
 +
 +        switch (codec_type) {
 +        case kCMVideoCodecType_MPEG4Video :
 +            data = videotoolbox_esds_extradata_create(avctx);
 +            if (data)
 +                CFDictionarySetValue(avc_info, CFSTR("esds"), data);
 +            break;
 +        case kCMVideoCodecType_H264 :
 +            data = ff_videotoolbox_avcc_extradata_create(avctx);
 +            if (data)
 +                CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
 +            break;
 +        default:
 +            break;
 +        }
 +
 +        CFDictionarySetValue(config_info,
 +                kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
 +                avc_info);
 +
 +        if (data)
 +            CFRelease(data);
 +
 +        CFRelease(avc_info);
 +    }
 +    return config_info;
 +}
 +
 +static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
 +                                                             int height,
 +                                                             OSType pix_fmt)
 +{
 +    CFMutableDictionaryRef buffer_attributes;
 +    CFMutableDictionaryRef io_surface_properties;
 +    CFNumberRef cv_pix_fmt;
 +    CFNumberRef w;
 +    CFNumberRef h;
 +
 +    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
 +    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
 +    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
 +
 +    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                  4,
 +                                                  &kCFTypeDictionaryKeyCallBacks,
 +                                                  &kCFTypeDictionaryValueCallBacks);
 +    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                      0,
 +                                                      &kCFTypeDictionaryKeyCallBacks,
 +                                                      &kCFTypeDictionaryValueCallBacks);
 +
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
 +
 +    CFRelease(io_surface_properties);
 +    CFRelease(cv_pix_fmt);
 +    CFRelease(w);
 +    CFRelease(h);
 +
 +    return buffer_attributes;
 +}
 +
 +static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
 +                                                                   CFDictionaryRef decoder_spec,
 +                                                                   int width,
 +                                                                   int height)
 +{
 +    CMFormatDescriptionRef cm_fmt_desc;
 +    OSStatus status;
 +
 +    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
 +                                            codec_type,
 +                                            width,
 +                                            height,
 +                                            decoder_spec, // Dictionary of extension
 +                                            &cm_fmt_desc);
 +
 +    if (status)
 +        return NULL;
 +
 +    return cm_fmt_desc;
 +}
 +
 +static int videotoolbox_default_init(AVCodecContext *avctx)
 +{
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    OSStatus status;
 +    VTDecompressionOutputCallbackRecord decoder_cb;
 +    CFDictionaryRef decoder_spec;
 +    CFDictionaryRef buf_attr;
 +
 +    if (!videotoolbox) {
 +        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
 +        return -1;
 +    }
 +
 +    switch( avctx->codec_id ) {
 +    case AV_CODEC_ID_H263 :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
 +        break;
 +    case AV_CODEC_ID_H264 :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
 +        break;
 +    case AV_CODEC_ID_MPEG1VIDEO :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
 +        break;
 +    case AV_CODEC_ID_MPEG2VIDEO :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
 +        break;
 +    case AV_CODEC_ID_MPEG4 :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
 +        break;
 +    default :
 +        break;
 +    }
 +
 +    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
 +
 +    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
 +                                                                decoder_spec,
 +                                                                avctx->width,
 +                                                                avctx->height);
 +    if (!videotoolbox->cm_fmt_desc) {
 +        if (decoder_spec)
 +            CFRelease(decoder_spec);
 +
 +        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
 +        return -1;
 +    }
 +
 +    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
 +                                                     avctx->height,
 +                                                     videotoolbox->cv_pix_fmt_type);
 +
 +    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
 +    decoder_cb.decompressionOutputRefCon   = avctx;
 +
 +    status = VTDecompressionSessionCreate(NULL,                      // allocator
 +                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
 +                                          decoder_spec,              // videoDecoderSpecification
 +                                          buf_attr,                  // destinationImageBufferAttributes
 +                                          &decoder_cb,               // outputCallback
 +                                          &videotoolbox->session);   // decompressionSessionOut
 +
 +    if (decoder_spec)
 +        CFRelease(decoder_spec);
 +    if (buf_attr)
 +        CFRelease(buf_attr);
 +
 +    switch (status) {
 +    case kVTVideoDecoderNotAvailableNowErr:
 +    case kVTVideoDecoderUnsupportedDataFormatErr:
 +        return AVERROR(ENOSYS);
 +    case kVTVideoDecoderMalfunctionErr:
 +        return AVERROR(EINVAL);
 +    case kVTVideoDecoderBadDataErr :
 +        return AVERROR_INVALIDDATA;
 +    case 0:
 +        return 0;
 +    default:
 +        return AVERROR_UNKNOWN;
 +    }
 +}
 +
 +static void videotoolbox_default_free(AVCodecContext *avctx)
 +{
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +
 +    if (videotoolbox) {
 +        if (videotoolbox->cm_fmt_desc)
 +            CFRelease(videotoolbox->cm_fmt_desc);
 +
 +        if (videotoolbox->session)
 +            VTDecompressionSessionInvalidate(videotoolbox->session);
 +    }
 +}
 +
 +AVHWAccel ff_h263_videotoolbox_hwaccel = {
 +    .name           = "h263_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H263,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_h264_videotoolbox_hwaccel = {
 +    .name           = "h264_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = ff_videotoolbox_h264_start_frame,
 +    .decode_slice   = ff_videotoolbox_h264_decode_slice,
 +    .end_frame      = videotoolbox_h264_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
 +    .name           = "mpeg1_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG1VIDEO,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
 +    .name           = "mpeg2_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG2VIDEO,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
 +    .name           = "mpeg4_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG4,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
 +{
 +    AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
 +
 +    if (ret) {
 +        ret->output_callback = videotoolbox_decoder_callback;
 +        ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
 +    }
 +
 +    return ret;
 +}
 +
 +int av_videotoolbox_default_init(AVCodecContext *avctx)
 +{
 +    return av_videotoolbox_default_init2(avctx, NULL);
 +}
 +
 +int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
 +{
 +    avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
 +    if (!avctx->hwaccel_context)
 +        return AVERROR(ENOMEM);
 +    return videotoolbox_default_init(avctx);
 +}
 +
 +void av_videotoolbox_default_free(AVCodecContext *avctx)
 +{
 +
 +    videotoolbox_default_free(avctx);
 +    av_freep(&avctx->hwaccel_context);
 +}
 +#endif /* CONFIG_VIDEOTOOLBOX */
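
For readers cross-checking the videotoolbox hunk above: the avcC blob built by
ff_videotoolbox_avcc_extradata_create() accounts for its size as follows (a
reading aid derived from the writes in that hunk, not part of the patch):

    6 bytes                      version, profile, profile compat, level, 0xff, 0xe1
    3 bytes                      16-bit SPS length + SPS NAL unit header
    h->ps.sps->data_size bytes   SPS payload
    4 bytes                      PPS count + 16-bit PPS length + PPS NAL unit header
    h->ps.pps->data_size bytes   PPS payload

which is exactly the vt_extradata_size = 6 + 3 + sps + 4 + pps computation that
the final av_assert0() verifies.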

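The hwaccel changes throughout this merge follow one pattern: parameter sets are
no longer embedded copies (h->sps / h->pps) but pointers reached through h->ps.
A minimal standalone sketch of that access pattern follows; the struct
definitions are simplified stand-ins for illustration, not the real libavcodec
headers:

    #include <stdio.h>

    /* simplified stand-ins, NOT the real libavcodec definitions */
    typedef struct SPS { int frame_mbs_only_flag; int log2_max_frame_num; } SPS;
    typedef struct PPS { int cabac; int init_qp; } PPS;
    typedef struct H264ParamSets { const SPS *sps; const PPS *pps; } H264ParamSets;
    typedef struct H264Context  { H264ParamSets ps; } H264Context;

    /* hwaccel-style fill function: fetch the active SPS/PPS once, then read fields */
    static int fill_info(const H264Context *h)
    {
        const SPS *sps = h->ps.sps;
        const PPS *pps = h->ps.pps;

        if (!sps || !pps)   /* may be unset before the first slice is parsed */
            return -1;

        printf("frame_mbs_only_flag=%d log2_max_frame_num_minus4=%d "
               "entropy_coding_mode_flag=%d pic_init_qp_minus26=%d\n",
               sps->frame_mbs_only_flag, sps->log2_max_frame_num - 4,
               pps->cabac, pps->init_qp - 26);
        return 0;
    }

    int main(void)
    {
        SPS sps = { .frame_mbs_only_flag = 1, .log2_max_frame_num = 8 };
        PPS pps = { .cabac = 1, .init_qp = 26 };
        H264Context h = { .ps = { &sps, &pps } };
        return fill_info(&h);
    }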


More information about the ffmpeg-cvslog mailing list