[FFmpeg-cvslog] avcodec/vp8, vp9: Avoid using VP56mv and VP56Frame in VP8/9

Andreas Rheinhardt git at videolan.org
Thu Jul 28 05:01:11 EEST 2022


ffmpeg | branch: master | Andreas Rheinhardt <andreas.rheinhardt at outlook.com> | Sat Jul 23 02:01:41 2022 +0200| [6f7d3bde11b439da106e4226b7d115afded4086d] | committer: Andreas Rheinhardt

avcodec/vp8, vp9: Avoid using VP56mv and VP56Frame in VP8/9

Instead, replace VP56mv with new, identical structures VP8mv and VP9mv.
Also replace VP56Frame with a new VP8FrameType enum in vp8.h and use
that in the VP8 code. Also remove VP56_FRAME_GOLDEN2, as it has only
ever been used by VP8, and use VP8_FRAME_ALTREF in its place, which is
more in line with VP8 terminology.
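
For reference, the replacement types added to vp8.h and vp9shared.h by
this patch look like the following (excerpted from the diff below;
DECLARE_ALIGNED comes from libavutil/mem_internal.h):

    /* vp8.h: takes over the reference-frame roles of VP56Frame, with
     * VP8_FRAME_ALTREF replacing the VP8 use of VP56_FRAME_GOLDEN2 */
    typedef enum {
        VP8_FRAME_NONE     = -1,
        VP8_FRAME_CURRENT  =  0,
        VP8_FRAME_PREVIOUS =  1,
        VP8_FRAME_GOLDEN   =  2,
        VP8_FRAME_ALTREF   =  3,
    } VP8FrameType;

    /* vp8.h and vp9shared.h: layout-identical replacements for VP56mv */
    typedef struct VP8mv {
        DECLARE_ALIGNED(4, int16_t, x);
        int16_t y;
    } VP8mv;

    typedef struct VP9mv {
        DECLARE_ALIGNED(4, int16_t, x);
        int16_t y;
    } VP9mv;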

This allows removing all inclusions of vp56.h from everything
that is not VP5/6. It also removes the implicit inclusions
of hpeldsp.h, h264chroma.h, vp3dsp.h and vp56dsp.h from all VP8/9
files.

(This also fixes a build issue: if one compiles with -O0 and disables
everything except the VP8-VAAPI encoder, the file containing
ff_vpx_norm_shift is not compiled, yet that symbol is used implicitly
by vp56_rac_gets_nn(), which is defined in vp56.h. The function is
unused by the VP8-VAAPI encoder and declared av_unused, yet with -O0
unused non-inline functions are not optimized away, leading to
linking failures. With this patch, said function is no longer
included in vaapi_encode_vp8.c.)
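
A minimal sketch of that failure mode, using hypothetical names rather
than the actual FFmpeg headers: at -O0 an unused non-inline function
defined in a header is still emitted into every translation unit that
includes the header, so every external symbol it references must be
resolvable at link time even though the function is never called.

    /* some_header.h -- hypothetical stand-in for vp56.h */
    #include <stdint.h>
    #define av_unused __attribute__((unused))  /* as in libavutil/attributes.h */

    extern const uint8_t some_table[256];  /* defined in a file that is not
                                            * built in this configuration */

    static av_unused int read_something(void)
    {
        return some_table[0];  /* at -O0 this body is kept, so the reference
                                * to some_table survives into the object file */
    }

    /* user.c -- hypothetical stand-in for vaapi_encode_vp8.c: merely
     * including the header now makes linking fail with an undefined
     * reference to some_table, even though read_something() is never
     * called. */
    #include "some_header.h"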

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt at outlook.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=6f7d3bde11b439da106e4226b7d115afded4086d
---

 libavcodec/nvdec_vp8.c       |   8 +--
 libavcodec/vaapi_vp8.c       |  18 +++---
 libavcodec/vp56.h            |   1 -
 libavcodec/vp8.c             | 138 +++++++++++++++++++++----------------------
 libavcodec/vp8.h             |  35 +++++++----
 libavcodec/vp9.c             |   3 +-
 libavcodec/vp9_mc_template.c |   6 +-
 libavcodec/vp9block.c        |   1 -
 libavcodec/vp9dec.h          |  10 ++--
 libavcodec/vp9mvs.c          |  11 ++--
 libavcodec/vp9prob.c         |   2 -
 libavcodec/vp9recon.c        |  12 ++--
 libavcodec/vp9shared.h       |  10 +++-
 13 files changed, 135 insertions(+), 120 deletions(-)

diff --git a/libavcodec/nvdec_vp8.c b/libavcodec/nvdec_vp8.c
index 9c4608d8cf..f174ca430f 100644
--- a/libavcodec/nvdec_vp8.c
+++ b/libavcodec/nvdec_vp8.c
@@ -39,7 +39,7 @@ static int nvdec_vp8_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
     CUVIDPICPARAMS     *pp = &ctx->pic_params;
     FrameDecodeData *fdd;
     NVDECFrame *cf;
-    AVFrame *cur_frame = h->framep[VP56_FRAME_CURRENT]->tf.f;
+    AVFrame *cur_frame = h->framep[VP8_FRAME_CURRENT]->tf.f;
 
     int ret;
 
@@ -61,9 +61,9 @@ static int nvdec_vp8_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
 
             .first_partition_size        = h->header_partition_size,
 
-            .LastRefIdx                  = safe_get_ref_idx(h->framep[VP56_FRAME_PREVIOUS]),
-            .GoldenRefIdx                = safe_get_ref_idx(h->framep[VP56_FRAME_GOLDEN]),
-            .AltRefIdx                   = safe_get_ref_idx(h->framep[VP56_FRAME_GOLDEN2]),
+            .LastRefIdx                  = safe_get_ref_idx(h->framep[VP8_FRAME_PREVIOUS]),
+            .GoldenRefIdx                = safe_get_ref_idx(h->framep[VP8_FRAME_GOLDEN]),
+            .AltRefIdx                   = safe_get_ref_idx(h->framep[VP8_FRAME_ALTREF]),
             /*
              * Explicit braces for anonymous inners and unnamed fields
              * to work around limitations in ancient versions of gcc.
diff --git a/libavcodec/vaapi_vp8.c b/libavcodec/vaapi_vp8.c
index 06c23e760b..5b18bf8f34 100644
--- a/libavcodec/vaapi_vp8.c
+++ b/libavcodec/vaapi_vp8.c
@@ -36,21 +36,21 @@ static int vaapi_vp8_start_frame(AVCodecContext          *avctx,
                                  av_unused uint32_t       size)
 {
     const VP8Context *s = avctx->priv_data;
-    VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
+    VAAPIDecodePicture *pic = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
     VAPictureParameterBufferVP8 pp;
     VAProbabilityDataBufferVP8 prob;
     VAIQMatrixBufferVP8 quant;
     int err, i, j, k;
 
-    pic->output_surface = vaapi_vp8_surface_id(s->framep[VP56_FRAME_CURRENT]);
+    pic->output_surface = vaapi_vp8_surface_id(s->framep[VP8_FRAME_CURRENT]);
 
     pp = (VAPictureParameterBufferVP8) {
         .frame_width                     = avctx->width,
         .frame_height                    = avctx->height,
 
-        .last_ref_frame                  = vaapi_vp8_surface_id(s->framep[VP56_FRAME_PREVIOUS]),
-        .golden_ref_frame                = vaapi_vp8_surface_id(s->framep[VP56_FRAME_GOLDEN]),
-        .alt_ref_frame                   = vaapi_vp8_surface_id(s->framep[VP56_FRAME_GOLDEN2]),
+        .last_ref_frame                  = vaapi_vp8_surface_id(s->framep[VP8_FRAME_PREVIOUS]),
+        .golden_ref_frame                = vaapi_vp8_surface_id(s->framep[VP8_FRAME_GOLDEN]),
+        .alt_ref_frame                   = vaapi_vp8_surface_id(s->framep[VP8_FRAME_ALTREF]),
         .out_of_loop_frame               = VA_INVALID_SURFACE,
 
         .pic_fields.bits = {
@@ -67,8 +67,8 @@ static int vaapi_vp8_start_frame(AVCodecContext          *avctx,
             .loop_filter_adj_enable      = s->lf_delta.enabled,
             .mode_ref_lf_delta_update    = s->lf_delta.update,
 
-            .sign_bias_golden            = s->sign_bias[VP56_FRAME_GOLDEN],
-            .sign_bias_alternate         = s->sign_bias[VP56_FRAME_GOLDEN2],
+            .sign_bias_golden            = s->sign_bias[VP8_FRAME_GOLDEN],
+            .sign_bias_alternate         = s->sign_bias[VP8_FRAME_ALTREF],
 
             .mb_no_coeff_skip            = s->mbskip_enabled,
             .loop_filter_disable         = s->filter.level == 0,
@@ -177,7 +177,7 @@ fail:
 static int vaapi_vp8_end_frame(AVCodecContext *avctx)
 {
     const VP8Context *s = avctx->priv_data;
-    VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
+    VAAPIDecodePicture *pic = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
 
     return ff_vaapi_decode_issue(avctx, pic);
 }
@@ -187,7 +187,7 @@ static int vaapi_vp8_decode_slice(AVCodecContext *avctx,
                                   uint32_t        size)
 {
     const VP8Context *s = avctx->priv_data;
-    VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
+    VAAPIDecodePicture *pic = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
     VASliceParameterBufferVP8 sp;
     int err, i;
 
diff --git a/libavcodec/vp56.h b/libavcodec/vp56.h
index b1b14b63f8..9dc0b9c7ad 100644
--- a/libavcodec/vp56.h
+++ b/libavcodec/vp56.h
@@ -44,7 +44,6 @@ typedef enum {
     VP56_FRAME_CURRENT  = 0,
     VP56_FRAME_PREVIOUS = 1,
     VP56_FRAME_GOLDEN   = 2,
-    VP56_FRAME_GOLDEN2  = 3,
 } VP56Frame;
 
 typedef enum {
diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c
index de1fdac82e..06752e8c37 100644
--- a/libavcodec/vp8.c
+++ b/libavcodec/vp8.c
@@ -191,10 +191,10 @@ static VP8Frame *vp8_find_free_buffer(VP8Context *s)
 
     // find a free buffer
     for (i = 0; i < 5; i++)
-        if (&s->frames[i] != s->framep[VP56_FRAME_CURRENT]  &&
-            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
-            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN]   &&
-            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
+        if (&s->frames[i] != s->framep[VP8_FRAME_CURRENT]  &&
+            &s->frames[i] != s->framep[VP8_FRAME_PREVIOUS] &&
+            &s->frames[i] != s->framep[VP8_FRAME_GOLDEN]   &&
+            &s->frames[i] != s->framep[VP8_FRAME_ALTREF]) {
             frame = &s->frames[i];
             break;
         }
@@ -435,28 +435,28 @@ static void vp8_get_quants(VP8Context *s)
  * The spec isn't clear here, so I'm going by my understanding of what libvpx does
  *
  * Intra frames update all 3 references
- * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set
+ * Inter frames update VP8_FRAME_PREVIOUS if the update_last flag is set
  * If the update (golden|altref) flag is set, it's updated with the current frame
- *      if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
+ *      if update_last is set, and VP8_FRAME_PREVIOUS otherwise.
  * If the flag is not set, the number read means:
  *      0: no update
- *      1: VP56_FRAME_PREVIOUS
+ *      1: VP8_FRAME_PREVIOUS
  *      2: update golden with altref, or update altref with golden
  */
-static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
+static VP8FrameType ref_to_update(VP8Context *s, int update, VP8FrameType ref)
 {
     VPXRangeCoder *c = &s->c;
 
     if (update)
-        return VP56_FRAME_CURRENT;
+        return VP8_FRAME_CURRENT;
 
     switch (vp89_rac_get_uint(c, 2)) {
     case 1:
-        return VP56_FRAME_PREVIOUS;
+        return VP8_FRAME_PREVIOUS;
     case 2:
-        return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
+        return (ref == VP8_FRAME_GOLDEN) ? VP8_FRAME_ALTREF : VP8_FRAME_GOLDEN;
     }
-    return VP56_FRAME_NONE;
+    return VP8_FRAME_NONE;
 }
 
 static void vp78_reset_probability_tables(VP8Context *s)
@@ -514,8 +514,8 @@ static void update_refs(VP8Context *s)
     int update_golden = vp89_rac_get(c);
     int update_altref = vp89_rac_get(c);
 
-    s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
-    s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
+    s->update_golden = ref_to_update(s, update_golden, VP8_FRAME_GOLDEN);
+    s->update_altref = ref_to_update(s, update_altref, VP8_FRAME_ALTREF);
 }
 
 static void copy_chroma(AVFrame *dst, AVFrame *src, int width, int height)
@@ -554,22 +554,22 @@ static int vp7_fade_frame(VP8Context *s, int alpha, int beta)
         int height = s->mb_height * 16;
         AVFrame *src, *dst;
 
-        if (!s->framep[VP56_FRAME_PREVIOUS] ||
-            !s->framep[VP56_FRAME_GOLDEN]) {
+        if (!s->framep[VP8_FRAME_PREVIOUS] ||
+            !s->framep[VP8_FRAME_GOLDEN]) {
             av_log(s->avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
             return AVERROR_INVALIDDATA;
         }
 
         dst =
-        src = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
+        src = s->framep[VP8_FRAME_PREVIOUS]->tf.f;
 
         /* preserve the golden frame, write a new previous frame */
-        if (s->framep[VP56_FRAME_GOLDEN] == s->framep[VP56_FRAME_PREVIOUS]) {
-            s->framep[VP56_FRAME_PREVIOUS] = vp8_find_free_buffer(s);
-            if ((ret = vp8_alloc_frame(s, s->framep[VP56_FRAME_PREVIOUS], 1)) < 0)
+        if (s->framep[VP8_FRAME_GOLDEN] == s->framep[VP8_FRAME_PREVIOUS]) {
+            s->framep[VP8_FRAME_PREVIOUS] = vp8_find_free_buffer(s);
+            if ((ret = vp8_alloc_frame(s, s->framep[VP8_FRAME_PREVIOUS], 1)) < 0)
                 return ret;
 
-            dst = s->framep[VP56_FRAME_PREVIOUS]->tf.f;
+            dst = s->framep[VP8_FRAME_PREVIOUS]->tf.f;
 
             copy_chroma(dst, src, width, height);
         }
@@ -630,7 +630,7 @@ static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
         if (hscale || vscale)
             avpriv_request_sample(s->avctx, "Upscaling");
 
-        s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
+        s->update_golden = s->update_altref = VP8_FRAME_CURRENT;
         vp78_reset_probability_tables(s);
         memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
                sizeof(s->prob->pred16x16));
@@ -685,8 +685,8 @@ static int vp7_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
 
     /* D. Golden frame update flag (a Flag) for interframes only */
     if (!s->keyframe) {
-        s->update_golden = vp89_rac_get(c) ? VP56_FRAME_CURRENT : VP56_FRAME_NONE;
-        s->sign_bias[VP56_FRAME_GOLDEN] = 0;
+        s->update_golden = vp89_rac_get(c) ? VP8_FRAME_CURRENT : VP8_FRAME_NONE;
+        s->sign_bias[VP8_FRAME_GOLDEN] = 0;
     }
 
     s->update_last          = 1;
@@ -798,7 +798,7 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
         if (hscale || vscale)
             avpriv_request_sample(s->avctx, "Upscaling");
 
-        s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
+        s->update_golden = s->update_altref = VP8_FRAME_CURRENT;
         vp78_reset_probability_tables(s);
         memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter,
                sizeof(s->prob->pred16x16));
@@ -853,8 +853,8 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
 
     if (!s->keyframe) {
         update_refs(s);
-        s->sign_bias[VP56_FRAME_GOLDEN]               = vp89_rac_get(c);
-        s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp89_rac_get(c);
+        s->sign_bias[VP8_FRAME_GOLDEN] = vp89_rac_get(c);
+        s->sign_bias[VP8_FRAME_ALTREF] = vp89_rac_get(c);
     }
 
     // if we aren't saving this frame's probabilities for future frames,
@@ -887,7 +887,7 @@ static int vp8_decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_si
 }
 
 static av_always_inline
-void clamp_mv(VP8mvbounds *s, VP56mv *dst, const VP56mv *src)
+void clamp_mv(VP8mvbounds *s, VP8mv *dst, const VP8mv *src)
 {
     dst->x = av_clip(src->x, av_clip(s->mv_min.x, INT16_MIN, INT16_MAX),
                              av_clip(s->mv_max.x, INT16_MIN, INT16_MAX));
@@ -963,9 +963,9 @@ int decode_splitmvs(VP8Context *s, VPXRangeCoder *c, VP8Macroblock *mb,
     VP8Macroblock *left_mb = &mb[-1];
     const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning];
     const uint8_t *mbsplits_top, *mbsplits_cur, *firstidx;
-    VP56mv *top_mv;
-    VP56mv *left_mv = left_mb->bmv;
-    VP56mv *cur_mv  = mb->bmv;
+    VP8mv *top_mv;
+    VP8mv *left_mv = left_mb->bmv;
+    VP8mv *cur_mv  = mb->bmv;
 
     if (!layout) // layout is inlined, s->mb_layout is not
         top_mb = &mb[2];
@@ -1049,7 +1049,7 @@ static int vp7_calculate_mb_offset(int mb_x, int mb_y, int mb_width,
     return 1;
 }
 
-static const VP56mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
+static const VP8mv *get_bmv_ptr(const VP8Macroblock *mb, int subblock)
 {
     return &mb->bmv[mb->mode == VP8_MVMODE_SPLIT ? vp8_mbsplits[mb->partitioning][subblock] : 0];
 }
@@ -1062,7 +1062,7 @@ void vp7_decode_mvs(VP8Context *s, VP8Macroblock *mb,
     enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR };
     enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
     int idx = CNT_ZERO;
-    VP56mv near_mv[3];
+    VP8mv near_mv[3];
     uint8_t cnt[3] = { 0 };
     VPXRangeCoder *c = &s->c;
     int i;
@@ -1157,7 +1157,7 @@ void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
     int idx = CNT_ZERO;
     int cur_sign_bias = s->sign_bias[mb->ref_frame];
     int8_t *sign_bias = s->sign_bias;
-    VP56mv near_mv[4];
+    VP8mv near_mv[4];
     uint8_t cnt[4] = { 0 };
     VPXRangeCoder *c = &s->c;
 
@@ -1178,7 +1178,7 @@ void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
     {                                                                         \
         VP8Macroblock *edge = mb_edge[n];                                     \
         int edge_ref = edge->ref_frame;                                       \
-        if (edge_ref != VP56_FRAME_CURRENT) {                                 \
+        if (edge_ref != VP8_FRAME_CURRENT) {                                 \
             uint32_t mv = AV_RN32A(&edge->mv);                                \
             if (mv) {                                                         \
                 if (cur_sign_bias != sign_bias[edge_ref]) {                   \
@@ -1211,7 +1211,7 @@ void vp8_decode_mvs(VP8Context *s, VP8mvbounds *mv_bounds, VP8Macroblock *mb,
         /* Swap near and nearest if necessary */
         if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
             FFSWAP(uint8_t,     cnt[CNT_NEAREST],     cnt[CNT_NEAR]);
-            FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
+            FFSWAP(VP8mv,   near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
         }
 
         if (vpx_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
@@ -1331,15 +1331,15 @@ void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds,
 
         mb->chroma_pred_mode = vp89_rac_get_tree(c, vp8_pred8x8c_tree,
                                                  vp8_pred8x8c_prob_intra);
-        mb->ref_frame        = VP56_FRAME_CURRENT;
+        mb->ref_frame        = VP8_FRAME_CURRENT;
     } else if (vpx_rac_get_prob_branchy(c, s->prob->intra)) {
         // inter MB, 16.2
         if (vpx_rac_get_prob_branchy(c, s->prob->last))
             mb->ref_frame =
-                (!is_vp7 && vpx_rac_get_prob(c, s->prob->golden)) ? VP56_FRAME_GOLDEN2 /* altref */
-                                                                   : VP56_FRAME_GOLDEN;
+                (!is_vp7 && vpx_rac_get_prob(c, s->prob->golden)) ? VP8_FRAME_ALTREF
+                                                                  : VP8_FRAME_GOLDEN;
         else
-            mb->ref_frame = VP56_FRAME_PREVIOUS;
+            mb->ref_frame = VP8_FRAME_PREVIOUS;
         s->ref_count[mb->ref_frame - 1]++;
 
         // motion vectors, 16.3
@@ -1357,7 +1357,7 @@ void decode_mb_mode(VP8Context *s, VP8mvbounds *mv_bounds,
 
         mb->chroma_pred_mode = vp89_rac_get_tree(c, vp8_pred8x8c_tree,
                                                  s->prob->pred8x8c);
-        mb->ref_frame        = VP56_FRAME_CURRENT;
+        mb->ref_frame        = VP8_FRAME_CURRENT;
         mb->partitioning     = VP8_SPLITMVMODE_NONE;
         AV_ZERO32(&mb->bmv[0]);
     }
@@ -1848,7 +1848,7 @@ static const uint8_t subpel_idx[3][8] = {
  */
 static av_always_inline
 void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
-                 ThreadFrame *ref, const VP56mv *mv,
+                 ThreadFrame *ref, const VP8mv *mv,
                  int x_off, int y_off, int block_w, int block_h,
                  int width, int height, ptrdiff_t linesize,
                  vp8_mc_func mc_func[3][3])
@@ -1906,7 +1906,7 @@ void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
  */
 static av_always_inline
 void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1,
-                   uint8_t *dst2, ThreadFrame *ref, const VP56mv *mv,
+                   uint8_t *dst2, ThreadFrame *ref, const VP8mv *mv,
                    int x_off, int y_off, int block_w, int block_h,
                    int width, int height, ptrdiff_t linesize,
                    vp8_mc_func mc_func[3][3])
@@ -1958,9 +1958,9 @@ static av_always_inline
 void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
                  ThreadFrame *ref_frame, int x_off, int y_off,
                  int bx_off, int by_off, int block_w, int block_h,
-                 int width, int height, VP56mv *mv)
+                 int width, int height, VP8mv *mv)
 {
-    VP56mv uvmv = *mv;
+    VP8mv uvmv = *mv;
 
     /* Y */
     vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
@@ -2022,7 +2022,7 @@ void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
     int x_off = mb_x << 4, y_off = mb_y << 4;
     int width = 16 * s->mb_width, height = 16 * s->mb_height;
     ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
-    VP56mv *bmv = mb->bmv;
+    VP8mv *bmv = mb->bmv;
 
     switch (mb->partitioning) {
     case VP8_SPLITMVMODE_NONE:
@@ -2031,7 +2031,7 @@ void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
         break;
     case VP8_SPLITMVMODE_4x4: {
         int x, y;
-        VP56mv uvmv;
+        VP8mv uvmv;
 
         /* Y */
         for (y = 0; y < 4; y++) {
@@ -2476,7 +2476,7 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
                            prev_frame && prev_frame->seg_map ?
                            prev_frame->seg_map->data + mb_xy : NULL, 0, is_vp7);
 
-        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);
+        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_PREVIOUS);
 
         if (!mb->skip)
             decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz, is_vp7);
@@ -2486,7 +2486,7 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
         else
             inter_predict(s, td, dst, mb, mb_x, mb_y);
 
-        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);
+        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_GOLDEN);
 
         if (!mb->skip) {
             idct_mb(s, td, dst, mb);
@@ -2514,7 +2514,7 @@ static av_always_inline int decode_mb_row_no_filter(AVCodecContext *avctx, void
                                  dst[1], dst[2], s->linesize, s->uvlinesize, 0);
         }
 
-        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);
+        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP8_FRAME_ALTREF);
 
         dst[0]      += 16;
         dst[1]      += 8;
@@ -2689,10 +2689,10 @@ int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
         avctx->pix_fmt = s->pix_fmt;
     }
 
-    prev_frame = s->framep[VP56_FRAME_CURRENT];
+    prev_frame = s->framep[VP8_FRAME_CURRENT];
 
-    referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT ||
-                 s->update_altref == VP56_FRAME_CURRENT;
+    referenced = s->update_last || s->update_golden == VP8_FRAME_CURRENT ||
+                 s->update_altref == VP8_FRAME_CURRENT;
 
     skip_thresh = !referenced ? AVDISCARD_NONREF
                               : !s->keyframe ? AVDISCARD_NONKEY
@@ -2709,12 +2709,12 @@ int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
     for (i = 0; i < 5; i++)
         if (s->frames[i].tf.f->buf[0] &&
             &s->frames[i] != prev_frame &&
-            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
-            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN]   &&
-            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
+            &s->frames[i] != s->framep[VP8_FRAME_PREVIOUS] &&
+            &s->frames[i] != s->framep[VP8_FRAME_GOLDEN]   &&
+            &s->frames[i] != s->framep[VP8_FRAME_ALTREF])
             vp8_release_frame(s, &s->frames[i]);
 
-    curframe = s->framep[VP56_FRAME_CURRENT] = vp8_find_free_buffer(s);
+    curframe = s->framep[VP8_FRAME_CURRENT] = vp8_find_free_buffer(s);
 
     if (!s->colorspace)
         avctx->colorspace = AVCOL_SPC_BT470BG;
@@ -2727,9 +2727,9 @@ int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
      * likely that the values we have on a random interframe are complete
      * junk if we didn't start decode on a keyframe. So just don't display
      * anything rather than junk. */
-    if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
-                         !s->framep[VP56_FRAME_GOLDEN]   ||
-                         !s->framep[VP56_FRAME_GOLDEN2])) {
+    if (!s->keyframe && (!s->framep[VP8_FRAME_PREVIOUS] ||
+                         !s->framep[VP8_FRAME_GOLDEN]   ||
+                         !s->framep[VP8_FRAME_ALTREF])) {
         av_log(avctx, AV_LOG_WARNING,
                "Discarding interframe without a prior keyframe!\n");
         ret = AVERROR_INVALIDDATA;
@@ -2743,22 +2743,22 @@ int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
         goto err;
 
     // check if golden and altref are swapped
-    if (s->update_altref != VP56_FRAME_NONE)
-        s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
+    if (s->update_altref != VP8_FRAME_NONE)
+        s->next_framep[VP8_FRAME_ALTREF] = s->framep[s->update_altref];
     else
-        s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
+        s->next_framep[VP8_FRAME_ALTREF] = s->framep[VP8_FRAME_ALTREF];
 
-    if (s->update_golden != VP56_FRAME_NONE)
-        s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
+    if (s->update_golden != VP8_FRAME_NONE)
+        s->next_framep[VP8_FRAME_GOLDEN] = s->framep[s->update_golden];
     else
-        s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
+        s->next_framep[VP8_FRAME_GOLDEN] = s->framep[VP8_FRAME_GOLDEN];
 
     if (s->update_last)
-        s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
+        s->next_framep[VP8_FRAME_PREVIOUS] = curframe;
     else
-        s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
+        s->next_framep[VP8_FRAME_PREVIOUS] = s->framep[VP8_FRAME_PREVIOUS];
 
-    s->next_framep[VP56_FRAME_CURRENT] = curframe;
+    s->next_framep[VP8_FRAME_CURRENT] = curframe;
 
     if (ffcodec(avctx->codec)->update_thread_context)
         ff_thread_finish_setup(avctx);
diff --git a/libavcodec/vp8.h b/libavcodec/vp8.h
index 9695111806..30aeb4cb06 100644
--- a/libavcodec/vp8.h
+++ b/libavcodec/vp8.h
@@ -34,12 +34,20 @@
 
 #include "h264pred.h"
 #include "threadframe.h"
-#include "vp56.h"
+#include "videodsp.h"
 #include "vp8dsp.h"
 #include "vpx_rac.h"
 
 #define VP8_MAX_QUANT 127
 
+typedef enum {
+    VP8_FRAME_NONE     = -1,
+    VP8_FRAME_CURRENT  =  0,
+    VP8_FRAME_PREVIOUS =  1,
+    VP8_FRAME_GOLDEN   =  2,
+    VP8_FRAME_ALTREF   =  3,
+} VP8FrameType;
+
 enum dct_token {
     DCT_0,
     DCT_1,
@@ -74,6 +82,11 @@ enum inter_splitmvmode {
     VP8_SPLITMVMODE_NONE,        ///< (only used in prediction) no split MVs
 };
 
+typedef struct VP8mv {
+    DECLARE_ALIGNED(4, int16_t, x);
+    int16_t y;
+} VP8mv;
+
 typedef struct VP8FilterStrength {
     uint8_t filter_level;
     uint8_t inner_limit;
@@ -91,8 +104,8 @@ typedef struct VP8Macroblock {
     uint8_t segment;
     uint8_t intra4x4_pred_mode_mb[16];
     DECLARE_ALIGNED(4, uint8_t, intra4x4_pred_mode_top)[4];
-    VP56mv mv;
-    VP56mv bmv[16];
+    VP8mv mv;
+    VP8mv bmv[16];
 } VP8Macroblock;
 
 typedef struct VP8intmv {
@@ -235,10 +248,10 @@ typedef struct VP8Context {
 
         /**
          * filter strength adjustment for macroblocks that reference:
-         * [0] - intra / VP56_FRAME_CURRENT
-         * [1] - VP56_FRAME_PREVIOUS
-         * [2] - VP56_FRAME_GOLDEN
-         * [3] - altref / VP56_FRAME_GOLDEN2
+         * [0] - intra / VP8_FRAME_CURRENT
+         * [1] - VP8_FRAME_PREVIOUS
+         * [2] - VP8_FRAME_GOLDEN
+         * [3] - altref / VP8_FRAME_ALTREF
          */
         int8_t ref[4];
     } lf_delta;
@@ -283,8 +296,8 @@ typedef struct VP8Context {
 
     VP8Macroblock *macroblocks_base;
     int invisible;
-    int update_last;    ///< update VP56_FRAME_PREVIOUS with the current one
-    int update_golden;  ///< VP56_FRAME_NONE if not updated, or which frame to copy if so
+    int update_last;    ///< update VP8_FRAME_PREVIOUS with the current one
+    int update_golden;  ///< VP8_FRAME_NONE if not updated, or which frame to copy if so
     int update_altref;
 
     /**
@@ -329,8 +342,8 @@ typedef struct VP8Context {
 
     /**
      * Interframe DC prediction (VP7)
-     * [0] VP56_FRAME_PREVIOUS
-     * [1] VP56_FRAME_GOLDEN
+     * [0] VP8_FRAME_PREVIOUS
+     * [1] VP8_FRAME_GOLDEN
      */
     uint16_t inter_dc_pred[2][2];
 
diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c
index 1eeb460fc0..bd2951c92e 100644
--- a/libavcodec/vp9.c
+++ b/libavcodec/vp9.c
@@ -34,7 +34,6 @@
 #include "pthread_internal.h"
 
 #include "videodsp.h"
-#include "vp56.h"
 #include "vp89_rac.h"
 #include "vp9.h"
 #include "vp9data.h"
@@ -276,7 +275,7 @@ static int update_size(AVCodecContext *avctx, int w, int h)
     assign(s->intra_pred_data[2],  uint8_t *,             64 * bytesperpixel);
     assign(s->above_y_nnz_ctx,     uint8_t *,             16);
     assign(s->above_mode_ctx,      uint8_t *,             16);
-    assign(s->above_mv_ctx,        VP56mv(*)[2],          16);
+    assign(s->above_mv_ctx,        VP9mv(*)[2],           16);
     assign(s->above_uv_nnz_ctx[0], uint8_t *,             16);
     assign(s->above_uv_nnz_ctx[1], uint8_t *,             16);
     assign(s->above_partition_ctx, uint8_t *,              8);
diff --git a/libavcodec/vp9_mc_template.c b/libavcodec/vp9_mc_template.c
index 31e692f362..6ea3cc3225 100644
--- a/libavcodec/vp9_mc_template.c
+++ b/libavcodec/vp9_mc_template.c
@@ -22,9 +22,9 @@
  */
 
 #define ROUNDED_DIV_MVx2(a, b) \
-    (VP56mv) { .x = ROUNDED_DIV(a.x + b.x, 2), .y = ROUNDED_DIV(a.y + b.y, 2) }
+    (VP9mv) { .x = ROUNDED_DIV(a.x + b.x, 2), .y = ROUNDED_DIV(a.y + b.y, 2) }
 #define ROUNDED_DIV_MVx4(a, b, c, d) \
-    (VP56mv) { .x = ROUNDED_DIV(a.x + b.x + c.x + d.x, 4), \
+    (VP9mv) { .x = ROUNDED_DIV(a.x + b.x + c.x + d.x, 4), \
                .y = ROUNDED_DIV(a.y + b.y + c.y + d.y, 4) }
 
 static void FN(inter_pred)(VP9TileData *td)
@@ -51,7 +51,7 @@ static void FN(inter_pred)(VP9TileData *td)
 
     // y inter pred
     if (b->bs > BS_8x8) {
-        VP56mv uvmv;
+        VP9mv uvmv;
 
 #if SCALED == 0
         if (b->bs == BS_8x4) {
diff --git a/libavcodec/vp9block.c b/libavcodec/vp9block.c
index baa343fddc..c6103ee6f0 100644
--- a/libavcodec/vp9block.c
+++ b/libavcodec/vp9block.c
@@ -24,7 +24,6 @@
 #include "libavutil/avassert.h"
 
 #include "threadframe.h"
-#include "vp56.h"
 #include "vp89_rac.h"
 #include "vp9.h"
 #include "vp9data.h"
diff --git a/libavcodec/vp9dec.h b/libavcodec/vp9dec.h
index 56676a7c03..4bfd50d27b 100644
--- a/libavcodec/vp9dec.h
+++ b/libavcodec/vp9dec.h
@@ -33,6 +33,8 @@
 #include "libavutil/thread.h"
 #include "libavutil/internal.h"
 
+#include "get_bits.h"
+#include "videodsp.h"
 #include "vp9.h"
 #include "vp9dsp.h"
 #include "vp9shared.h"
@@ -83,7 +85,7 @@ typedef struct VP9Filter {
 typedef struct VP9Block {
     uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
     enum FilterMode filter;
-    VP56mv mv[4 /* b_idx */][2 /* ref */];
+    VP9mv mv[4 /* b_idx */][2 /* ref */];
     enum BlockSize bs;
     enum TxfmMode tx, uvtx;
     enum BlockLevel bl;
@@ -147,7 +149,7 @@ typedef struct VP9Context {
     uint8_t *above_comp_ctx; // 1bit
     uint8_t *above_ref_ctx; // 2bit
     uint8_t *above_filter_ctx;
-    VP56mv (*above_mv_ctx)[2];
+    VP9mv (*above_mv_ctx)[2];
 
     // whole-frame cache
     uint8_t *intra_pred_data[3];
@@ -210,7 +212,7 @@ struct VP9TileData {
     // contextual (left) cache
     DECLARE_ALIGNED(16, uint8_t, left_y_nnz_ctx)[16];
     DECLARE_ALIGNED(16, uint8_t, left_mode_ctx)[16];
-    DECLARE_ALIGNED(16, VP56mv, left_mv_ctx)[16][2];
+    DECLARE_ALIGNED(16, VP9mv, left_mv_ctx)[16][2];
     DECLARE_ALIGNED(16, uint8_t, left_uv_nnz_ctx)[2][16];
     DECLARE_ALIGNED(8, uint8_t, left_partition_ctx)[8];
     DECLARE_ALIGNED(8, uint8_t, left_skip_ctx)[8];
@@ -238,7 +240,7 @@ struct VP9TileData {
     unsigned int nb_block_structure;
 };
 
-void ff_vp9_fill_mv(VP9TileData *td, VP56mv *mv, int mode, int sb);
+void ff_vp9_fill_mv(VP9TileData *td, VP9mv *mv, int mode, int sb);
 
 void ff_vp9_adapt_probs(VP9Context *s);
 
diff --git a/libavcodec/vp9mvs.c b/libavcodec/vp9mvs.c
index c604ec7cb6..8b682166e4 100644
--- a/libavcodec/vp9mvs.c
+++ b/libavcodec/vp9mvs.c
@@ -22,13 +22,12 @@
  */
 
 #include "threadframe.h"
-#include "vp56.h"
 #include "vp89_rac.h"
 #include "vp9data.h"
 #include "vp9dec.h"
 #include "vpx_rac.h"
 
-static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
+static av_always_inline void clamp_mv(VP9mv *dst, const VP9mv *src,
                                       VP9TileData *td)
 {
     dst->x = av_clip(src->x, td->min_mv.x, td->max_mv.x);
@@ -36,7 +35,7 @@ static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
 }
 
 static void find_ref_mvs(VP9TileData *td,
-                         VP56mv *pmv, int ref, int z, int idx, int sb)
+                         VP9mv *pmv, int ref, int z, int idx, int sb)
 {
     static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
         [BS_64x64] = { {  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
@@ -100,7 +99,7 @@ static void find_ref_mvs(VP9TileData *td,
 #define RETURN_MV(mv)                                                  \
     do {                                                               \
         if (sb > 0) {                                                  \
-            VP56mv tmp;                                                \
+            VP9mv tmp;                                                 \
             uint32_t m;                                                \
             av_assert2(idx == 1);                                      \
             av_assert2(mem != INVALID_MV);                             \
@@ -186,7 +185,7 @@ static void find_ref_mvs(VP9TileData *td,
 #define RETURN_SCALE_MV(mv, scale)              \
     do {                                        \
         if (scale) {                            \
-            VP56mv mv_temp = { -mv.x, -mv.y };  \
+            VP9mv mv_temp = { -mv.x, -mv.y };   \
             RETURN_MV(mv_temp);                 \
         } else {                                \
             RETURN_MV(mv);                      \
@@ -289,7 +288,7 @@ static av_always_inline int read_mv_component(VP9TileData *td, int idx, int hp)
     return sign ? -(n + 1) : (n + 1);
 }
 
-void ff_vp9_fill_mv(VP9TileData *td, VP56mv *mv, int mode, int sb)
+void ff_vp9_fill_mv(VP9TileData *td, VP9mv *mv, int mode, int sb)
 {
     VP9Context *s = td->s;
     VP9Block *b = td->b;
diff --git a/libavcodec/vp9prob.c b/libavcodec/vp9prob.c
index fb295b482d..69a5180770 100644
--- a/libavcodec/vp9prob.c
+++ b/libavcodec/vp9prob.c
@@ -21,9 +21,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "vp56.h"
 #include "vp9.h"
-#include "vp9data.h"
 #include "vp9dec.h"
 
 static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
diff --git a/libavcodec/vp9recon.c b/libavcodec/vp9recon.c
index bfafde8c9c..e758dca55b 100644
--- a/libavcodec/vp9recon.c
+++ b/libavcodec/vp9recon.c
@@ -299,7 +299,7 @@ static av_always_inline void mc_luma_unscaled(VP9TileData *td, vp9_mc_func (*mc)
                                               uint8_t *dst, ptrdiff_t dst_stride,
                                               const uint8_t *ref, ptrdiff_t ref_stride,
                                               ThreadFrame *ref_frame,
-                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
+                                              ptrdiff_t y, ptrdiff_t x, const VP9mv *mv,
                                               int bw, int bh, int w, int h, int bytesperpixel)
 {
     VP9Context *s = td->s;
@@ -337,7 +337,7 @@ static av_always_inline void mc_chroma_unscaled(VP9TileData *td, vp9_mc_func (*m
                                                 const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                                 const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                                 ThreadFrame *ref_frame,
-                                                ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
+                                                ptrdiff_t y, ptrdiff_t x, const VP9mv *mv,
                                                 int bw, int bh, int w, int h, int bytesperpixel)
 {
     VP9Context *s = td->s;
@@ -408,7 +408,7 @@ static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func
                                             uint8_t *dst, ptrdiff_t dst_stride,
                                             const uint8_t *ref, ptrdiff_t ref_stride,
                                             ThreadFrame *ref_frame,
-                                            ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
+                                            ptrdiff_t y, ptrdiff_t x, const VP9mv *in_mv,
                                             int px, int py, int pw, int ph,
                                             int bw, int bh, int w, int h, int bytesperpixel,
                                             const uint16_t *scale, const uint8_t *step)
@@ -423,7 +423,7 @@ static av_always_inline void mc_luma_scaled(VP9TileData *td, vp9_scaled_mc_func
     int mx, my;
     int refbw_m1, refbh_m1;
     int th;
-    VP56mv mv;
+    VP9mv mv;
 
     mv.x = av_clip(in_mv->x, -(x + pw - px + 4) * 8, (s->cols * 8 - x + px + 3) * 8);
     mv.y = av_clip(in_mv->y, -(y + ph - py + 4) * 8, (s->rows * 8 - y + py + 3) * 8);
@@ -468,7 +468,7 @@ static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_fun
                                               const uint8_t *ref_u, ptrdiff_t src_stride_u,
                                               const uint8_t *ref_v, ptrdiff_t src_stride_v,
                                               ThreadFrame *ref_frame,
-                                              ptrdiff_t y, ptrdiff_t x, const VP56mv *in_mv,
+                                              ptrdiff_t y, ptrdiff_t x, const VP9mv *in_mv,
                                               int px, int py, int pw, int ph,
                                               int bw, int bh, int w, int h, int bytesperpixel,
                                               const uint16_t *scale, const uint8_t *step)
@@ -483,7 +483,7 @@ static av_always_inline void mc_chroma_scaled(VP9TileData *td, vp9_scaled_mc_fun
     int mx, my;
     int refbw_m1, refbh_m1;
     int th;
-    VP56mv mv;
+    VP9mv mv;
 
     if (s->ss_h) {
         // BUG https://code.google.com/p/webm/issues/detail?id=820
diff --git a/libavcodec/vp9shared.h b/libavcodec/vp9shared.h
index ebaa11d2c1..543a496df8 100644
--- a/libavcodec/vp9shared.h
+++ b/libavcodec/vp9shared.h
@@ -27,9 +27,10 @@
 #include <stddef.h>
 #include <stdint.h>
 
+#include "libavutil/mem_internal.h"
+
 #include "vp9.h"
 #include "threadframe.h"
-#include "vp56.h"
 
 enum BlockPartition {
     PARTITION_NONE,    // [ ] <-.
@@ -51,8 +52,13 @@ enum CompPredMode {
     PRED_SWITCHABLE,
 };
 
+typedef struct VP9mv {
+    DECLARE_ALIGNED(4, int16_t, x);
+    int16_t y;
+} VP9mv;
+
 typedef struct VP9mvrefPair {
-    VP56mv mv[2];
+    VP9mv mv[2];
     int8_t ref[2];
 } VP9mvrefPair;
 


