[FFmpeg-cvslog] Merge commit '99c554efc8b09c3f1bb2fb41c3da5431085f7470'

Clément Bœsch git at videolan.org
Mon Jun 20 12:05:27 CEST 2016


ffmpeg | branch: master | Clément Bœsch <clement at stupeflix.com> | Mon Jun 20 11:37:49 2016 +0200| [d98ca4b14c673db8eb3cc124e42ff7a660da9109] | committer: Clément Bœsch

Merge commit '99c554efc8b09c3f1bb2fb41c3da5431085f7470'

* commit '99c554efc8b09c3f1bb2fb41c3da5431085f7470':
  h264: eliminate low_delay

ff_print_debug_info2() is adjusted to allow a NULL pointer as low_delay.
It's only useful for MPEG codecs with the exception of H264.

Merged-by: Clément Bœsch <clement at stupeflix.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=d98ca4b14c673db8eb3cc124e42ff7a660da9109
---

 libavcodec/h264.c       |    9 +--------
 libavcodec/h264.h       |    1 -
 libavcodec/h264_slice.c |   16 ----------------
 libavcodec/mpegvideo.c  |    3 ++-
 4 files changed, 3 insertions(+), 26 deletions(-)

diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index aea6f49..bf8e12d 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -368,10 +368,6 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
     if (ret < 0)
         return ret;
 
-    /* set defaults */
-    if (!avctx->has_b_frames)
-        h->low_delay = 1;
-
     ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
     if (ret != 0) {
         av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
@@ -401,7 +397,6 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
     if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
         h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
         h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
-        h->low_delay           = 0;
     }
 
     avctx->internal->allocate_progress = 1;
@@ -631,7 +626,6 @@ static void decode_postinit(H264Context *h, int setup_finished)
         h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
         h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
     }
-    h->low_delay = !h->avctx->has_b_frames;
 
     for (i = 0; 1; i++) {
         if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
@@ -655,7 +649,6 @@ static void decode_postinit(H264Context *h, int setup_finished)
     } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
         av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
         h->avctx->has_b_frames = out_of_order;
-        h->low_delay = 0;
     }
 
     pics = 0;
@@ -1353,7 +1346,7 @@ static int h264_decode_frame(AVCodecContext *avctx, void *data,
                                     h->next_output_pic->mb_type,
                                     h->next_output_pic->qscale_table,
                                     h->next_output_pic->motion_val,
-                                    &h->low_delay,
+                                    NULL,
                                     h->mb_width, h->mb_height, h->mb_stride, 1);
             }
         }
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index f5a2a90..7b0555e 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -494,7 +494,6 @@ typedef struct H264Context {
 
     int droppable;
     int coded_picture_number;
-    int low_delay;
 
     int context_initialized;
     int flags;
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index d8da424..c303051 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -386,7 +386,6 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
     h->first_field          = h1->first_field;
     h->picture_structure    = h1->picture_structure;
     h->droppable            = h1->droppable;
-    h->low_delay            = h1->low_delay;
     h->backup_width         = h1->backup_width;
     h->backup_height        = h1->backup_height;
     h->backup_pix_fmt       = h1->backup_pix_fmt;
@@ -408,7 +407,6 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
 
     h->enable_er       = h1->enable_er;
     h->workaround_bugs = h1->workaround_bugs;
-    h->low_delay       = h1->low_delay;
     h->droppable       = h1->droppable;
 
     // extradata/NAL handling
@@ -1218,20 +1216,6 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
         if (h->bit_depth_luma    != h->ps.sps->bit_depth_luma ||
             h->chroma_format_idc != h->ps.sps->chroma_format_idc)
             needs_reinit         = 1;
-
-        if (h->flags & AV_CODEC_FLAG_LOW_DELAY ||
-            (h->ps.sps->bitstream_restriction_flag &&
-             !h->ps.sps->num_reorder_frames)) {
-            if (h->avctx->has_b_frames > 1 || h->delayed_pic[0])
-                av_log(h->avctx, AV_LOG_WARNING, "Delayed frames seen. "
-                       "Reenabling low delay requires a codec flush.\n");
-            else
-                h->low_delay = 1;
-        }
-
-        if (h->avctx->has_b_frames < 2)
-            h->avctx->has_b_frames = !h->low_delay;
-
     }
 
     pps = h->ps.pps;
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 80d5386..ecb585f 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -1757,7 +1757,8 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_
         const int mv_stride      = (mb_width << mv_sample_log2) +
                                    (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
 
-        *low_delay = 0; // needed to see the vectors without trashing the buffers
+        if (low_delay)
+            *low_delay = 0; // needed to see the vectors without trashing the buffers
 
         avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
 


======================================================================

diff --cc libavcodec/h264.c
index aea6f49,47e3c93..bf8e12d
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@@ -628,36 -573,10 +623,34 @@@ static void decode_postinit(H264Contex
  
      /* Sort B-frames into display order */
      if (sps->bitstream_restriction_flag ||
 -        h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
 +        h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
          h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
      }
-     h->low_delay = !h->avctx->has_b_frames;
  
 +    for (i = 0; 1; i++) {
 +        if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
 +            if(i)
 +                h->last_pocs[i-1] = cur->poc;
 +            break;
 +        } else if(i) {
 +            h->last_pocs[i-1]= h->last_pocs[i];
 +        }
 +    }
 +    out_of_order = MAX_DELAYED_PIC_COUNT - i;
 +    if(   cur->f->pict_type == AV_PICTURE_TYPE_B
 +       || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
 +        out_of_order = FFMAX(out_of_order, 1);
 +    if (out_of_order == MAX_DELAYED_PIC_COUNT) {
 +        av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
 +        for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
 +            h->last_pocs[i] = INT_MIN;
 +        h->last_pocs[0] = cur->poc;
 +        cur->mmco_reset = 1;
 +    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
 +        av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
 +        h->avctx->has_b_frames = out_of_order;
-         h->low_delay = 0;
 +    }
 +
      pics = 0;
      while (h->delayed_pic[pics])
          pics++;
@@@ -1348,14 -1056,6 +1341,14 @@@ static int h264_decode_frame(AVCodecCon
              if (ret < 0)
                  return ret;
              *got_frame = 1;
 +            if (CONFIG_MPEGVIDEO) {
 +                ff_print_debug_info2(h->avctx, pict, NULL,
 +                                    h->next_output_pic->mb_type,
 +                                    h->next_output_pic->qscale_table,
 +                                    h->next_output_pic->motion_val,
-                                     &h->low_delay,
++                                    NULL,
 +                                    h->mb_width, h->mb_height, h->mb_stride, 1);
 +            }
          }
      }
  
diff --cc libavcodec/h264.h
index f5a2a90,55c31f6..7b0555e
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@@ -484,17 -468,8 +484,16 @@@ typedef struct H264Context 
      int width, height;
      int chroma_x_shift, chroma_y_shift;
  
 +    /**
 +     * Backup frame properties: needed, because they can be different
 +     * between returned frame and last decoded frame.
 +     **/
 +    int backup_width;
 +    int backup_height;
 +    enum AVPixelFormat backup_pix_fmt;
 +
      int droppable;
      int coded_picture_number;
-     int low_delay;
  
      int context_initialized;
      int flags;
diff --cc libavcodec/h264_slice.c
index d8da424,fda6d32..c303051
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@@ -386,10 -378,6 +386,9 @@@ int ff_h264_update_thread_context(AVCod
      h->first_field          = h1->first_field;
      h->picture_structure    = h1->picture_structure;
      h->droppable            = h1->droppable;
-     h->low_delay            = h1->low_delay;
 +    h->backup_width         = h1->backup_width;
 +    h->backup_height        = h1->backup_height;
 +    h->backup_pix_fmt       = h1->backup_pix_fmt;
  
      for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
          ff_h264_unref_picture(h, &h->DPB[i]);
diff --cc libavcodec/mpegvideo.c
index 80d5386,5974e18..ecb585f
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@@ -1733,628 -1368,13 +1733,629 @@@ void ff_print_debug_info2(AVCodecContex
  
  
                      if (IS_INTERLACED(mb_type))
 -                        av_log(s->avctx, AV_LOG_DEBUG, "=");
 +                        av_log(avctx, AV_LOG_DEBUG, "=");
                      else
 -                        av_log(s->avctx, AV_LOG_DEBUG, " ");
 +                        av_log(avctx, AV_LOG_DEBUG, " ");
 +                }
 +            }
 +            av_log(avctx, AV_LOG_DEBUG, "\n");
 +        }
 +    }
 +
 +    if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) ||
 +        (avctx->debug_mv)) {
 +        int mb_y;
 +        int i;
 +        int h_chroma_shift, v_chroma_shift, block_height;
 +#if FF_API_VISMV
 +        const int shift = 1 + quarter_sample;
 +        uint8_t *ptr;
 +        const int width          = avctx->width;
 +        const int height         = avctx->height;
 +#endif
 +        const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_SVQ3 ? 2 : 1;
 +        const int mv_stride      = (mb_width << mv_sample_log2) +
 +                                   (avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
 +
-         *low_delay = 0; // needed to see the vectors without trashing the buffers
++        if (low_delay)
++            *low_delay = 0; // needed to see the vectors without trashing the buffers
 +
 +        avcodec_get_chroma_sub_sample(avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
 +
 +        av_frame_make_writable(pict);
 +
 +        pict->opaque = NULL;
 +#if FF_API_VISMV
 +        ptr          = pict->data[0];
 +#endif
 +        block_height = 16 >> v_chroma_shift;
 +
 +        for (mb_y = 0; mb_y < mb_height; mb_y++) {
 +            int mb_x;
 +            for (mb_x = 0; mb_x < mb_width; mb_x++) {
 +                const int mb_index = mb_x + mb_y * mb_stride;
 +#if FF_API_VISMV
 +                if ((avctx->debug_mv) && motion_val[0]) {
 +                    int type;
 +                    for (type = 0; type < 3; type++) {
 +                        int direction = 0;
 +                        switch (type) {
 +                        case 0:
 +                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_P_FOR)) ||
 +                                (pict->pict_type!= AV_PICTURE_TYPE_P))
 +                                continue;
 +                            direction = 0;
 +                            break;
 +                        case 1:
 +                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_FOR)) ||
 +                                (pict->pict_type!= AV_PICTURE_TYPE_B))
 +                                continue;
 +                            direction = 0;
 +                            break;
 +                        case 2:
 +                            if ((!(avctx->debug_mv & FF_DEBUG_VIS_MV_B_BACK)) ||
 +                                (pict->pict_type!= AV_PICTURE_TYPE_B))
 +                                continue;
 +                            direction = 1;
 +                            break;
 +                        }
 +                        if (!USES_LIST(mbtype_table[mb_index], direction))
 +                            continue;
 +
 +                        if (IS_8X8(mbtype_table[mb_index])) {
 +                            int i;
 +                            for (i = 0; i < 4; i++) {
 +                                int sx = mb_x * 16 + 4 + 8 * (i & 1);
 +                                int sy = mb_y * 16 + 4 + 8 * (i >> 1);
 +                                int xy = (mb_x * 2 + (i & 1) +
 +                                          (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
 +                                int mx = (motion_val[direction][xy][0] >> shift) + sx;
 +                                int my = (motion_val[direction][xy][1] >> shift) + sy;
 +                                draw_arrow(ptr, sx, sy, mx, my, width,
 +                                           height, pict->linesize[0], 100, 0, direction);
 +                            }
 +                        } else if (IS_16X8(mbtype_table[mb_index])) {
 +                            int i;
 +                            for (i = 0; i < 2; i++) {
 +                                int sx = mb_x * 16 + 8;
 +                                int sy = mb_y * 16 + 4 + 8 * i;
 +                                int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
 +                                int mx = (motion_val[direction][xy][0] >> shift);
 +                                int my = (motion_val[direction][xy][1] >> shift);
 +
 +                                if (IS_INTERLACED(mbtype_table[mb_index]))
 +                                    my *= 2;
 +
 +                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
 +                                           height, pict->linesize[0], 100, 0, direction);
 +                            }
 +                        } else if (IS_8X16(mbtype_table[mb_index])) {
 +                            int i;
 +                            for (i = 0; i < 2; i++) {
 +                                int sx = mb_x * 16 + 4 + 8 * i;
 +                                int sy = mb_y * 16 + 8;
 +                                int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
 +                                int mx = motion_val[direction][xy][0] >> shift;
 +                                int my = motion_val[direction][xy][1] >> shift;
 +
 +                                if (IS_INTERLACED(mbtype_table[mb_index]))
 +                                    my *= 2;
 +
 +                                draw_arrow(ptr, sx, sy, mx + sx, my + sy, width,
 +                                           height, pict->linesize[0], 100, 0, direction);
 +                            }
 +                        } else {
 +                              int sx= mb_x * 16 + 8;
 +                              int sy= mb_y * 16 + 8;
 +                              int xy= (mb_x + mb_y * mv_stride) << mv_sample_log2;
 +                              int mx= (motion_val[direction][xy][0]>>shift) + sx;
 +                              int my= (motion_val[direction][xy][1]>>shift) + sy;
 +                              draw_arrow(ptr, sx, sy, mx, my, width, height, pict->linesize[0], 100, 0, direction);
 +                        }
 +                    }
 +                }
 +#endif
 +                if ((avctx->debug & FF_DEBUG_VIS_QP)) {
 +                    uint64_t c = (qscale_table[mb_index] * 128 / 31) *
 +                                 0x0101010101010101ULL;
 +                    int y;
 +                    for (y = 0; y < block_height; y++) {
 +                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
 +                                      (block_height * mb_y + y) *
 +                                      pict->linesize[1]) = c;
 +                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
 +                                      (block_height * mb_y + y) *
 +                                      pict->linesize[2]) = c;
 +                    }
 +                }
 +                if ((avctx->debug & FF_DEBUG_VIS_MB_TYPE) &&
 +                    motion_val[0]) {
 +                    int mb_type = mbtype_table[mb_index];
 +                    uint64_t u,v;
 +                    int y;
 +#define COLOR(theta, r) \
 +    u = (int)(128 + r * cos(theta * M_PI / 180)); \
 +    v = (int)(128 + r * sin(theta * M_PI / 180));
 +
 +
 +                    u = v = 128;
 +                    if (IS_PCM(mb_type)) {
 +                        COLOR(120, 48)
 +                    } else if ((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) ||
 +                               IS_INTRA16x16(mb_type)) {
 +                        COLOR(30, 48)
 +                    } else if (IS_INTRA4x4(mb_type)) {
 +                        COLOR(90, 48)
 +                    } else if (IS_DIRECT(mb_type) && IS_SKIP(mb_type)) {
 +                        // COLOR(120, 48)
 +                    } else if (IS_DIRECT(mb_type)) {
 +                        COLOR(150, 48)
 +                    } else if (IS_GMC(mb_type) && IS_SKIP(mb_type)) {
 +                        COLOR(170, 48)
 +                    } else if (IS_GMC(mb_type)) {
 +                        COLOR(190, 48)
 +                    } else if (IS_SKIP(mb_type)) {
 +                        // COLOR(180, 48)
 +                    } else if (!USES_LIST(mb_type, 1)) {
 +                        COLOR(240, 48)
 +                    } else if (!USES_LIST(mb_type, 0)) {
 +                        COLOR(0, 48)
 +                    } else {
 +                        av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
 +                        COLOR(300,48)
 +                    }
 +
 +                    u *= 0x0101010101010101ULL;
 +                    v *= 0x0101010101010101ULL;
 +                    for (y = 0; y < block_height; y++) {
 +                        *(uint64_t *)(pict->data[1] + 8 * mb_x +
 +                                      (block_height * mb_y + y) * pict->linesize[1]) = u;
 +                        *(uint64_t *)(pict->data[2] + 8 * mb_x +
 +                                      (block_height * mb_y + y) * pict->linesize[2]) = v;
 +                    }
 +
 +                    // segmentation
 +                    if (IS_8X8(mb_type) || IS_16X8(mb_type)) {
 +                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 0 +
 +                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
 +                        *(uint64_t *)(pict->data[0] + 16 * mb_x + 8 +
 +                                      (16 * mb_y + 8) * pict->linesize[0]) ^= 0x8080808080808080ULL;
 +                    }
 +                    if (IS_8X8(mb_type) || IS_8X16(mb_type)) {
 +                        for (y = 0; y < 16; y++)
 +                            pict->data[0][16 * mb_x + 8 + (16 * mb_y + y) *
 +                                          pict->linesize[0]] ^= 0x80;
 +                    }
 +                    if (IS_8X8(mb_type) && mv_sample_log2 >= 2) {
 +                        int dm = 1 << (mv_sample_log2 - 2);
 +                        for (i = 0; i < 4; i++) {
 +                            int sx = mb_x * 16 + 8 * (i & 1);
 +                            int sy = mb_y * 16 + 8 * (i >> 1);
 +                            int xy = (mb_x * 2 + (i & 1) +
 +                                     (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
 +                            // FIXME bidir
 +                            int32_t *mv = (int32_t *) &motion_val[0][xy];
 +                            if (mv[0] != mv[dm] ||
 +                                mv[dm * mv_stride] != mv[dm * (mv_stride + 1)])
 +                                for (y = 0; y < 8; y++)
 +                                    pict->data[0][sx + 4 + (sy + y) * pict->linesize[0]] ^= 0x80;
 +                            if (mv[0] != mv[dm * mv_stride] || mv[dm] != mv[dm * (mv_stride + 1)])
 +                                *(uint64_t *)(pict->data[0] + sx + (sy + 4) *
 +                                              pict->linesize[0]) ^= 0x8080808080808080ULL;
 +                        }
 +                    }
 +
 +                    if (IS_INTERLACED(mb_type) &&
 +                        avctx->codec->id == AV_CODEC_ID_H264) {
 +                        // hmm
 +                    }
 +                }
 +                if (mbskip_table)
 +                    mbskip_table[mb_index] = 0;
 +            }
 +        }
 +    }
 +}
 +
 +void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
 +{
 +    ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
 +                         p->qscale_table, p->motion_val, &s->low_delay,
 +                         s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
 +}
 +
 +int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
 +{
 +    AVBufferRef *ref = av_buffer_ref(p->qscale_table_buf);
 +    int offset = 2*s->mb_stride + 1;
 +    if(!ref)
 +        return AVERROR(ENOMEM);
 +    av_assert0(ref->size >= offset + s->mb_stride * ((f->height+15)/16));
 +    ref->size -= offset;
 +    ref->data += offset;
 +    return av_frame_set_qp_table(f, ref, s->mb_stride, qp_type);
 +}
 +
 +static inline int hpel_motion_lowres(MpegEncContext *s,
 +                                     uint8_t *dest, uint8_t *src,
 +                                     int field_based, int field_select,
 +                                     int src_x, int src_y,
 +                                     int width, int height, ptrdiff_t stride,
 +                                     int h_edge_pos, int v_edge_pos,
 +                                     int w, int h, h264_chroma_mc_func *pix_op,
 +                                     int motion_x, int motion_y)
 +{
 +    const int lowres   = s->avctx->lowres;
 +    const int op_index = FFMIN(lowres, 3);
 +    const int s_mask   = (2 << lowres) - 1;
 +    int emu = 0;
 +    int sx, sy;
 +
 +    if (s->quarter_sample) {
 +        motion_x /= 2;
 +        motion_y /= 2;
 +    }
 +
 +    sx = motion_x & s_mask;
 +    sy = motion_y & s_mask;
 +    src_x += motion_x >> lowres + 1;
 +    src_y += motion_y >> lowres + 1;
 +
 +    src   += src_y * stride + src_x;
 +
 +    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
 +        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
 +        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
 +                                 s->linesize, s->linesize,
 +                                 w + 1, (h + 1) << field_based,
 +                                 src_x, src_y   << field_based,
 +                                 h_edge_pos, v_edge_pos);
 +        src = s->sc.edge_emu_buffer;
 +        emu = 1;
 +    }
 +
 +    sx = (sx << 2) >> lowres;
 +    sy = (sy << 2) >> lowres;
 +    if (field_select)
 +        src += s->linesize;
 +    pix_op[op_index](dest, src, stride, h, sx, sy);
 +    return emu;
 +}
 +
 +/* apply one mpeg motion vector to the three components */
 +static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
 +                                                uint8_t *dest_y,
 +                                                uint8_t *dest_cb,
 +                                                uint8_t *dest_cr,
 +                                                int field_based,
 +                                                int bottom_field,
 +                                                int field_select,
 +                                                uint8_t **ref_picture,
 +                                                h264_chroma_mc_func *pix_op,
 +                                                int motion_x, int motion_y,
 +                                                int h, int mb_y)
 +{
 +    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
 +    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
 +    ptrdiff_t uvlinesize, linesize;
 +    const int lowres     = s->avctx->lowres;
 +    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
 +    const int block_s    = 8>>lowres;
 +    const int s_mask     = (2 << lowres) - 1;
 +    const int h_edge_pos = s->h_edge_pos >> lowres;
 +    const int v_edge_pos = s->v_edge_pos >> lowres;
 +    linesize   = s->current_picture.f->linesize[0] << field_based;
 +    uvlinesize = s->current_picture.f->linesize[1] << field_based;
 +
 +    // FIXME obviously not perfect but qpel will not work in lowres anyway
 +    if (s->quarter_sample) {
 +        motion_x /= 2;
 +        motion_y /= 2;
 +    }
 +
 +    if(field_based){
 +        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
 +    }
 +
 +    sx = motion_x & s_mask;
 +    sy = motion_y & s_mask;
 +    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
 +    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
 +
 +    if (s->out_format == FMT_H263) {
 +        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
 +        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
 +        uvsrc_x = src_x >> 1;
 +        uvsrc_y = src_y >> 1;
 +    } else if (s->out_format == FMT_H261) {
 +        // even chroma mv's are full pel in H261
 +        mx      = motion_x / 4;
 +        my      = motion_y / 4;
 +        uvsx    = (2 * mx) & s_mask;
 +        uvsy    = (2 * my) & s_mask;
 +        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
 +        uvsrc_y =    mb_y * block_s + (my >> lowres);
 +    } else {
 +        if(s->chroma_y_shift){
 +            mx      = motion_x / 2;
 +            my      = motion_y / 2;
 +            uvsx    = mx & s_mask;
 +            uvsy    = my & s_mask;
 +            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
 +            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
 +        } else {
 +            if(s->chroma_x_shift){
 +            //Chroma422
 +                mx = motion_x / 2;
 +                uvsx = mx & s_mask;
 +                uvsy = motion_y & s_mask;
 +                uvsrc_y = src_y;
 +                uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
 +            } else {
 +            //Chroma444
 +                uvsx = motion_x & s_mask;
 +                uvsy = motion_y & s_mask;
 +                uvsrc_x = src_x;
 +                uvsrc_y = src_y;
 +            }
 +        }
 +    }
 +
 +    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
 +    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
 +    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
 +
 +    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) || uvsrc_y<0 ||
 +        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
 +        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
 +                                 linesize >> field_based, linesize >> field_based,
 +                                 17, 17 + field_based,
 +                                src_x, src_y << field_based, h_edge_pos,
 +                                v_edge_pos);
 +        ptr_y = s->sc.edge_emu_buffer;
 +        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
 +            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
 +            uint8_t *vbuf =ubuf + 9 * s->uvlinesize;
 +            s->vdsp.emulated_edge_mc(ubuf,  ptr_cb,
 +                                     uvlinesize >> field_based, uvlinesize >> field_based,
 +                                     9, 9 + field_based,
 +                                    uvsrc_x, uvsrc_y << field_based,
 +                                    h_edge_pos >> 1, v_edge_pos >> 1);
 +            s->vdsp.emulated_edge_mc(vbuf,  ptr_cr,
 +                                     uvlinesize >> field_based,uvlinesize >> field_based,
 +                                     9, 9 + field_based,
 +                                    uvsrc_x, uvsrc_y << field_based,
 +                                    h_edge_pos >> 1, v_edge_pos >> 1);
 +            ptr_cb = ubuf;
 +            ptr_cr = vbuf;
 +        }
 +    }
 +
 +    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
 +    if (bottom_field) {
 +        dest_y  += s->linesize;
 +        dest_cb += s->uvlinesize;
 +        dest_cr += s->uvlinesize;
 +    }
 +
 +    if (field_select) {
 +        ptr_y   += s->linesize;
 +        ptr_cb  += s->uvlinesize;
 +        ptr_cr  += s->uvlinesize;
 +    }
 +
 +    sx = (sx << 2) >> lowres;
 +    sy = (sy << 2) >> lowres;
 +    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
 +
 +    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
 +        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
 +        uvsx = (uvsx << 2) >> lowres;
 +        uvsy = (uvsy << 2) >> lowres;
 +        if (hc) {
 +            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
 +            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
 +        }
 +    }
 +    // FIXME h261 lowres loop filter
 +}
 +
 +/**
 + * 8x8 (4MV) chroma motion compensation at reduced ("lowres") resolution.
 + * The caller has summed the four luma vectors into (mx, my); one chroma
 + * vector is derived from that sum using the H.263 rounding rule.
 + *
 + * @param s           MPEG decoding context (lowres factor, edge positions,
 + *                    uvlinesize and the edge-emulation scratch buffer)
 + * @param dest_cb     destination pointer for the Cb plane
 + * @param dest_cr     destination pointer for the Cr plane
 + * @param ref_picture array[3] of reference plane pointers ([1]=Cb, [2]=Cr)
 + * @param pix_op      chroma MC function table, indexed by the lowres level
 + * @param mx          sum of the four horizontal motion components
 + * @param my          sum of the four vertical motion components
 + */
 +static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
 +                                            uint8_t *dest_cb, uint8_t *dest_cr,
 +                                            uint8_t **ref_picture,
 +                                            h264_chroma_mc_func * pix_op,
 +                                            int mx, int my)
 +{
 +    const int lowres     = s->avctx->lowres;
 +    const int op_index   = FFMIN(lowres, 3);
 +    const int block_s    = 8 >> lowres;
 +    const int s_mask     = (2 << lowres) - 1;
 +    /* NB: '>>' binds looser than '+', so this is >> (lowres + 1): chroma
 +     * planes are at half resolution on top of the lowres shift (intentional) */
 +    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
 +    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
 +    int emu = 0, src_x, src_y, sx, sy;
 +    ptrdiff_t offset;
 +    uint8_t *ptr;
 +
 +    if (s->quarter_sample) {
 +        mx /= 2;
 +        my /= 2;
 +    }
 +
 +    /* In case of 8X8, we construct a single chroma motion vector
 +       with a special rounding */
 +    mx = ff_h263_round_chroma(mx);
 +    my = ff_h263_round_chroma(my);
 +
 +    /* split the vector into integer sample position and sub-sample phase */
 +    sx = mx & s_mask;
 +    sy = my & s_mask;
 +    src_x = s->mb_x * block_s + (mx >> lowres + 1);
 +    src_y = s->mb_y * block_s + (my >> lowres + 1);
 +
 +    offset = src_y * s->uvlinesize + src_x;
 +    ptr = ref_picture[1] + offset;
 +    /* if the read reaches outside the padded picture, replicate the borders
 +     * into the edge-emulation scratch buffer and read from there instead */
 +    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
 +        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
 +        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
 +                                 s->uvlinesize, s->uvlinesize,
 +                                 9, 9,
 +                                 src_x, src_y, h_edge_pos, v_edge_pos);
 +        ptr = s->sc.edge_emu_buffer;
 +        emu = 1;
 +    }
 +    /* rescale the sub-sample phase to the range the MC functions expect */
 +    sx = (sx << 2) >> lowres;
 +    sy = (sy << 2) >> lowres;
 +    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
 +
 +    /* Cr reuses the offset and edge-emulation decision computed for Cb */
 +    ptr = ref_picture[2] + offset;
 +    if (emu) {
 +        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
 +                                 s->uvlinesize, s->uvlinesize,
 +                                 9, 9,
 +                                 src_x, src_y, h_edge_pos, v_edge_pos);
 +        ptr = s->sc.edge_emu_buffer;
 +    }
 +    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
 +}
 +
 +/**
 + * motion compensation of a single macroblock at reduced ("lowres") resolution
 + * @param s context
 + * @param dest_y luma destination pointer
 + * @param dest_cb chroma cb/u destination pointer
 + * @param dest_cr chroma cr/v destination pointer
 + * @param dir direction (0->forward, 1->backward)
 + * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 + * @param pix_op halfpel motion compensation function (average or put normally)
 + * the motion vectors are taken from s->mv and the MV type from s->mv_type
 + */
 +static inline void MPV_motion_lowres(MpegEncContext *s,
 +                                     uint8_t *dest_y, uint8_t *dest_cb,
 +                                     uint8_t *dest_cr,
 +                                     int dir, uint8_t **ref_picture,
 +                                     h264_chroma_mc_func *pix_op)
 +{
 +    int mx, my;
 +    int mb_x, mb_y, i;
 +    const int lowres  = s->avctx->lowres;
 +    const int block_s = 8 >>lowres;
 +
 +    mb_x = s->mb_x;
 +    mb_y = s->mb_y;
 +
 +    switch (s->mv_type) {
 +    case MV_TYPE_16X16:
 +        /* one vector for the whole macroblock */
 +        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                           0, 0, 0,
 +                           ref_picture, pix_op,
 +                           s->mv[dir][0][0], s->mv[dir][0][1],
 +                           2 * block_s, mb_y);
 +        break;
 +    case MV_TYPE_8X8:
 +        /* four luma vectors; chroma gets a single vector built from their sum
 +         * in chroma_4mv_motion_lowres() */
 +        mx = 0;
 +        my = 0;
 +        for (i = 0; i < 4; i++) {
 +            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
 +                               s->linesize) * block_s,
 +                               ref_picture[0], 0, 0,
 +                               (2 * mb_x + (i & 1)) * block_s,
 +                               (2 * mb_y + (i >> 1)) * block_s,
 +                               s->width, s->height, s->linesize,
 +                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
 +                               block_s, block_s, pix_op,
 +                               s->mv[dir][i][0], s->mv[dir][i][1]);
 +
 +            mx += s->mv[dir][i][0];
 +            my += s->mv[dir][i][1];
 +        }
 +
 +        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
 +            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
 +                                     pix_op, mx, my);
 +        break;
 +    case MV_TYPE_FIELD:
 +        if (s->picture_structure == PICT_FRAME) {
 +            /* top field */
 +            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                               1, 0, s->field_select[dir][0],
 +                               ref_picture, pix_op,
 +                               s->mv[dir][0][0], s->mv[dir][0][1],
 +                               block_s, mb_y);
 +            /* bottom field */
 +            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                               1, 1, s->field_select[dir][1],
 +                               ref_picture, pix_op,
 +                               s->mv[dir][1][0], s->mv[dir][1][1],
 +                               block_s, mb_y);
 +        } else {
 +            /* field picture referencing the opposite field of the frame
 +             * being decoded: take the data from the current picture */
 +            if (s->picture_structure != s->field_select[dir][0] + 1 &&
 +                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
 +                ref_picture = s->current_picture_ptr->f->data;
 +
 +            }
 +            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                               0, 0, s->field_select[dir][0],
 +                               ref_picture, pix_op,
 +                               s->mv[dir][0][0],
 +                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
 +            }
 +        break;
 +    case MV_TYPE_16X8:
 +        /* two vectors, one per 16x8 half; each half may select either field */
 +        for (i = 0; i < 2; i++) {
 +            uint8_t **ref2picture;
 +
 +            if (s->picture_structure == s->field_select[dir][i] + 1 ||
 +                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
 +                ref2picture = ref_picture;
 +            } else {
 +                /* same-parity reference is the picture being decoded */
 +                ref2picture = s->current_picture_ptr->f->data;
 +            }
 +
 +            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                               0, 0, s->field_select[dir][i],
 +                               ref2picture, pix_op,
 +                               s->mv[dir][i][0], s->mv[dir][i][1] +
 +                               2 * block_s * i, block_s, mb_y >> 1);
 +
 +            dest_y  +=  2 * block_s *  s->linesize;
 +            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
 +            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
 +        }
 +        break;
 +    case MV_TYPE_DMV:
 +        /* dual prime: put the first prediction, then average the second
 +         * on top of it by switching to the avg function table */
 +        if (s->picture_structure == PICT_FRAME) {
 +            for (i = 0; i < 2; i++) {
 +                int j;
 +                for (j = 0; j < 2; j++) {
 +                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                                       1, j, j ^ i,
 +                                       ref_picture, pix_op,
 +                                       s->mv[dir][2 * i + j][0],
 +                                       s->mv[dir][2 * i + j][1],
 +                                       block_s, mb_y);
 +                }
 +                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
 +            }
 +        } else {
 +            for (i = 0; i < 2; i++) {
 +                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
 +                                   0, 0, s->picture_structure != i + 1,
 +                                   ref_picture, pix_op,
 +                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
 +                                   2 * block_s, mb_y >> 1);
 +
 +                // after put we make avg of the same block
 +                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
 +
 +                // opposite parity is always in the same
 +                // frame if this is second field
 +                if (!s->first_field) {
 +                    ref_picture = s->current_picture_ptr->f->data;
                  }
              }
 -            av_log(s->avctx, AV_LOG_DEBUG, "\n");
          }
 +        break;
 +    default:
 +        av_assert2(0);
      }
  }
  




More information about the ffmpeg-cvslog mailing list