[FFmpeg-devel] [PATCH] avutil/motion_vector: export subpel motion information
Michael Niedermayer
michael at niedermayer.cc
Mon Nov 16 02:49:05 CET 2015
On Thu, Nov 12, 2015 at 03:03:33PM +0100, Clément Bœsch wrote:
> ---
> libavcodec/mpegvideo.c | 36 +++++++++++++++++++++---------------
> libavcodec/snowdec.c | 4 ++++
> libavfilter/vf_codecview.c | 7 +++++--
> libavutil/motion_vector.h | 8 ++++++++
> libavutil/version.h | 2 +-
> 5 files changed, 39 insertions(+), 18 deletions(-)
>
> diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
> index 96634ec..9ba1e09 100644
> --- a/libavcodec/mpegvideo.c
> +++ b/libavcodec/mpegvideo.c
> @@ -1556,15 +1556,21 @@ static void draw_arrow(uint8_t *buf, int sx, int sy, int ex,
>
> static int add_mb(AVMotionVector *mb, uint32_t mb_type,
> int dst_x, int dst_y,
> - int src_x, int src_y,
> + int motion_x, int motion_y, int motion_shift,
> int direction)
> {
> mb->w = IS_8X8(mb_type) || IS_8X16(mb_type) ? 8 : 16;
> mb->h = IS_8X8(mb_type) || IS_16X8(mb_type) ? 8 : 16;
> - mb->src_x = src_x;
> - mb->src_y = src_y;
> +
> + mb->motion_x = motion_x;
> + mb->motion_y = motion_y;
> + mb->motion_scale = 1;
> + mb->motion_shift = motion_shift;
> +
> mb->dst_x = dst_x;
> mb->dst_y = dst_y;
> + mb->src_x = dst_x + (motion_x >> motion_shift);
> + mb->src_y = dst_y + (motion_y >> motion_shift);
> mb->source = direction ? 1 : -1;
> mb->flags = 0; // XXX: does mb_type contain extra information that could be exported here?
> return 1;
> @@ -1603,43 +1609,43 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_
> int sy = mb_y * 16 + 4 + 8 * (i >> 1);
> int xy = (mb_x * 2 + (i & 1) +
> (mb_y * 2 + (i >> 1)) * mv_stride) << (mv_sample_log2 - 1);
> - int mx = (motion_val[direction][xy][0] >> shift) + sx;
> - int my = (motion_val[direction][xy][1] >> shift) + sy;
> - mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
> + int mx = motion_val[direction][xy][0];
> + int my = motion_val[direction][xy][1];
> + mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, shift, direction);
> }
> } else if (IS_16X8(mb_type)) {
> for (i = 0; i < 2; i++) {
> int sx = mb_x * 16 + 8;
> int sy = mb_y * 16 + 4 + 8 * i;
> int xy = (mb_x * 2 + (mb_y * 2 + i) * mv_stride) << (mv_sample_log2 - 1);
> - int mx = (motion_val[direction][xy][0] >> shift);
> - int my = (motion_val[direction][xy][1] >> shift);
> + int mx = motion_val[direction][xy][0];
> + int my = motion_val[direction][xy][1];
>
> if (IS_INTERLACED(mb_type))
> my *= 2;
>
> - mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
> + mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, shift, direction);
> }
> } else if (IS_8X16(mb_type)) {
> for (i = 0; i < 2; i++) {
> int sx = mb_x * 16 + 4 + 8 * i;
> int sy = mb_y * 16 + 8;
> int xy = (mb_x * 2 + i + mb_y * 2 * mv_stride) << (mv_sample_log2 - 1);
> - int mx = motion_val[direction][xy][0] >> shift;
> - int my = motion_val[direction][xy][1] >> shift;
> + int mx = motion_val[direction][xy][0];
> + int my = motion_val[direction][xy][1];
>
> if (IS_INTERLACED(mb_type))
> my *= 2;
>
> - mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx + sx, my + sy, direction);
> + mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, shift, direction);
> }
> } else {
> int sx = mb_x * 16 + 8;
> int sy = mb_y * 16 + 8;
> int xy = (mb_x + mb_y * mv_stride) << mv_sample_log2;
> - int mx = (motion_val[direction][xy][0]>>shift) + sx;
> - int my = (motion_val[direction][xy][1]>>shift) + sy;
> - mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, direction);
> + int mx = motion_val[direction][xy][0];
> + int my = motion_val[direction][xy][1];
> + mbcount += add_mb(mvs + mbcount, mb_type, sx, sy, mx, my, shift, direction);
> }
> }
> }
> diff --git a/libavcodec/snowdec.c b/libavcodec/snowdec.c
> index 1b288dd..0746b65 100644
> --- a/libavcodec/snowdec.c
> +++ b/libavcodec/snowdec.c
> @@ -104,6 +104,10 @@ static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer
> avmv->h = block_h;
> avmv->dst_x = block_w*mb_x - block_w/2;
> avmv->dst_y = block_h*mb_y - block_h/2;
> + avmv->motion_scale = s->mv_scale;
> + avmv->motion_shift = 3;
> + avmv->motion_x = bn->mx;
> + avmv->motion_y = bn->my;
> avmv->src_x = avmv->dst_x + (bn->mx * s->mv_scale)/8;
> avmv->src_y = avmv->dst_y + (bn->my * s->mv_scale)/8;
> avmv->source= -1 - bn->ref;
> diff --git a/libavfilter/vf_codecview.c b/libavfilter/vf_codecview.c
> index df45f55..3de7f5d 100644
> --- a/libavfilter/vf_codecview.c
> +++ b/libavfilter/vf_codecview.c
> @@ -207,10 +207,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
> const int direction = mv->source > 0;
> if ((direction == 0 && (s->mv & MV_P_FOR) && frame->pict_type == AV_PICTURE_TYPE_P) ||
> (direction == 0 && (s->mv & MV_B_FOR) && frame->pict_type == AV_PICTURE_TYPE_B) ||
> - (direction == 1 && (s->mv & MV_B_BACK) && frame->pict_type == AV_PICTURE_TYPE_B))
> - draw_arrow(frame->data[0], mv->dst_x, mv->dst_y, mv->src_x, mv->src_y,
> + (direction == 1 && (s->mv & MV_B_BACK) && frame->pict_type == AV_PICTURE_TYPE_B)) {
> + const int src_x = mv->dst_x + ((mv->motion_x * mv->motion_scale) >> mv->motion_shift);
> + const int src_y = mv->dst_y + ((mv->motion_y * mv->motion_scale) >> mv->motion_shift);
> + draw_arrow(frame->data[0], mv->dst_x, mv->dst_y, src_x, src_y,
> frame->width, frame->height, frame->linesize[0],
> 100, 0, mv->source > 0);
> + }
> }
> }
> return ff_filter_frame(outlink, frame);
> diff --git a/libavutil/motion_vector.h b/libavutil/motion_vector.h
> index 30cfb99..9d946c4 100644
> --- a/libavutil/motion_vector.h
> +++ b/libavutil/motion_vector.h
> @@ -45,6 +45,14 @@ typedef struct AVMotionVector {
> * Currently unused.
> */
> uint64_t flags;
> + /**
> + * Motion vector
> + * src_x = dst_x + (motion_x * motion_scale) >> motion_shift
> + * src_y = dst_y + (motion_y * motion_scale) >> motion_shift
> + */
> + int32_t motion_x, motion_y;
> + int16_t motion_scale;
> + uint8_t motion_shift;
I think this doesn't support libavcodec/tpeldsp.c as used in svq3:
that would need a motion_x/y divided by 3, which a shift cannot express.
I might be missing something, but something like
    /**
     * Motion vector
     * src_x = dst_x + motion_x / motion_scale
     * src_y = dst_y + motion_y / motion_scale
     */
    int32_t motion_x, motion_y;
    uint8_t motion_scale; // This is almost always a power of 2
should be enough to support everything.
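
For illustration, a rough sketch (not part of the patch, names are hypothetical)
of how a caller would resolve src_x/src_y under that layout; the same integer
division covers both power-of-two scales and thirdpel:

    #include <libavutil/motion_vector.h>

    /* Assumes AVMotionVector gained the motion_x/motion_y/motion_scale fields
     * sketched above; motion_scale would be 2/4/8 for half/quarter/eighth-pel
     * codecs and 3 for thirdpel (libavcodec/tpeldsp.c as used by svq3). */
    static void resolve_src(const AVMotionVector *mv, int *src_x, int *src_y)
    {
        *src_x = mv->dst_x + mv->motion_x / mv->motion_scale;
        *src_y = mv->dst_y + mv->motion_y / mv->motion_scale;
    }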
The "*motion_scale" should not be needed, the exportet vectors
can just be multiplied up if thats needed
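
(Purely as a hypothetical example of such multiplying up, using the same mv as
in the sketch above: a caller that wants every vector on one common subpel grid
can rescale it itself, e.g.

    /* 12 = lcm(4, 3), so this is exact for quarter-pel and thirdpel codecs */
    int mx12 = mv->motion_x * 12 / mv->motion_scale;
    int my12 = mv->motion_y * 12 / mv->motion_scale;

so nothing is lost by not baking the scale into the exported values.)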
[...]
--
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB
No snowflake in an avalanche ever feels responsible. -- Voltaire