[FFmpeg-cvslog] r25042 - in trunk/libavcodec/x86: h264_weight.asm vc1dsp_yasm.asm vp3dsp.asm vp8dsp.asm

Author: reimar
Date: Sun Sep  5 12:10:16 2010
New Revision: 25042

Log:
Use "d" suffix for general-purpose registers used with movd.
This increases compatibility with nasm and is also more consistent,
e.g. with h264_intrapred.asm and h264_chromamc.asm, which already
do it that way.
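
For reference, a minimal sketch of the pattern, assuming FFmpeg's
x86inc.asm register naming (rN expands to the full-width GPR, which is
64-bit on x86_64, while rNd names its 32-bit low half): movd only takes
a 32-bit general-purpose operand, so nasm rejects the unsuffixed form
on x86_64 (a 64-bit GPR would need movq there), while yasm silently
accepts it.

    ; before: on x86_64, r3 is a 64-bit register, which nasm
    ; refuses as a movd operand
    movd   m3, r3
    ; after: r3d is the 32-bit low half, valid for movd under
    ; both assemblers; on x86_32, r3 and r3d name the same
    ; register, so nothing changes there
    movd   m3, r3d

The same suffix applies when the GPR is the movd destination, e.g.
movd r2d, m0 to extract the low dword of a vector register.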

Modified:
   trunk/libavcodec/x86/h264_weight.asm
   trunk/libavcodec/x86/vc1dsp_yasm.asm
   trunk/libavcodec/x86/vp3dsp.asm
   trunk/libavcodec/x86/vp8dsp.asm

Modified: trunk/libavcodec/x86/h264_weight.asm
==============================================================================
--- trunk/libavcodec/x86/h264_weight.asm	Sat Sep  4 12:05:03 2010	(r25041)
+++ trunk/libavcodec/x86/h264_weight.asm	Sun Sep  5 12:10:16 2010	(r25042)
@@ -40,9 +40,9 @@ SECTION .text
 %macro WEIGHT_SETUP 0
     add        r4, r4
     inc        r4
-    movd       m3, r3
-    movd       m5, r4
-    movd       m6, r2
+    movd       m3, r3d
+    movd       m5, r4d
+    movd       m6, r2d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -156,10 +156,10 @@ WEIGHT_FUNC_HALF_MM 8,  4, 16, 8, sse2
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m3, r4
-    movd       m4, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m3, r4d
+    movd       m4, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
 %if mmsize == 16
@@ -291,10 +291,10 @@ BIWEIGHT_FUNC_HALF_MM 8,  4, 16, 8, sse2
     add        r6, 1
     or         r6, 1
     add        r3, 1
-    movd       m4, r4
-    movd       m0, r5
-    movd       m5, r6
-    movd       m6, r3
+    movd       m4, r4d
+    movd       m0, r5d
+    movd       m5, r6d
+    movd       m6, r3d
     pslld      m5, m6
     psrld      m5, 1
     punpcklbw  m4, m0

Modified: trunk/libavcodec/x86/vc1dsp_yasm.asm
==============================================================================
--- trunk/libavcodec/x86/vc1dsp_yasm.asm	Sat Sep  4 12:05:03 2010	(r25041)
+++ trunk/libavcodec/x86/vc1dsp_yasm.asm	Sun Sep  5 12:10:16 2010	(r25042)
@@ -36,7 +36,7 @@ section .text
 %endmacro
 
 %macro STORE_4_WORDS_MMX 6
-    movd   %6, %5
+    movd  %6d, %5
 %if mmsize==16
     psrldq %5, 4
 %else
@@ -45,7 +45,7 @@ section .text
     mov    %1, %6w
     shr    %6, 16
     mov    %2, %6w
-    movd   %6, %5
+    movd  %6d, %5
     mov    %3, %6w
     shr    %6, 16
     mov    %4, %6w
@@ -88,7 +88,7 @@ section .text
     pxor    m7, m3  ; d_sign ^= a0_sign
 
     pxor    m5, m5
-    movd    m3, r2
+    movd    m3, r2d
 %if %1 > 4
     punpcklbw m3, m3
 %endif

Modified: trunk/libavcodec/x86/vp3dsp.asm
==============================================================================
--- trunk/libavcodec/x86/vp3dsp.asm	Sat Sep  4 12:05:03 2010	(r25041)
+++ trunk/libavcodec/x86/vp3dsp.asm	Sun Sep  5 12:10:16 2010	(r25042)
@@ -93,12 +93,12 @@ SECTION .text
 %endmacro
 
 %macro STORE_4_WORDS 1
-    movd          r2, %1
+    movd         r2d, %1
     mov  [r0     -1], r2w
     psrlq         %1, 32
     shr           r2, 16
     mov  [r0+r1  -1], r2w
-    movd          r2, %1
+    movd         r2d, %1
     mov  [r0+r1*2-1], r2w
     shr           r2, 16
     mov  [r0+r3  -1], r2w
@@ -606,7 +606,7 @@ cglobal vp3_idct_dc_add_mmx2, 3, 4
     movsx         r2, word [r2]
     add           r2, 15
     sar           r2, 5
-    movd          m0, r2
+    movd          m0, r2d
     pshufw        m0, m0, 0x0
     pxor          m1, m1
     psubw         m1, m0

Modified: trunk/libavcodec/x86/vp8dsp.asm
==============================================================================
--- trunk/libavcodec/x86/vp8dsp.asm	Sat Sep  4 12:05:03 2010	(r25041)
+++ trunk/libavcodec/x86/vp8dsp.asm	Sun Sep  5 12:10:16 2010	(r25042)
@@ -1342,7 +1342,7 @@ VP8_DC_WHT sse
     psrldq        m%2, 4
 %if %10 == 8
     movd    [%5+%8*2], m%1
-    movd           %5, m%3
+    movd          %5d, m%3
 %endif
     psrldq        m%3, 4
     psrldq        m%4, 4
@@ -1379,26 +1379,26 @@ VP8_DC_WHT sse
 ; 4 is a pointer to the destination's 4th line
 ; 5/6 is -stride and +stride
 %macro WRITE_2x4W 6
-    movd             %3, %1
+    movd            %3d, %1
     punpckhdq        %1, %1
     mov       [%4+%5*4], %3w
     shr              %3, 16
     add              %4, %6
     mov       [%4+%5*4], %3w
 
-    movd             %3, %1
+    movd            %3d, %1
     add              %4, %5
     mov       [%4+%5*2], %3w
     shr              %3, 16
     mov       [%4+%5  ], %3w
 
-    movd             %3, %2
+    movd            %3d, %2
     punpckhdq        %2, %2
     mov       [%4     ], %3w
     shr              %3, 16
     mov       [%4+%6  ], %3w
 
-    movd             %3, %2
+    movd            %3d, %2
     add              %4, %6
     mov       [%4+%6  ], %3w
     shr              %3, 16
@@ -1407,27 +1407,27 @@ VP8_DC_WHT sse
 %endmacro
 
 %macro WRITE_8W_SSE2 5
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     mov       [%3+%4*4], %2w
     shr              %2, 16
     add              %3, %5
     mov       [%3+%4*4], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     add              %3, %4
     mov       [%3+%4*2], %2w
     shr              %2, 16
     mov       [%3+%4  ], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     psrldq           %1, 4
     mov       [%3     ], %2w
     shr              %2, 16
     mov       [%3+%5  ], %2w
 
-    movd             %2, %1
+    movd            %2d, %1
     add              %3, %5
     mov       [%3+%5  ], %2w
     shr              %2, 16
@@ -1446,27 +1446,27 @@ VP8_DC_WHT sse
 %endmacro
 
 %macro SPLATB_REG_MMX 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     punpcklwd      %1, %1
     punpckldq      %1, %1
 %endmacro
 
 %macro SPLATB_REG_MMXEXT 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     pshufw         %1, %1, 0x0
 %endmacro
 
 %macro SPLATB_REG_SSE2 2-3
-    movd           %1, %2
+    movd           %1, %2d
     punpcklbw      %1, %1
     pshuflw        %1, %1, 0x0
     punpcklqdq     %1, %1
 %endmacro
 
 %macro SPLATB_REG_SSSE3 3
-    movd           %1, %2
+    movd           %1, %2d
     pshufb         %1, %3
 %endmacro
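
Note that in the SPLATB_REG macros above the "d" suffix lands on the
source operand of movd (a byte to be broadcast is loaded from a GPR),
whereas in STORE_4_WORDS and the WRITE_* macros it lands on the
destination (a dword is extracted from the vector register into a
GPR); movd's 32-bit constraint on its GPR operand is the same in both
directions. For illustration, SPLATB_REG_MMX instantiated with %1=m0
and %2=r2:

    movd       m0, r2d   ; 32-bit GPR -> low dword of m0
    punpcklbw  m0, m0    ; duplicate the low byte into the low word
    punpcklwd  m0, m0    ; duplicate that word into the low dword
    punpckldq  m0, m0    ; byte now replicated across all 8 lanes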
 


