[FFmpeg-devel] [PATCH] Replace ASMALIGN() with .p2align

Mans Rullgard
Wed Jun 2 15:50:16 CEST 2010


The configure test for whether the .align argument is a power of two was apparently unreliable with some compilers. Using the .p2align directive, whose argument is always a power-of-two exponent, avoids the need for this test.
---
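Note for reviewers (below the "---", so not part of the commit message): a minimal sketch of the directive semantics this change relies on; the function name is illustrative only and appears nowhere in the patch.

/* With gas-compatible assemblers, ".p2align N" always means "align to 2^N
 * bytes", so ".p2align 3" requests 8-byte alignment on every target.  Plain
 * .align is ambiguous: depending on the target its argument is either a byte
 * count or a power-of-two exponent, which is what the removed "asmalign_pot"
 * configure check tried to detect so that ASMALIGN(3) could expand to either
 * ".align 3" or ".align 1 << 3".
 */
static void align_demo(void)
{
    __asm__ volatile(
        ".p2align 3     \n\t"   /* unambiguous: 2^3 = 8-byte alignment      */
        "1:             \n\t"   /* loop head now starts on an 8-byte boundary */
        "nop            \n\t"   /* placeholder loop body                    */
        ::);
}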
 configure                                 |    7 -------
 libavcodec/x86/dsputil_mmx.c              |    8 ++++----
 libavcodec/x86/dsputil_mmx_avg_template.c |    2 +-
 libavcodec/x86/dsputil_mmx_qns_template.c |    4 ++--
 libavcodec/x86/dsputil_mmx_rnd_template.c |   16 ++++++++--------
 libavcodec/x86/dsputilenc_mmx.c           |    4 ++--
 libavcodec/x86/idct_sse2_xvid.c           |    2 +-
 libavcodec/x86/motion_est_mmx.c           |   16 ++++++++--------
 libavcodec/x86/mpegvideo_mmx.c            |   12 ++++++------
 libavcodec/x86/mpegvideo_mmx_template.c   |    4 ++--
 libavcodec/x86/simple_idct_mmx.c          |   14 +++++++-------
 libavcodec/x86/vc1dsp_mmx.c               |    6 +++---
 12 files changed, 44 insertions(+), 51 deletions(-)

diff --git a/configure b/configure
index 4f65143..d2635ad 100755
--- a/configure
+++ b/configure
@@ -2794,9 +2794,6 @@ if enabled gprof; then
     add_ldflags -p
 fi
 
-# Find out if the .align argument is a power of two or not.
-check_asm asmalign_pot '".align 3"'
-
 enabled_any $THREADS_LIST      && enable threads
 
 check_deps $CONFIG_LIST       \
@@ -2819,7 +2816,6 @@ enabled asm || { arch=c; disable $ARCH_LIST $ARCH_EXT_LIST; }
 echo "install prefix            $prefix"
 echo "source path               $source_path"
 echo "C compiler                $cc"
-echo ".align is power-of-two    $asmalign_pot"
 echo "ARCH                      $arch ($cpu)"
 if test "$build_suffix" != ""; then
     echo "build suffix              $build_suffix"
@@ -3051,8 +3047,6 @@ get_version LIBAVFORMAT libavformat/avformat.h
 get_version LIBAVUTIL   libavutil/avutil.h
 get_version LIBAVFILTER libavfilter/avfilter.h
 
-enabled asmalign_pot || align_shift="1 <<"
-
 cat > $TMPH <<EOF
 /* Automatically generated by configure - do not modify! */
 #ifndef FFMPEG_CONFIG_H
@@ -3063,7 +3057,6 @@ cat > $TMPH <<EOF
 #define CC_TYPE "$cc_type"
 #define CC_VERSION $cc_version
 #define restrict $_restrict
-#define ASMALIGN(ZEROBITS) ".align $align_shift " #ZEROBITS "\\n\\t"
 #define EXTERN_PREFIX "${extern_prefix}"
 #define EXTERN_ASM ${extern_prefix}
 EOF
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index cc2f881..7bb3296 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -73,7 +73,7 @@ DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
 DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
 DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
 
-#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
+#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
 #define MOVQ_ZERO(regd)  __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
 
 #define MOVQ_BFE(regd) \
@@ -360,7 +360,7 @@ static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size
 {
     __asm__ volatile(
          "lea (%3, %3), %%"REG_a"       \n\t"
-         ASMALIGN(3)
+         ".p2align 3                    \n\t"
          "1:                            \n\t"
          "movd (%1), %%mm0              \n\t"
          "movd (%1, %3), %%mm1          \n\t"
@@ -386,7 +386,7 @@ static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size
 {
     __asm__ volatile(
          "lea (%3, %3), %%"REG_a"       \n\t"
-         ASMALIGN(3)
+         ".p2align 3                    \n\t"
          "1:                            \n\t"
          "movq (%1), %%mm0              \n\t"
          "movq (%1, %3), %%mm1          \n\t"
@@ -412,7 +412,7 @@ static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_siz
 {
     __asm__ volatile(
          "lea (%3, %3), %%"REG_a"       \n\t"
-         ASMALIGN(3)
+         ".p2align 3                    \n\t"
          "1:                            \n\t"
          "movq (%1), %%mm0              \n\t"
          "movq 8(%1), %%mm4             \n\t"
diff --git a/libavcodec/x86/dsputil_mmx_avg_template.c b/libavcodec/x86/dsputil_mmx_avg_template.c
index 8220867..d0e6e01 100644
--- a/libavcodec/x86/dsputil_mmx_avg_template.c
+++ b/libavcodec/x86/dsputil_mmx_avg_template.c
@@ -757,7 +757,7 @@ static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line
         "lea (%3, %3), %%"REG_a"        \n\t"
         "movq (%1), %%mm0               \n\t"
         PAVGB" 1(%1), %%mm0             \n\t"
-         ASMALIGN(3)
+         ".p2align 3                    \n\t"
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm2    \n\t"
         "movq (%1, %3), %%mm1           \n\t"
diff --git a/libavcodec/x86/dsputil_mmx_qns_template.c b/libavcodec/x86/dsputil_mmx_qns_template.c
index d2dbfc5..77a41b9 100644
--- a/libavcodec/x86/dsputil_mmx_qns_template.c
+++ b/libavcodec/x86/dsputil_mmx_qns_template.c
@@ -37,7 +37,7 @@ static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[
         "movd  %4, %%mm5                \n\t"
         "punpcklwd %%mm5, %%mm5         \n\t"
         "punpcklwd %%mm5, %%mm5         \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq  (%1, %0), %%mm0          \n\t"
         "movq  8(%1, %0), %%mm1         \n\t"
@@ -77,7 +77,7 @@ static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
                 "movd  %3, %%mm5        \n\t"
                 "punpcklwd %%mm5, %%mm5 \n\t"
                 "punpcklwd %%mm5, %%mm5 \n\t"
-                ASMALIGN(4)
+                ".p2align 4             \n\t"
                 "1:                     \n\t"
                 "movq  (%1, %0), %%mm0  \n\t"
                 "movq  8(%1, %0), %%mm1 \n\t"
diff --git a/libavcodec/x86/dsputil_mmx_rnd_template.c b/libavcodec/x86/dsputil_mmx_rnd_template.c
index 2fc1756..e4c9138 100644
--- a/libavcodec/x86/dsputil_mmx_rnd_template.c
+++ b/libavcodec/x86/dsputil_mmx_rnd_template.c
@@ -30,7 +30,7 @@ static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line
     MOVQ_BFE(mm6);
     __asm__ volatile(
         "lea    (%3, %3), %%"REG_a"     \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq   (%1), %%mm0             \n\t"
         "movq   1(%1), %%mm1            \n\t"
@@ -71,7 +71,7 @@ static void av_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t
         "movq   %%mm4, (%3)             \n\t"
         "add    %5, %3                  \n\t"
         "decl   %0                      \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq   (%1), %%mm0             \n\t"
         "movq   (%2), %%mm1             \n\t"
@@ -112,7 +112,7 @@ static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int lin
     MOVQ_BFE(mm6);
     __asm__ volatile(
         "lea        (%3, %3), %%"REG_a" \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq   (%1), %%mm0             \n\t"
         "movq   1(%1), %%mm1            \n\t"
@@ -170,7 +170,7 @@ static void av_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t
         "movq   %%mm5, 8(%3)            \n\t"
         "add    %5, %3                  \n\t"
         "decl   %0                      \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq   (%1), %%mm0             \n\t"
         "movq   (%2), %%mm1             \n\t"
@@ -208,7 +208,7 @@ static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line
     __asm__ volatile(
         "lea (%3, %3), %%"REG_a"        \n\t"
         "movq (%1), %%mm0               \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq   (%1, %3), %%mm1         \n\t"
         "movq   (%1, %%"REG_a"),%%mm2   \n\t"
@@ -248,7 +248,7 @@ static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int lin
         "paddusw %%mm1, %%mm5           \n\t"
         "xor    %%"REG_a", %%"REG_a"    \n\t"
         "add    %3, %1                  \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq   (%1, %%"REG_a"), %%mm0  \n\t"
         "movq   1(%1, %%"REG_a"), %%mm2 \n\t"
@@ -460,7 +460,7 @@ static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line
     __asm__ volatile(
         "lea    (%3, %3), %%"REG_a"     \n\t"
         "movq   (%1), %%mm0             \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq   (%1, %3), %%mm1         \n\t"
         "movq   (%1, %%"REG_a"), %%mm2  \n\t"
@@ -511,7 +511,7 @@ static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int lin
         "paddusw %%mm1, %%mm5           \n\t"
         "xor    %%"REG_a", %%"REG_a"    \n\t"
         "add    %3, %1                  \n\t"
-        ASMALIGN(3)
+        ".p2align 3                     \n\t"
         "1:                             \n\t"
         "movq   (%1, %%"REG_a"), %%mm0  \n\t"
         "movq   1(%1, %%"REG_a"), %%mm2 \n\t"
diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index f491111..886b1e9 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -34,7 +34,7 @@ static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
     __asm__ volatile(
         "mov $-128, %%"REG_a"           \n\t"
         "pxor %%mm7, %%mm7              \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0), %%mm0               \n\t"
         "movq (%0, %2), %%mm2           \n\t"
@@ -96,7 +96,7 @@ static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint
     __asm__ volatile(
         "pxor %%mm7, %%mm7              \n\t"
         "mov $-128, %%"REG_a"           \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%0), %%mm0               \n\t"
         "movq (%1), %%mm2               \n\t"
diff --git a/libavcodec/x86/idct_sse2_xvid.c b/libavcodec/x86/idct_sse2_xvid.c
index fc670e2..f64261d 100644
--- a/libavcodec/x86/idct_sse2_xvid.c
+++ b/libavcodec/x86/idct_sse2_xvid.c
@@ -355,7 +355,7 @@ inline void ff_idct_xvid_sse2(short *block)
     TEST_TWO_ROWS("5*16(%0)", "6*16(%0)", "%%eax", "%%edx", CLEAR_ODD(ROW5), CLEAR_EVEN(ROW6))
     TEST_ONE_ROW("7*16(%0)", "%%esi", CLEAR_ODD(ROW7))
     iLLM_HEAD
-    ASMALIGN(4)
+    ".p2align 4 \n\t"
     JNZ("%%ecx", "2f")
     JNZ("%%eax", "3f")
     JNZ("%%edx", "4f")
diff --git a/libavcodec/x86/motion_est_mmx.c b/libavcodec/x86/motion_est_mmx.c
index 0272410..91c7582 100644
--- a/libavcodec/x86/motion_est_mmx.c
+++ b/libavcodec/x86/motion_est_mmx.c
@@ -38,7 +38,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     x86_reg len= -(stride*h);
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq (%2, %%"REG_a"), %%mm2    \n\t"
@@ -73,7 +73,7 @@ static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq (%1, %3), %%mm1           \n\t"
@@ -95,7 +95,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
     int ret;
     __asm__ volatile(
         "pxor %%xmm6, %%xmm6            \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movdqu (%1), %%xmm0            \n\t"
         "movdqu (%1, %3), %%xmm1        \n\t"
@@ -122,7 +122,7 @@ static int sad16_sse2(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)
 static inline void sad8_x2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
 {
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm0               \n\t"
         "movq (%1, %3), %%mm1           \n\t"
@@ -146,7 +146,7 @@ static inline void sad8_y2a_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h
     __asm__ volatile(
         "movq (%1), %%mm0               \n\t"
         "add %3, %1                     \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm1               \n\t"
         "movq (%1, %3), %%mm2           \n\t"
@@ -173,7 +173,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
         "movq (%1), %%mm0               \n\t"
         "pavgb 1(%1), %%mm0             \n\t"
         "add %3, %1                     \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1), %%mm1               \n\t"
         "movq (%1,%3), %%mm2            \n\t"
@@ -200,7 +200,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
 {
     x86_reg len= -(stride*h);
     __asm__ volatile(
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%1, %%"REG_a"), %%mm0    \n\t"
         "movq (%2, %%"REG_a"), %%mm1    \n\t"
@@ -248,7 +248,7 @@ static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
         "punpckhbw %%mm7, %%mm3         \n\t"
         "paddw %%mm2, %%mm0             \n\t"
         "paddw %%mm3, %%mm1             \n\t"
-        ASMALIGN(4)
+        ".p2align 4                     \n\t"
         "1:                             \n\t"
         "movq (%2, %%"REG_a"), %%mm2    \n\t"
         "movq 1(%2, %%"REG_a"), %%mm4   \n\t"
diff --git a/libavcodec/x86/mpegvideo_mmx.c b/libavcodec/x86/mpegvideo_mmx.c
index 5deb68d..f89100e 100644
--- a/libavcodec/x86/mpegvideo_mmx.c
+++ b/libavcodec/x86/mpegvideo_mmx.c
@@ -65,7 +65,7 @@ __asm__ volatile(
                 "packssdw %%mm5, %%mm5          \n\t"
                 "psubw %%mm5, %%mm7             \n\t"
                 "pxor %%mm4, %%mm4              \n\t"
-                ASMALIGN(4)
+                ".p2align 4                     \n\t"
                 "1:                             \n\t"
                 "movq (%0, %3), %%mm0           \n\t"
                 "movq 8(%0, %3), %%mm1          \n\t"
@@ -128,7 +128,7 @@ __asm__ volatile(
                 "packssdw %%mm5, %%mm5          \n\t"
                 "psubw %%mm5, %%mm7             \n\t"
                 "pxor %%mm4, %%mm4              \n\t"
-                ASMALIGN(4)
+                ".p2align 4                     \n\t"
                 "1:                             \n\t"
                 "movq (%0, %3), %%mm0           \n\t"
                 "movq 8(%0, %3), %%mm1          \n\t"
@@ -221,7 +221,7 @@ __asm__ volatile(
                 "packssdw %%mm6, %%mm6          \n\t"
                 "packssdw %%mm6, %%mm6          \n\t"
                 "mov %3, %%"REG_a"              \n\t"
-                ASMALIGN(4)
+                ".p2align 4                     \n\t"
                 "1:                             \n\t"
                 "movq (%0, %%"REG_a"), %%mm0    \n\t"
                 "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
@@ -284,7 +284,7 @@ __asm__ volatile(
                 "packssdw %%mm6, %%mm6          \n\t"
                 "packssdw %%mm6, %%mm6          \n\t"
                 "mov %3, %%"REG_a"              \n\t"
-                ASMALIGN(4)
+                ".p2align 4                     \n\t"
                 "1:                             \n\t"
                 "movq (%0, %%"REG_a"), %%mm0    \n\t"
                 "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
@@ -356,7 +356,7 @@ __asm__ volatile(
                 "packssdw %%mm6, %%mm6          \n\t"
                 "packssdw %%mm6, %%mm6          \n\t"
                 "mov %3, %%"REG_a"              \n\t"
-                ASMALIGN(4)
+                ".p2align 4                     \n\t"
                 "1:                             \n\t"
                 "movq (%0, %%"REG_a"), %%mm0    \n\t"
                 "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
@@ -417,7 +417,7 @@ __asm__ volatile(
                 "packssdw %%mm6, %%mm6          \n\t"
                 "packssdw %%mm6, %%mm6          \n\t"
                 "mov %3, %%"REG_a"              \n\t"
-                ASMALIGN(4)
+                ".p2align 4                     \n\t"
                 "1:                             \n\t"
                 "movq (%0, %%"REG_a"), %%mm0    \n\t"
                 "movq 8(%0, %%"REG_a"), %%mm1   \n\t"
diff --git a/libavcodec/x86/mpegvideo_mmx_template.c b/libavcodec/x86/mpegvideo_mmx_template.c
index 0d92792..a7e999f 100644
--- a/libavcodec/x86/mpegvideo_mmx_template.c
+++ b/libavcodec/x86/mpegvideo_mmx_template.c
@@ -158,7 +158,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
             "pxor "MM"6, "MM"6                  \n\t"
             "psubw (%3), "MM"6                  \n\t" // -bias[0]
             "mov $-128, %%"REG_a"               \n\t"
-            ASMALIGN(4)
+            ".p2align 4                         \n\t"
             "1:                                 \n\t"
             MOVQ" (%1, %%"REG_a"), "MM"0        \n\t" // block[i]
             SAVE_SIGN(MM"1", MM"0")                   // ABS(block[i])
@@ -188,7 +188,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
             "pxor "MM"7, "MM"7                  \n\t" // 0
             "pxor "MM"4, "MM"4                  \n\t" // 0
             "mov $-128, %%"REG_a"               \n\t"
-            ASMALIGN(4)
+            ".p2align 4                         \n\t"
             "1:                                 \n\t"
             MOVQ" (%1, %%"REG_a"), "MM"0        \n\t" // block[i]
             SAVE_SIGN(MM"1", MM"0")                   // ABS(block[i])
diff --git a/libavcodec/x86/simple_idct_mmx.c b/libavcodec/x86/simple_idct_mmx.c
index 5ea4c84..abbc0f2 100644
--- a/libavcodec/x86/simple_idct_mmx.c
+++ b/libavcodec/x86/simple_idct_mmx.c
@@ -789,7 +789,7 @@ IDCT(  16(%1), 80(%1), 48(%1), 112(%1),  8(%0), 20)
 IDCT(  24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
 
-        "#" ASMALIGN(4)                      \
+        "# .p2align 4                   \n\t"\
         "4:                             \n\t"
 Z_COND_IDCT(  64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 6f)
 Z_COND_IDCT(  96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f)
@@ -864,7 +864,7 @@ IDCT(  16(%1), 80(%1), 48(%1), 112(%1),  8(%0), 20)
 IDCT(  24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
 
-        "#" ASMALIGN(4)                      \
+        "# .p2align 4                   \n\t"\
         "6:                             \n\t"
 Z_COND_IDCT(  96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f)
 
@@ -930,7 +930,7 @@ IDCT(  16(%1), 80(%1), 48(%1), 112(%1),  8(%0), 20)
 IDCT(  24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
 
-        "#" ASMALIGN(4)                      \
+        "# .p2align 4                   \n\t"\
         "2:                             \n\t"
 Z_COND_IDCT(  96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f)
 
@@ -1007,7 +1007,7 @@ IDCT(  16(%1), 80(%1), 48(%1), 112(%1),  8(%0), 20)
 IDCT(  24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
 
-        "#" ASMALIGN(4)                      \
+        "# .p2align 4                   \n\t"\
         "3:                             \n\t"
 #undef IDCT
 #define IDCT(src0, src4, src1, src5, dst, shift) \
@@ -1071,7 +1071,7 @@ IDCT(  16(%1), 80(%1), 48(%1), 112(%1),  8(%0), 20)
 IDCT(  24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
 
-        "#" ASMALIGN(4)                      \
+        "# .p2align 4                   \n\t"\
         "5:                             \n\t"
 #undef IDCT
 #define IDCT(src0, src4, src1, src5, dst, shift) \
@@ -1136,7 +1136,7 @@ IDCT(  16(%1), 80(%1), 48(%1), 112(%1),  8(%0), 20)
         "jmp 9f                         \n\t"
 
 
-        "#" ASMALIGN(4)                      \
+        "# .p2align 4                   \n\t"\
         "1:                             \n\t"
 #undef IDCT
 #define IDCT(src0, src4, src1, src5, dst, shift) \
@@ -1210,7 +1210,7 @@ IDCT(  24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20)
         "jmp 9f                         \n\t"
 
 
-        "#" ASMALIGN(4)
+        "# .p2align 4                   \n\t"
         "7:                             \n\t"
 #undef IDCT
 #define IDCT(src0, src4, src1, src5, dst, shift) \
diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c
index e0b1f5b..092976f 100644
--- a/libavcodec/x86/vc1dsp_mmx.c
+++ b/libavcodec/x86/vc1dsp_mmx.c
@@ -283,7 +283,7 @@ vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src,      \
         LOAD_ROUNDER_MMX("%5")                                          \
         "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"                       \
         "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"                       \
-        ASMALIGN(3)                                                     \
+        ".p2align 3                \n\t"                                \
         "1:                        \n\t"                                \
         MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)       \
         NORMALIZE_MMX("%6")                                             \
@@ -339,7 +339,7 @@ OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride,    \
         LOAD_ROUNDER_MMX("%4")                                          \
         "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"                    \
         "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"                    \
-        ASMALIGN(3)                                                     \
+        ".p2align 3                \n\t"                                \
         "1:                        \n\t"                                \
         MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)      \
         NORMALIZE_MMX("$7")                                             \
@@ -377,7 +377,7 @@ OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src,         \
         LOAD_ROUNDER_MMX("%6")                                          \
         "movq      "MANGLE(ff_pw_53)", %%mm5       \n\t"                \
         "movq      "MANGLE(ff_pw_18)", %%mm6       \n\t"                \
-        ASMALIGN(3)                                                     \
+        ".p2align 3                \n\t"                                \
         "1:                        \n\t"                                \
         MSPEL_FILTER13_CORE(DO_UNPACK, "movd   1", A1, A2, A3, A4)      \
         NORMALIZE_MMX("$6")                                             \
-- 
1.7.1



