[FFmpeg-devel] [PATCH 02/12] mips/float_dsp: replace assembly with C implementations

James Cowgill james410 at cowgill.org.uk
Thu Feb 26 14:42:43 CET 2015


The assembly versions have a few problems:
- They only work with mips32r2 enabled
- They don't work on 64-bit MIPS
- They're massive and complex

So replace them with C implementations that solve these problems and let GCC
magically optimize for different platforms. All the functions are manually
unrolled 4 times (like the assembly code). With the addition of a few
av_restrict keywords, the functions produce almost identical assembly to the
original versions when compiled with gcc -O3.

Since this code no longer uses any inline FPU assembly, drop the
HAVE_INLINE_ASM && HAVE_MIPSFPU guard as well.
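
To illustrate, every function now follows the same shape; here is vector_fmul
as a minimal standalone sketch (plain C99 restrict standing in for av_restrict,
and the av_assert2 on the length omitted):

    static void vector_fmul(float *restrict dst, const float *restrict src0,
                            const float *restrict src1, int len)
    {
        int i;

        /* callers pass a len that is a multiple of 4 (asserted in the real code) */
        for (i = 0; i < len; i += 4) {
            dst[i    ] = src0[i    ] * src1[i    ];
            dst[i + 1] = src0[i + 1] * src1[i + 1];
            dst[i + 2] = src0[i + 2] * src1[i + 2];
            dst[i + 3] = src0[i + 3] * src1[i + 3];
        }
    }

The restrict qualifiers are what let GCC schedule the loads and multiplies
across the four lanes much like the hand-written assembly did.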

Signed-off-by: James Cowgill <james410 at cowgill.org.uk>
---
 libavutil/mips/float_dsp_mips.c | 354 ++++++++--------------------------------
 1 file changed, 72 insertions(+), 282 deletions(-)

diff --git a/libavutil/mips/float_dsp_mips.c b/libavutil/mips/float_dsp_mips.c
index 06d52dc..31425de 100644
--- a/libavutil/mips/float_dsp_mips.c
+++ b/libavutil/mips/float_dsp_mips.c
@@ -52,332 +52,122 @@
  */
 
 #include "config.h"
+#include "libavutil/avassert.h"
 #include "libavutil/float_dsp.h"
 
-#if HAVE_INLINE_ASM && HAVE_MIPSFPU
-static void vector_fmul_mips(float *dst, const float *src0, const float *src1,
-                             int len)
+// The functions here are basically the same as the C implementations but
+// unrolled 4 times to take advantage of pointer alignment + mips fpu registers
+
+static void vector_fmul_mips(
+    float *av_restrict dst, const float *av_restrict src0,
+    const float *av_restrict src1, int len)
 {
     int i;
 
-    if (len & 3) {
-        for (i = 0; i < len; i++)
-            dst[i] = src0[i] * src1[i];
-    } else {
-        float *d     = (float *)dst;
-        float *d_end = d + len;
-        float *s0    = (float *)src0;
-        float *s1    = (float *)src1;
-
-        float src0_0, src0_1, src0_2, src0_3;
-        float src1_0, src1_1, src1_2, src1_3;
-
-        __asm__ volatile (
-            "1:                                         \n\t"
-            "lwc1   %[src0_0],  0(%[s0])                \n\t"
-            "lwc1   %[src1_0],  0(%[s1])                \n\t"
-            "lwc1   %[src0_1],  4(%[s0])                \n\t"
-            "lwc1   %[src1_1],  4(%[s1])                \n\t"
-            "lwc1   %[src0_2],  8(%[s0])                \n\t"
-            "lwc1   %[src1_2],  8(%[s1])                \n\t"
-            "lwc1   %[src0_3],  12(%[s0])               \n\t"
-            "lwc1   %[src1_3],  12(%[s1])               \n\t"
-            "mul.s  %[src0_0],  %[src0_0],  %[src1_0]   \n\t"
-            "mul.s  %[src0_1],  %[src0_1],  %[src1_1]   \n\t"
-            "mul.s  %[src0_2],  %[src0_2],  %[src1_2]   \n\t"
-            "mul.s  %[src0_3],  %[src0_3],  %[src1_3]   \n\t"
-            "swc1   %[src0_0],  0(%[d])                 \n\t"
-            "swc1   %[src0_1],  4(%[d])                 \n\t"
-            "swc1   %[src0_2],  8(%[d])                 \n\t"
-            "swc1   %[src0_3],  12(%[d])                \n\t"
-            "addiu  %[s0],      %[s0],      16          \n\t"
-            "addiu  %[s1],      %[s1],      16          \n\t"
-            "addiu  %[d],       %[d],       16          \n\t"
-            "bne    %[d],       %[d_end],   1b          \n\t"
+    // input length must be a multiple of 4
+    av_assert2(len % 4 == 0);
 
-            : [src0_0]"=&f"(src0_0), [src0_1]"=&f"(src0_1),
-              [src0_2]"=&f"(src0_2), [src0_3]"=&f"(src0_3),
-              [src1_0]"=&f"(src1_0), [src1_1]"=&f"(src1_1),
-              [src1_2]"=&f"(src1_2), [src1_3]"=&f"(src1_3),
-              [d]"+r"(d), [s0]"+r"(s0), [s1]"+r"(s1)
-            : [d_end]"r"(d_end)
-            : "memory"
-        );
+    for (i = 0; i < len; i += 4) {
+        dst[i    ] = src0[i    ] * src1[i    ];
+        dst[i + 1] = src0[i + 1] * src1[i + 1];
+        dst[i + 2] = src0[i + 2] * src1[i + 2];
+        dst[i + 3] = src0[i + 3] * src1[i + 3];
     }
 }
 
-static void vector_fmul_scalar_mips(float *dst, const float *src, float mul,
-                                 int len)
+static void vector_fmul_scalar_mips(
+    float *av_restrict dst, const float *av_restrict src, float mul, int len)
 {
-    float temp0, temp1, temp2, temp3;
-    float *local_src = (float*)src;
-    float *end = local_src + len;
+    int i;
 
-    /* loop unrolled 4 times */
-    __asm__ volatile(
-        ".set    push                             \n\t"
-        ".set    noreorder                        \n\t"
-    "1:                                           \n\t"
-        "lwc1    %[temp0],   0(%[src])            \n\t"
-        "lwc1    %[temp1],   4(%[src])            \n\t"
-        "lwc1    %[temp2],   8(%[src])            \n\t"
-        "lwc1    %[temp3],   12(%[src])           \n\t"
-        "addiu   %[dst],     %[dst],     16       \n\t"
-        "mul.s   %[temp0],   %[temp0],   %[mul]   \n\t"
-        "mul.s   %[temp1],   %[temp1],   %[mul]   \n\t"
-        "mul.s   %[temp2],   %[temp2],   %[mul]   \n\t"
-        "mul.s   %[temp3],   %[temp3],   %[mul]   \n\t"
-        "addiu   %[src],     %[src],     16       \n\t"
-        "swc1    %[temp0],   -16(%[dst])          \n\t"
-        "swc1    %[temp1],   -12(%[dst])          \n\t"
-        "swc1    %[temp2],   -8(%[dst])           \n\t"
-        "bne     %[src],     %[end],     1b       \n\t"
-        " swc1   %[temp3],   -4(%[dst])           \n\t"
-        ".set    pop                              \n\t"
+    // input length must be a multiple of 4
+    av_assert2(len % 4 == 0);
 
-        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
-          [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
-          [dst]"+r"(dst), [src]"+r"(local_src)
-        : [end]"r"(end), [mul]"f"(mul)
-        : "memory"
-    );
+    for (i = 0; i < len; i += 4) {
+        dst[i    ] = src[i    ] * mul;
+        dst[i + 1] = src[i + 1] * mul;
+        dst[i + 2] = src[i + 2] * mul;
+        dst[i + 3] = src[i + 3] * mul;
+    }
 }
 
-static void vector_fmul_window_mips(float *dst, const float *src0,
-        const float *src1, const float *win, int len)
+static void vector_fmul_window_mips(
+    float *av_restrict dst, const float *av_restrict src0,
+    const float *av_restrict src1, const float *av_restrict win, int len)
 {
     int i, j;
-    /*
-     * variables used in inline assembler
-     */
-    float * dst_i, * dst_j, * dst_i2, * dst_j2;
-    float temp, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+
+    // input length must be a multiple of 4
+    av_assert2(len % 4 == 0);
 
     dst  += len;
     win  += len;
     src0 += len;
 
-    for (i = -len, j = len - 1; i < 0; i += 8, j -= 8) {
-
-        dst_i = dst + i;
-        dst_j = dst + j;
-
-        dst_i2 = dst + i + 4;
-        dst_j2 = dst + j - 4;
-
-        __asm__ volatile (
-            "mul.s   %[temp],   %[s1],       %[wi]            \n\t"
-            "mul.s   %[temp1],  %[s1],       %[wj]            \n\t"
-            "mul.s   %[temp2],  %[s11],      %[wi1]           \n\t"
-            "mul.s   %[temp3],  %[s11],      %[wj1]           \n\t"
-
-            "msub.s  %[temp],   %[temp],     %[s0],  %[wj]    \n\t"
-            "madd.s  %[temp1],  %[temp1],    %[s0],  %[wi]    \n\t"
-            "msub.s  %[temp2],  %[temp2],    %[s01], %[wj1]   \n\t"
-            "madd.s  %[temp3],  %[temp3],    %[s01], %[wi1]   \n\t"
+    for (i = -len, j = len - 1; i < 0; i += 4, j -= 4) {
+        dst[i    ] = src0[i    ] * win[j    ] - src1[j    ] * win[i    ];
+        dst[j    ] = src0[i    ] * win[i    ] + src1[j    ] * win[j    ];
 
-            "swc1    %[temp],   0(%[dst_i])                   \n\t" /* dst[i] = s0*wj - s1*wi; */
-            "swc1    %[temp1],  0(%[dst_j])                   \n\t" /* dst[j] = s0*wi + s1*wj; */
-            "swc1    %[temp2],  4(%[dst_i])                   \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
-            "swc1    %[temp3], -4(%[dst_j])                   \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */
+        dst[i + 1] = src0[i + 1] * win[j - 1] - src1[j - 1] * win[i + 1];
+        dst[j - 1] = src0[i + 1] * win[i + 1] + src1[j - 1] * win[j - 1];
 
-            "mul.s   %[temp4],  %[s12],      %[wi2]           \n\t"
-            "mul.s   %[temp5],  %[s12],      %[wj2]           \n\t"
-            "mul.s   %[temp6],  %[s13],      %[wi3]           \n\t"
-            "mul.s   %[temp7],  %[s13],      %[wj3]           \n\t"
+        dst[i + 2] = src0[i + 2] * win[j - 2] - src1[j - 2] * win[i + 2];
+        dst[j - 2] = src0[i + 2] * win[i + 2] + src1[j - 2] * win[j - 2];
 
-            "msub.s  %[temp4],  %[temp4],    %[s02], %[wj2]   \n\t"
-            "madd.s  %[temp5],  %[temp5],    %[s02], %[wi2]   \n\t"
-            "msub.s  %[temp6],  %[temp6],    %[s03], %[wj3]   \n\t"
-            "madd.s  %[temp7],  %[temp7],    %[s03], %[wi3]   \n\t"
-
-            "swc1    %[temp4],  8(%[dst_i])                   \n\t" /* dst[i+2] = s02*wj2 - s12*wi2; */
-            "swc1    %[temp5], -8(%[dst_j])                   \n\t" /* dst[j-2] = s02*wi2 + s12*wj2; */
-            "swc1    %[temp6],  12(%[dst_i])                  \n\t" /* dst[i+2] = s03*wj3 - s13*wi3; */
-            "swc1    %[temp7], -12(%[dst_j])                  \n\t" /* dst[j-3] = s03*wi3 + s13*wj3; */
-            : [temp]"=&f"(temp),  [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
-              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
-              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
-            : [dst_j]"r"(dst_j),     [dst_i]"r" (dst_i),
-              [s0] "f"(src0[i]),     [wj] "f"(win[j]),     [s1] "f"(src1[j]),
-              [wi] "f"(win[i]),      [s01]"f"(src0[i + 1]),[wj1]"f"(win[j - 1]),
-              [s11]"f"(src1[j - 1]), [wi1]"f"(win[i + 1]), [s02]"f"(src0[i + 2]),
-              [wj2]"f"(win[j - 2]),  [s12]"f"(src1[j - 2]),[wi2]"f"(win[i + 2]),
-              [s03]"f"(src0[i + 3]), [wj3]"f"(win[j - 3]), [s13]"f"(src1[j - 3]),
-              [wi3]"f"(win[i + 3])
-            : "memory"
-        );
+        dst[i + 3] = src0[i + 3] * win[j - 3] - src1[j - 3] * win[i + 3];
+        dst[j - 3] = src0[i + 3] * win[i + 3] + src1[j - 3] * win[j - 3];
+    }
+}
 
-        __asm__ volatile (
-            "mul.s  %[temp],   %[s1],       %[wi]            \n\t"
-            "mul.s  %[temp1],  %[s1],       %[wj]            \n\t"
-            "mul.s  %[temp2],  %[s11],      %[wi1]           \n\t"
-            "mul.s  %[temp3],  %[s11],      %[wj1]           \n\t"
+static void butterflies_float_mips(
+    float *av_restrict v1, float *av_restrict v2, int len)
+{
+    int i;
 
-            "msub.s %[temp],   %[temp],     %[s0],  %[wj]    \n\t"
-            "madd.s %[temp1],  %[temp1],    %[s0],  %[wi]    \n\t"
-            "msub.s %[temp2],  %[temp2],    %[s01], %[wj1]   \n\t"
-            "madd.s %[temp3],  %[temp3],    %[s01], %[wi1]   \n\t"
+    // input length must be a multiple of 4
+    av_assert2(len % 4 == 0);
 
-            "swc1   %[temp],   0(%[dst_i2])                  \n\t" /* dst[i] = s0*wj - s1*wi; */
-            "swc1   %[temp1],  0(%[dst_j2])                  \n\t" /* dst[j] = s0*wi + s1*wj; */
-            "swc1   %[temp2],  4(%[dst_i2])                  \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
-            "swc1   %[temp3], -4(%[dst_j2])                  \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */
+    for (i = 0; i < len; i += 4) {
+        float diffs[4];
 
-            "mul.s  %[temp4],  %[s12],      %[wi2]           \n\t"
-            "mul.s  %[temp5],  %[s12],      %[wj2]           \n\t"
-            "mul.s  %[temp6],  %[s13],      %[wi3]           \n\t"
-            "mul.s  %[temp7],  %[s13],      %[wj3]           \n\t"
+        diffs[0] = v1[i    ] - v2[i    ];
+        diffs[1] = v1[i + 1] - v2[i + 1];
+        diffs[2] = v1[i + 2] - v2[i + 2];
+        diffs[3] = v1[i + 3] - v2[i + 3];
 
-            "msub.s %[temp4],  %[temp4],    %[s02], %[wj2]   \n\t"
-            "madd.s %[temp5],  %[temp5],    %[s02], %[wi2]   \n\t"
-            "msub.s %[temp6],  %[temp6],    %[s03], %[wj3]   \n\t"
-            "madd.s %[temp7],  %[temp7],    %[s03], %[wi3]   \n\t"
+        v1[i    ] += v2[i    ];
+        v1[i + 1] += v2[i + 1];
+        v1[i + 2] += v2[i + 2];
+        v1[i + 3] += v2[i + 3];
 
-            "swc1   %[temp4],  8(%[dst_i2])                  \n\t" /* dst[i+2] = s02*wj2 - s12*wi2; */
-            "swc1   %[temp5], -8(%[dst_j2])                  \n\t" /* dst[j-2] = s02*wi2 + s12*wj2; */
-            "swc1   %[temp6],  12(%[dst_i2])                 \n\t" /* dst[i+2] = s03*wj3 - s13*wi3; */
-            "swc1   %[temp7], -12(%[dst_j2])                 \n\t" /* dst[j-3] = s03*wi3 + s13*wj3; */
-            : [temp]"=&f"(temp),
-              [temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
-              [temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
-              [temp7]  "=&f" (temp7)
-            : [dst_j2]"r"(dst_j2),   [dst_i2]"r"(dst_i2),
-              [s0] "f"(src0[i + 4]), [wj] "f"(win[j - 4]), [s1] "f"(src1[j - 4]),
-              [wi] "f"(win[i + 4]),  [s01]"f"(src0[i + 5]),[wj1]"f"(win[j - 5]),
-              [s11]"f"(src1[j - 5]), [wi1]"f"(win[i + 5]), [s02]"f"(src0[i + 6]),
-              [wj2]"f"(win[j - 6]),  [s12]"f"(src1[j - 6]),[wi2]"f"(win[i + 6]),
-              [s03]"f"(src0[i + 7]), [wj3]"f"(win[j - 7]), [s13]"f"(src1[j - 7]),
-              [wi3]"f"(win[i + 7])
-            : "memory"
-        );
+        v2[i    ] = diffs[0];
+        v2[i + 1] = diffs[1];
+        v2[i + 2] = diffs[2];
+        v2[i + 3] = diffs[3];
     }
 }
 
-static void butterflies_float_mips(float *av_restrict v1, float *av_restrict v2,
-                                int len)
+static void vector_fmul_reverse_mips(
+    float *av_restrict dst, const float *av_restrict src0,
+    const float *av_restrict src1, int len)
 {
-    float temp0, temp1, temp2, temp3, temp4;
-    float temp5, temp6, temp7, temp8, temp9;
-    float temp10, temp11, temp12, temp13, temp14, temp15;
-    int pom;
-    pom = (len >> 2)-1;
-
-    /* loop unrolled 4 times */
-    __asm__ volatile (
-        "lwc1     %[temp0],    0(%[v1])                 \n\t"
-        "lwc1     %[temp1],    4(%[v1])                 \n\t"
-        "lwc1     %[temp2],    8(%[v1])                 \n\t"
-        "lwc1     %[temp3],    12(%[v1])                \n\t"
-        "lwc1     %[temp4],    0(%[v2])                 \n\t"
-        "lwc1     %[temp5],    4(%[v2])                 \n\t"
-        "lwc1     %[temp6],    8(%[v2])                 \n\t"
-        "lwc1     %[temp7],    12(%[v2])                \n\t"
-        "beq      %[pom],      $zero,       2f          \n\t"
-    "1:                                                 \n\t"
-        "sub.s    %[temp8],    %[temp0],    %[temp4]    \n\t"
-        "add.s    %[temp9],    %[temp0],    %[temp4]    \n\t"
-        "sub.s    %[temp10],   %[temp1],    %[temp5]    \n\t"
-        "add.s    %[temp11],   %[temp1],    %[temp5]    \n\t"
-        "sub.s    %[temp12],   %[temp2],    %[temp6]    \n\t"
-        "add.s    %[temp13],   %[temp2],    %[temp6]    \n\t"
-        "sub.s    %[temp14],   %[temp3],    %[temp7]    \n\t"
-        "add.s    %[temp15],   %[temp3],    %[temp7]    \n\t"
-        "addiu    %[v1],       %[v1],       16          \n\t"
-        "addiu    %[v2],       %[v2],       16          \n\t"
-        "addiu    %[pom],      %[pom],      -1          \n\t"
-        "lwc1     %[temp0],    0(%[v1])                 \n\t"
-        "lwc1     %[temp1],    4(%[v1])                 \n\t"
-        "lwc1     %[temp2],    8(%[v1])                 \n\t"
-        "lwc1     %[temp3],    12(%[v1])                \n\t"
-        "lwc1     %[temp4],    0(%[v2])                 \n\t"
-        "lwc1     %[temp5],    4(%[v2])                 \n\t"
-        "lwc1     %[temp6],    8(%[v2])                 \n\t"
-        "lwc1     %[temp7],    12(%[v2])                \n\t"
-        "swc1     %[temp9],    -16(%[v1])               \n\t"
-        "swc1     %[temp8],    -16(%[v2])               \n\t"
-        "swc1     %[temp11],   -12(%[v1])               \n\t"
-        "swc1     %[temp10],   -12(%[v2])               \n\t"
-        "swc1     %[temp13],   -8(%[v1])                \n\t"
-        "swc1     %[temp12],   -8(%[v2])                \n\t"
-        "swc1     %[temp15],   -4(%[v1])                \n\t"
-        "swc1     %[temp14],   -4(%[v2])                \n\t"
-        "bgtz     %[pom],      1b                       \n\t"
-    "2:                                                 \n\t"
-        "sub.s    %[temp8],    %[temp0],    %[temp4]    \n\t"
-        "add.s    %[temp9],    %[temp0],    %[temp4]    \n\t"
-        "sub.s    %[temp10],   %[temp1],    %[temp5]    \n\t"
-        "add.s    %[temp11],   %[temp1],    %[temp5]    \n\t"
-        "sub.s    %[temp12],   %[temp2],    %[temp6]    \n\t"
-        "add.s    %[temp13],   %[temp2],    %[temp6]    \n\t"
-        "sub.s    %[temp14],   %[temp3],    %[temp7]    \n\t"
-        "add.s    %[temp15],   %[temp3],    %[temp7]    \n\t"
-        "swc1     %[temp9],    0(%[v1])                 \n\t"
-        "swc1     %[temp8],    0(%[v2])                 \n\t"
-        "swc1     %[temp11],   4(%[v1])                 \n\t"
-        "swc1     %[temp10],   4(%[v2])                 \n\t"
-        "swc1     %[temp13],   8(%[v1])                 \n\t"
-        "swc1     %[temp12],   8(%[v2])                 \n\t"
-        "swc1     %[temp15],   12(%[v1])                \n\t"
-        "swc1     %[temp14],   12(%[v2])                \n\t"
-
-        : [v1]"+r"(v1), [v2]"+r"(v2), [pom]"+r"(pom), [temp0] "=&f" (temp0),
-          [temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
-          [temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
-          [temp7]"=&f"(temp7), [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
-          [temp10]"=&f"(temp10), [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
-          [temp13]"=&f"(temp13), [temp14]"=&f"(temp14), [temp15]"=&f"(temp15)
-        :
-        : "memory"
-    );
-}
-
-static void vector_fmul_reverse_mips(float *dst, const float *src0, const float *src1, int len){
     int i;
-    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
-    src1 += len-1;
 
-    for(i=0; i<(len>>2); i++)
-    {
-        /* loop unrolled 4 times */
-        __asm__ volatile(
-            "lwc1      %[temp0],     0(%[src0])                 \n\t"
-            "lwc1      %[temp1],     0(%[src1])                 \n\t"
-            "lwc1      %[temp2],     4(%[src0])                 \n\t"
-            "lwc1      %[temp3],     -4(%[src1])                \n\t"
-            "lwc1      %[temp4],     8(%[src0])                 \n\t"
-            "lwc1      %[temp5],     -8(%[src1])                \n\t"
-            "lwc1      %[temp6],     12(%[src0])                \n\t"
-            "lwc1      %[temp7],     -12(%[src1])               \n\t"
-            "mul.s     %[temp0],     %[temp1],     %[temp0]     \n\t"
-            "mul.s     %[temp2],     %[temp3],     %[temp2]     \n\t"
-            "mul.s     %[temp4],     %[temp5],     %[temp4]     \n\t"
-            "mul.s     %[temp6],     %[temp7],     %[temp6]     \n\t"
-            "addiu     %[src0],      %[src0],      16           \n\t"
-            "addiu     %[src1],      %[src1],      -16          \n\t"
-            "addiu     %[dst],       %[dst],       16           \n\t"
-            "swc1      %[temp0],     -16(%[dst])                \n\t"
-            "swc1      %[temp2],     -12(%[dst])                \n\t"
-            "swc1      %[temp4],     -8(%[dst])                 \n\t"
-            "swc1      %[temp6],     -4(%[dst])                 \n\t"
+    // input length must be a multiple of 4
+    av_assert2(len % 4 == 0);
 
-            : [dst]"+r"(dst), [src0]"+r"(src0), [src1]"+r"(src1),
-              [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),[temp2]"=&f"(temp2),
-              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
-              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
-            :
-            : "memory"
-        );
+    for (i = 0; i < len; i += 4) {
+        dst[i    ] = src0[i    ] * src1[len - i - 1];
+        dst[i + 1] = src0[i + 1] * src1[len - i - 2];
+        dst[i + 2] = src0[i + 2] * src1[len - i - 3];
+        dst[i + 3] = src0[i + 3] * src1[len - i - 4];
     }
 }
-#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
 
 void ff_float_dsp_init_mips(AVFloatDSPContext *fdsp) {
-#if HAVE_INLINE_ASM && HAVE_MIPSFPU
     fdsp->vector_fmul = vector_fmul_mips;
     fdsp->vector_fmul_scalar  = vector_fmul_scalar_mips;
     fdsp->vector_fmul_window = vector_fmul_window_mips;
     fdsp->butterflies_float = butterflies_float_mips;
     fdsp->vector_fmul_reverse = vector_fmul_reverse_mips;
-#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
 }
-- 
2.1.4


