[FFmpeg-devel] [PATCH 2/3] mips: optimization for float aac decoder (sbr module)

Nedeljko Babic nbabic at mips.com
Wed Jan 9 10:45:33 CET 2013


From: Mirjana Vulin <mirjana.vulin at rt-rk.com>

Change-Id: Ifa16dd1f435a4c2b2d9c2e0b320775f02ddc0d69
Signed-off-by: Mirjana Vulin <mvulin at mips.com>
---
 libavcodec/aac.h               |    2 -
 libavcodec/aacsbr.c            |   30 ++-
 libavcodec/aacsbr.h            |    2 +
 libavcodec/mips/Makefile       |    4 +-
 libavcodec/mips/aacsbr_mips.c  |  618 ++++++++++++++++++++++++++
 libavcodec/mips/aacsbr_mips.h  |  493 +++++++++++++++++++++
 libavcodec/mips/dsputil_mips.c |   40 ++
 libavcodec/mips/sbrdsp_mips.c  |  940 ++++++++++++++++++++++++++++++++++++++++
 libavcodec/sbr.h               |   28 ++-
 libavcodec/sbrdsp.c            |    2 +
 libavcodec/sbrdsp.h            |    1 +
 11 files changed, 2151 insertions(+), 9 deletions(-)
 create mode 100644 libavcodec/mips/aacsbr_mips.c
 create mode 100644 libavcodec/mips/aacsbr_mips.h
 create mode 100644 libavcodec/mips/sbrdsp_mips.c

diff --git a/libavcodec/aac.h b/libavcodec/aac.h
index 7b96e3b..7fdf63b 100644
--- a/libavcodec/aac.h
+++ b/libavcodec/aac.h
@@ -257,8 +257,6 @@ typedef struct ChannelElement {
     SpectralBandReplication sbr;
 } ChannelElement;
 
-typedef struct AACContext AACContext;
-
 /**
  * main AAC context
  */
diff --git a/libavcodec/aacsbr.c b/libavcodec/aacsbr.c
index 714e48c..9366d7b 100644
--- a/libavcodec/aacsbr.c
+++ b/libavcodec/aacsbr.c
@@ -43,6 +43,10 @@
 #define ENVELOPE_ADJUSTMENT_OFFSET 2
 #define NOISE_FLOOR_OFFSET 6.0f
 
+#if ARCH_MIPS
+#include "mips/aacsbr_mips.h"
+#endif /* ARCH_MIPS */
+
 /**
  * SBR VLC tables
  */
@@ -86,6 +90,8 @@ static const int8_t vlc_sbr_lav[10] =
 #define SBR_VLC_ROW(name) \
     { name ## _codes, name ## _bits, sizeof(name ## _codes), sizeof(name ## _codes[0]) }
 
+static void ff_aacsbr_init(AACSBRContext *c);
+
 av_cold void ff_aac_sbr_init(void)
 {
     int n;
@@ -154,6 +160,7 @@ av_cold void ff_aac_sbr_ctx_init(AACContext *ac, SpectralBandReplication *sbr)
     ff_mdct_init(&sbr->mdct_ana, 7, 1, -2.0 * 32768.0);
     ff_ps_ctx_init(&sbr->ps);
     ff_sbrdsp_init(&sbr->dsp);
+    ff_aacsbr_init(&sbr->c);
 }
 
 av_cold void ff_aac_sbr_ctx_close(SpectralBandReplication *sbr)
@@ -1156,6 +1163,7 @@ static void sbr_dequant(SpectralBandReplication *sbr, int id_aac)
  * @param   x       pointer to the beginning of the first sample window
  * @param   W       array of complex-valued samples split into subbands
  */
+#ifndef sbr_qmf_analysis
 static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct,
                              SBRDSPContext *sbrdsp, const float *in, float *x,
                              float z[320], float W[2][32][32][2], int buf_idx)
@@ -1173,11 +1181,13 @@ static void sbr_qmf_analysis(DSPContext *dsp, FFTContext *mdct,
         x += 32;
     }
 }
+#endif
 
 /**
  * Synthesis QMF Bank (14496-3 sp04 p206) and Downsampled Synthesis QMF Bank
  * (14496-3 sp04 p206)
  */
+#ifndef sbr_qmf_synthesis
 static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
                               SBRDSPContext *sbrdsp, AVFloatDSPContext *fdsp,
                               float *out, float X[2][38][64],
@@ -1223,6 +1233,7 @@ static void sbr_qmf_synthesis(DSPContext *dsp, FFTContext *mdct,
         out += 64 >> div;
     }
 }
+#endif
 
 /** High Frequency Generation (14496-3 sp04 p214+) and Inverse Filtering
  * (14496-3 sp04 p214)
@@ -1671,10 +1682,10 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
         sbr_qmf_analysis(&ac->dsp, &sbr->mdct_ana, &sbr->dsp, ch ? R : L, sbr->data[ch].analysis_filterbank_samples,
                          (float*)sbr->qmf_filter_scratch,
                          sbr->data[ch].W, sbr->data[ch].Ypos);
-        sbr_lf_gen(ac, sbr, sbr->X_low, sbr->data[ch].W, sbr->data[ch].Ypos);
+        sbr->c.sbr_lf_gen(ac, sbr, sbr->X_low, sbr->data[ch].W, sbr->data[ch].Ypos);
         sbr->data[ch].Ypos ^= 1;
         if (sbr->start) {
-            sbr_hf_inverse_filter(&sbr->dsp, sbr->alpha0, sbr->alpha1, sbr->X_low, sbr->k[0]);
+            sbr->c.sbr_hf_inverse_filter(&sbr->dsp, sbr->alpha0, sbr->alpha1, sbr->X_low, sbr->k[0]);
             sbr_chirp(sbr, &sbr->data[ch]);
             sbr_hf_gen(ac, sbr, sbr->X_high, sbr->X_low, sbr->alpha0, sbr->alpha1,
                        sbr->data[ch].bw_array, sbr->data[ch].t_env,
@@ -1685,14 +1696,14 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
             if (!err) {
                 sbr_env_estimate(sbr->e_curr, sbr->X_high, sbr, &sbr->data[ch]);
                 sbr_gain_calc(ac, sbr, &sbr->data[ch], sbr->data[ch].e_a);
-                sbr_hf_assemble(sbr->data[ch].Y[sbr->data[ch].Ypos],
+                sbr->c.sbr_hf_assemble(sbr->data[ch].Y[sbr->data[ch].Ypos],
                                 sbr->X_high, sbr, &sbr->data[ch],
                                 sbr->data[ch].e_a);
             }
         }
 
         /* synthesis */
-        sbr_x_gen(sbr, sbr->X[ch],
+        sbr->c.sbr_x_gen(sbr, sbr->X[ch],
                   sbr->data[ch].Y[1-sbr->data[ch].Ypos],
                   sbr->data[ch].Y[  sbr->data[ch].Ypos],
                   sbr->X_low, ch);
@@ -1719,3 +1730,14 @@ void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
                           &sbr->data[1].synthesis_filterbank_samples_offset,
                           downsampled);
 }
+
+static void ff_aacsbr_init(AACSBRContext *c)
+{
+    c->sbr_lf_gen            = sbr_lf_gen;
+    c->sbr_hf_assemble       = sbr_hf_assemble;
+    c->sbr_x_gen             = sbr_x_gen;
+    c->sbr_hf_inverse_filter = sbr_hf_inverse_filter;
+
+    if (ARCH_MIPS)
+        ff_aacsbr_init_mips(c);
+}
diff --git a/libavcodec/aacsbr.h b/libavcodec/aacsbr.h
index d028498..4f3c842 100644
--- a/libavcodec/aacsbr.h
+++ b/libavcodec/aacsbr.h
@@ -46,4 +46,6 @@ int ff_decode_sbr_extension(AACContext *ac, SpectralBandReplication *sbr,
 void ff_sbr_apply(AACContext *ac, SpectralBandReplication *sbr, int id_aac,
                   float* L, float *R);
 
+void ff_aacsbr_init_mips(AACSBRContext *c);
+
 #endif /* AVCODEC_AACSBR_H */
diff --git a/libavcodec/mips/Makefile b/libavcodec/mips/Makefile
index ded74db..82b28ee 100644
--- a/libavcodec/mips/Makefile
+++ b/libavcodec/mips/Makefile
@@ -14,4 +14,6 @@ MIPSFPU-OBJS-$(CONFIG_FFT)                += mips/fft_mips.o
 MIPSFPU-OBJS                              += mips/dsputil_mips.o           \
                                              mips/fmtconvert_mips.o
 OBJS-$(CONFIG_AC3DSP)                     += mips/ac3dsp_mips.o
-OBJS-$(CONFIG_AAC_DECODER)                += mips/aacdec_mips.o
+OBJS-$(CONFIG_AAC_DECODER)                += mips/aacdec_mips.o            \
+                                             mips/aacsbr_mips.o            \
+                                             mips/sbrdsp_mips.o
diff --git a/libavcodec/mips/aacsbr_mips.c b/libavcodec/mips/aacsbr_mips.c
new file mode 100644
index 0000000..dafd280
--- /dev/null
+++ b/libavcodec/mips/aacsbr_mips.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2012
+ *      MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors:  Djordje Pesut   (djordje at mips.com)
+ *           Mirjana Vulin   (mvulin at mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacsbr.c
+ */
+
+#include "libavcodec/aac.h"
+#include "libavcodec/aacsbr.h"
+
+#define ENVELOPE_ADJUSTMENT_OFFSET 2
+
+#if HAVE_INLINE_ASM
+static int sbr_lf_gen_mips(AACContext *ac, SpectralBandReplication *sbr,
+                      float X_low[32][40][2], const float W[2][32][32][2],
+                      int buf_idx)
+{
+    int i, k;
+    int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+    float *p_x_low = &X_low[0][8][0];
+    float *p_w = (float*)&W[buf_idx][0][0][0];
+    float *p_x1_low = &X_low[0][0][0];
+    float *p_w1 = (float*)&W[1-buf_idx][24][0][0];
+
+    float *loop_end=p_x1_low + 2560;
+
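+    /* clear the whole X_low buffer (32*40*2 floats) before filling it */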
+    /* loop unrolled 8 times */
+    __asm__ volatile (
+    "1:                                                 \n\t"
+        "sw     $0,            0(%[p_x1_low])           \n\t"
+        "sw     $0,            4(%[p_x1_low])           \n\t"
+        "sw     $0,            8(%[p_x1_low])           \n\t"
+        "sw     $0,            12(%[p_x1_low])          \n\t"
+        "sw     $0,            16(%[p_x1_low])          \n\t"
+        "sw     $0,            20(%[p_x1_low])          \n\t"
+        "sw     $0,            24(%[p_x1_low])          \n\t"
+        "sw     $0,            28(%[p_x1_low])          \n\t"
+        "addiu  %[p_x1_low],   %[p_x1_low],      32     \n\t"
+        "bne    %[p_x1_low],   %[loop_end],      1b     \n\t"
+        "addiu  %[p_x1_low],   %[p_x1_low],      -10240 \n\t"
+
+        : [p_x1_low]"+r"(p_x1_low)
+        : [loop_end]"r"(loop_end)
+        : "memory"
+    );
+
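+    /* copy the current frame's QMF samples from W[buf_idx] into X_low */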
+    for (k = 0; k < sbr->kx[1]; k++) {
+        for (i = 0; i < 32; i+=4) {
+            /* loop unrolled 4 times */
+            __asm__ volatile (
+                "lw     %[temp0],   0(%[p_w])               \n\t"
+                "lw     %[temp1],   4(%[p_w])               \n\t"
+                "lw     %[temp2],   256(%[p_w])             \n\t"
+                "lw     %[temp3],   260(%[p_w])             \n\t"
+                "lw     %[temp4],   512(%[p_w])             \n\t"
+                "lw     %[temp5],   516(%[p_w])             \n\t"
+                "lw     %[temp6],   768(%[p_w])             \n\t"
+                "lw     %[temp7],   772(%[p_w])             \n\t"
+                "sw     %[temp0],   0(%[p_x_low])           \n\t"
+                "sw     %[temp1],   4(%[p_x_low])           \n\t"
+                "sw     %[temp2],   8(%[p_x_low])           \n\t"
+                "sw     %[temp3],   12(%[p_x_low])          \n\t"
+                "sw     %[temp4],   16(%[p_x_low])          \n\t"
+                "sw     %[temp5],   20(%[p_x_low])          \n\t"
+                "sw     %[temp6],   24(%[p_x_low])          \n\t"
+                "sw     %[temp7],   28(%[p_x_low])          \n\t"
+                "addiu  %[p_x_low], %[p_x_low],     32      \n\t"
+                "addiu  %[p_w],     %[p_w],         1024    \n\t"
+
+                : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+                  [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+                  [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+                  [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+                  [p_w]"+r"(p_w), [p_x_low]"+r"(p_x_low)
+                :
+                : "memory"
+            );
+        }
+        p_x_low += 16;
+        p_w -= 2046;
+    }
+
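+    /* copy the trailing time slots of the previous frame from W[1-buf_idx] into X_low */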
+    for (k = 0; k < sbr->kx[0]; k++) {
+        for (i = 0; i < 2; i++) {
+
+            /* loop unrolled 4 times */
+            __asm__ volatile (
+                "lw     %[temp0],    0(%[p_w1])             \n\t"
+                "lw     %[temp1],    4(%[p_w1])             \n\t"
+                "lw     %[temp2],    256(%[p_w1])           \n\t"
+                "lw     %[temp3],    260(%[p_w1])           \n\t"
+                "lw     %[temp4],    512(%[p_w1])           \n\t"
+                "lw     %[temp5],    516(%[p_w1])           \n\t"
+                "lw     %[temp6],    768(%[p_w1])           \n\t"
+                "lw     %[temp7],    772(%[p_w1])           \n\t"
+                "sw     %[temp0],    0(%[p_x1_low])         \n\t"
+                "sw     %[temp1],    4(%[p_x1_low])         \n\t"
+                "sw     %[temp2],    8(%[p_x1_low])         \n\t"
+                "sw     %[temp3],    12(%[p_x1_low])        \n\t"
+                "sw     %[temp4],    16(%[p_x1_low])        \n\t"
+                "sw     %[temp5],    20(%[p_x1_low])        \n\t"
+                "sw     %[temp6],    24(%[p_x1_low])        \n\t"
+                "sw     %[temp7],    28(%[p_x1_low])        \n\t"
+                "addiu  %[p_x1_low], %[p_x1_low],   32      \n\t"
+                "addiu  %[p_w1],     %[p_w1],       1024    \n\t"
+
+                : [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+                  [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+                  [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+                  [temp6]"=&r"(temp6), [temp7]"=&r"(temp7),
+                  [p_w1]"+r"(p_w1), [p_x1_low]"+r"(p_x1_low)
+                :
+                : "memory"
+            );
+        }
+        p_x1_low += 64;
+        p_w1 -= 510;
+    }
+    return 0;
+}
+
+static int sbr_x_gen_mips(SpectralBandReplication *sbr, float X[2][38][64],
+                     const float Y0[38][64][2], const float Y1[38][64][2],
+                     const float X_low[32][40][2], int ch)
+{
+    int k, i;
+    const int i_f = 32;
+    int temp0, temp1, temp2, temp3;
+    const float *X_low1, *Y01, *Y11;
+    float *x1=&X[0][0][0];
+    float *j=x1+4864;
+    const int i_Temp = FFMAX(2*sbr->data[ch].t_env_num_env_old - i_f, 0);
+
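+    /* clear the whole X output array (2*38*64 floats) first */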
+    /* loop unrolled 8 times */
+    __asm__ volatile (
+    "1:                                       \n\t"
+        "sw     $0,      0(%[x1])             \n\t"
+        "sw     $0,      4(%[x1])             \n\t"
+        "sw     $0,      8(%[x1])             \n\t"
+        "sw     $0,      12(%[x1])            \n\t"
+        "sw     $0,      16(%[x1])            \n\t"
+        "sw     $0,      20(%[x1])            \n\t"
+        "sw     $0,      24(%[x1])            \n\t"
+        "sw     $0,      28(%[x1])            \n\t"
+        "addiu  %[x1],   %[x1],      32       \n\t"
+        "bne    %[x1],   %[j],       1b       \n\t"
+        "addiu  %[x1],   %[x1],      -19456   \n\t"
+
+        : [x1]"+r"(x1)
+        : [j]"r"(j)
+        : "memory"
+    );
+
+    if (i_Temp != 0) {
+
+        X_low1=&X_low[0][2][0];
+
+        for (k = 0; k < sbr->kx[0]; k++) {
+
+            __asm__ volatile (
+                "move    %[i],        $zero                  \n\t"
+            "2:                                              \n\t"
+                "lw      %[temp0],    0(%[X_low1])           \n\t"
+                "lw      %[temp1],    4(%[X_low1])           \n\t"
+                "sw      %[temp0],    0(%[x1])               \n\t"
+                "sw      %[temp1],    9728(%[x1])            \n\t"
+                "addiu   %[x1],       %[x1],         256     \n\t"
+                "addiu   %[X_low1],   %[X_low1],     8       \n\t"
+                "addiu   %[i],        %[i],          1       \n\t"
+                "bne     %[i],        %[i_Temp],     2b      \n\t"
+
+                : [x1]"+r"(x1), [X_low1]"+r"(X_low1), [i]"=&r"(i),
+                  [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
+                : [i_Temp]"r"(i_Temp)
+                : "memory"
+            );
+            x1-=(i_Temp<<6)-1;
+            X_low1-=(i_Temp<<1)-80;
+        }
+
+        x1=&X[0][0][k];
+        Y01=(float*)&Y0[32][k][0];
+
+        for (; k < sbr->kx[0] + sbr->m[0]; k++) {
+            __asm__ volatile (
+                "move    %[i],       $zero               \n\t"
+            "3:                                          \n\t"
+                "lw      %[temp0],   0(%[Y01])           \n\t"
+                "lw      %[temp1],   4(%[Y01])           \n\t"
+                "sw      %[temp0],   0(%[x1])            \n\t"
+                "sw      %[temp1],   9728(%[x1])         \n\t"
+                "addiu   %[x1],      %[x1],      256     \n\t"
+                "addiu   %[Y01],     %[Y01],     512     \n\t"
+                "addiu   %[i],       %[i],       1       \n\t"
+                "bne     %[i],       %[i_Temp],  3b      \n\t"
+
+                : [x1]"+r"(x1), [Y01]"+r"(Y01), [i]"=&r"(i),
+                  [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
+                : [i_Temp]"r"(i_Temp)
+                : "memory"
+            );
+            x1 -=(i_Temp<<6)-1;
+            Y01 -=(i_Temp<<7)-2;
+        }
+    }
+
+    x1=&X[0][i_Temp][0];
+    X_low1=&X_low[0][i_Temp+2][0];
+    temp3=38;
+
+    for (k = 0; k < sbr->kx[1]; k++) {
+
+        __asm__ volatile (
+            "move    %[i],       %[i_Temp]              \n\t"
+        "4:                                             \n\t"
+            "lw      %[temp0],   0(%[X_low1])           \n\t"
+            "lw      %[temp1],   4(%[X_low1])           \n\t"
+            "sw      %[temp0],   0(%[x1])               \n\t"
+            "sw      %[temp1],   9728(%[x1])            \n\t"
+            "addiu   %[x1],      %[x1],         256     \n\t"
+            "addiu   %[X_low1],  %[X_low1],     8       \n\t"
+            "addiu   %[i],       %[i],          1       \n\t"
+            "bne     %[i],       %[temp3],      4b      \n\t"
+
+            : [x1]"+r"(x1), [X_low1]"+r"(X_low1), [i]"=&r"(i),
+              [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+              [temp2]"=&r"(temp2)
+            : [i_Temp]"r"(i_Temp), [temp3]"r"(temp3)
+            : "memory"
+        );
+        x1 -= ((38-i_Temp)<<6)-1;
+        X_low1 -= ((38-i_Temp)<<1)- 80;
+    }
+
+    x1=&X[0][i_Temp][k];
+    Y11=&Y1[i_Temp][k][0];
+    temp2=32;
+
+    for (; k < sbr->kx[1] + sbr->m[1]; k++) {
+
+        __asm__ volatile (
+           "move    %[i],       %[i_Temp]               \n\t"
+        "5:                                             \n\t"
+           "lw      %[temp0],   0(%[Y11])               \n\t"
+           "lw      %[temp1],   4(%[Y11])               \n\t"
+           "sw      %[temp0],   0(%[x1])                \n\t"
+           "sw      %[temp1],   9728(%[x1])             \n\t"
+           "addiu   %[x1],      %[x1],          256     \n\t"
+           "addiu   %[Y11],     %[Y11],         512     \n\t"
+           "addiu   %[i],       %[i],           1       \n\t"
+           "bne     %[i],       %[temp2],       5b      \n\t"
+
+           : [x1]"+r"(x1), [Y11]"+r"(Y11), [i]"=&r"(i),
+             [temp0]"=&r"(temp0), [temp1]"=&r"(temp1)
+           : [i_Temp]"r"(i_Temp), [temp3]"r"(temp3),
+             [temp2]"r"(temp2)
+           : "memory"
+        );
+
+        x1 -= ((32-i_Temp)<<6)-1;
+        Y11 -= ((32-i_Temp)<<7)-2;
+    }
+    return 0;
+}
+
+#if HAVE_MIPSFPU
+static void sbr_hf_assemble_mips(float Y1[38][64][2],
+                            const float X_high[64][40][2],
+                            SpectralBandReplication *sbr, SBRData *ch_data,
+                            const int e_a[2])
+{
+    int e, i, j, m;
+    const int h_SL = 4 * !sbr->bs_smoothing_mode;
+    const int kx = sbr->kx[1];
+    const int m_max = sbr->m[1];
+    static const float h_smooth[5] = {
+        0.33333333333333,
+        0.30150283239582,
+        0.21816949906249,
+        0.11516383427084,
+        0.03183050093751,
+    };
+
+    float (*g_temp)[48] = ch_data->g_temp, (*q_temp)[48] = ch_data->q_temp;
+    int indexnoise = ch_data->f_indexnoise;
+    int indexsine  = ch_data->f_indexsine;
+    float *g_temp1, *q_temp1, *pok, *pok1;
+    float temp1, temp2, temp3, temp4;
+    int size = m_max;
+
+    if (sbr->reset) {
+        for (i = 0; i < h_SL; i++) {
+            memcpy(g_temp[i + 2*ch_data->t_env[0]], sbr->gain[0], m_max * sizeof(sbr->gain[0][0]));
+            memcpy(q_temp[i + 2*ch_data->t_env[0]], sbr->q_m[0],  m_max * sizeof(sbr->q_m[0][0]));
+        }
+    } else if (h_SL) {
+        memcpy(g_temp[2*ch_data->t_env[0]], g_temp[2*ch_data->t_env_num_env_old], 4*sizeof(g_temp[0]));
+        memcpy(q_temp[2*ch_data->t_env[0]], q_temp[2*ch_data->t_env_num_env_old], 4*sizeof(q_temp[0]));
+    }
+
+    for (e = 0; e < ch_data->bs_num_env; e++) {
+        for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
+            g_temp1 = g_temp[h_SL + i];
+            pok = sbr->gain[e];
+            q_temp1 = q_temp[h_SL + i];
+            pok1 = sbr->q_m[e];
+
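+            /* copy sbr->gain[e] and sbr->q_m[e] into the per-timeslot g_temp/q_temp buffers */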
+            /* loop unrolled 4 times */
+            for (j=0; j<(size>>2); j++) {
+                __asm__ volatile (
+                    "lw      %[temp1],   0(%[pok])               \n\t"
+                    "lw      %[temp2],   4(%[pok])               \n\t"
+                    "lw      %[temp3],   8(%[pok])               \n\t"
+                    "lw      %[temp4],   12(%[pok])              \n\t"
+                    "sw      %[temp1],   0(%[g_temp1])           \n\t"
+                    "sw      %[temp2],   4(%[g_temp1])           \n\t"
+                    "sw      %[temp3],   8(%[g_temp1])           \n\t"
+                    "sw      %[temp4],   12(%[g_temp1])          \n\t"
+                    "lw      %[temp1],   0(%[pok1])              \n\t"
+                    "lw      %[temp2],   4(%[pok1])              \n\t"
+                    "lw      %[temp3],   8(%[pok1])              \n\t"
+                    "lw      %[temp4],   12(%[pok1])             \n\t"
+                    "sw      %[temp1],   0(%[q_temp1])           \n\t"
+                    "sw      %[temp2],   4(%[q_temp1])           \n\t"
+                    "sw      %[temp3],   8(%[q_temp1])           \n\t"
+                    "sw      %[temp4],   12(%[q_temp1])          \n\t"
+                    "addiu   %[pok],     %[pok],           16    \n\t"
+                    "addiu   %[g_temp1], %[g_temp1],       16    \n\t"
+                    "addiu   %[pok1],    %[pok1],          16    \n\t"
+                    "addiu   %[q_temp1], %[q_temp1],       16    \n\t"
+
+                    : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
+                      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4),
+                      [pok]"+r"(pok), [g_temp1]"+r"(g_temp1),
+                      [pok1]"+r"(pok1), [q_temp1]"+r"(q_temp1)
+                    :
+                    : "memory"
+                );
+            }
+
+            for (j=0; j<(size&3); j++) {
+                __asm__ volatile (
+                    "lw      %[temp1],   0(%[pok])              \n\t"
+                    "lw      %[temp2],   0(%[pok1])             \n\t"
+                    "sw      %[temp1],   0(%[g_temp1])          \n\t"
+                    "sw      %[temp2],   0(%[q_temp1])          \n\t"
+                    "addiu   %[pok],     %[pok],          4     \n\t"
+                    "addiu   %[g_temp1], %[g_temp1],      4     \n\t"
+                    "addiu   %[pok1],    %[pok1],         4     \n\t"
+                    "addiu   %[q_temp1], %[q_temp1],      4     \n\t"
+
+                    : [temp1]"=&r"(temp1), [temp2]"=&r"(temp2),
+                      [temp3]"=&r"(temp3), [temp4]"=&r"(temp4),
+                      [pok]"+r"(pok), [g_temp1]"+r"(g_temp1),
+                      [pok1]"+r"(pok1), [q_temp1]"+r"(q_temp1)
+                    :
+                    : "memory"
+                );
+            }
+        }
+    }
+
+    for (e = 0; e < ch_data->bs_num_env; e++) {
+        for (i = 2 * ch_data->t_env[e]; i < 2 * ch_data->t_env[e + 1]; i++) {
+            LOCAL_ALIGNED_16(float, g_filt_tab, [48]);
+            LOCAL_ALIGNED_16(float, q_filt_tab, [48]);
+            float *g_filt, *q_filt;
+
+            if (h_SL && e != e_a[0] && e != e_a[1]) {
+                g_filt = g_filt_tab;
+                q_filt = q_filt_tab;
+
+                for (m = 0; m < m_max; m++) {
+                    const int idx1 = i + h_SL;
+                    g_filt[m] = 0.0f;
+                    q_filt[m] = 0.0f;
+
+                    for (j = 0; j <= h_SL; j++) {
+                        g_filt[m] += g_temp[idx1 - j][m] * h_smooth[j];
+                        q_filt[m] += q_temp[idx1 - j][m] * h_smooth[j];
+                    }
+                }
+            } else {
+                g_filt = g_temp[i + h_SL];
+                q_filt = q_temp[i];
+            }
+
+            sbr->dsp.hf_g_filt(Y1[i] + kx, X_high + kx, g_filt, m_max,
+                               i + ENVELOPE_ADJUSTMENT_OFFSET);
+
+            if (e != e_a[0] && e != e_a[1]) {
+                sbr->dsp.hf_apply_noise[indexsine](Y1[i] + kx, sbr->s_m[e],
+                                                   q_filt, indexnoise,
+                                                   kx, m_max);
+            } else {
+                int idx = indexsine&1;
+                int A = (1-((indexsine+(kx & 1))&2));
+                int B = (A^(-idx)) + idx;
+                float *out = &Y1[i][kx][idx];
+                float *in  = sbr->s_m[e];
+                float temp0, temp1, temp2, temp3, temp4, temp5;
+                float A_f = (float)A;
+                float B_f = (float)B;
+
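+                /* add the sine components: s_m[e][m] is added to Y1 with factors A and B (+/-1) */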
+                for (m = 0; m+1 < m_max; m+=2) {
+
+                    temp2 = out[0];
+                    temp3 = out[2];
+
+                    __asm__ volatile(
+                        "lwc1    %[temp0],  0(%[in])                     \n\t"
+                        "lwc1    %[temp1],  4(%[in])                     \n\t"
+                        "madd.s  %[temp4],  %[temp2],  %[temp0], %[A_f]  \n\t"
+                        "madd.s  %[temp5],  %[temp3],  %[temp1], %[B_f]  \n\t"
+                        "swc1    %[temp4],  0(%[out])                    \n\t"
+                        "swc1    %[temp5],  8(%[out])                    \n\t"
+                        "addiu   %[in],     %[in],     8                 \n\t"
+                        "addiu   %[out],    %[out],    16                \n\t"
+
+                        : [temp0]"=&f" (temp0), [temp1]"=&f"(temp1),
+                          [temp4]"=&f" (temp4), [temp5]"=&f"(temp5),
+                          [in]"+r"(in), [out]"+r"(out)
+                        : [A_f]"f"(A_f), [B_f]"f"(B_f), [temp2]"f"(temp2),
+                          [temp3]"f"(temp3)
+                        : "memory"
+                    );
+                }
+                if(m_max&1)
+                    out[2*m  ] += in[m  ] * A;
+            }
+            indexnoise = (indexnoise + m_max) & 0x1ff;
+            indexsine = (indexsine + 1) & 3;
+        }
+    }
+    ch_data->f_indexnoise = indexnoise;
+    ch_data->f_indexsine  = indexsine;
+}
+
+static void sbr_hf_inverse_filter_mips(SBRDSPContext *dsp,
+                                  float (*alpha0)[2], float (*alpha1)[2],
+                                  const float X_low[32][40][2], int k0)
+{
+    int k;
+    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, c;
+    float *phi1, *alpha_1, *alpha_0, res1, res2, temp_real, temp_im;
+
+    c = 1.000001f;
+
+    for (k = 0; k < k0; k++) {
+        LOCAL_ALIGNED_16(float, phi, [3], [2][2]);
+        float dk;
+        phi1 = &phi[0][0][0];
+        alpha_1 = &alpha1[k][0];
+        alpha_0 = &alpha0[k][0];
+        dsp->autocorrelate(X_low[k], phi);
+
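+        /* compute dk and preload the phi[] values used by the alpha0/alpha1 blocks below */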
+        __asm__ volatile (
+            "lwc1    %[temp0],  40(%[phi1])                       \n\t"
+            "lwc1    %[temp1],  16(%[phi1])                       \n\t"
+            "lwc1    %[temp2],  24(%[phi1])                       \n\t"
+            "lwc1    %[temp3],  28(%[phi1])                       \n\t"
+            "mul.s   %[dk],     %[temp0],    %[temp1]             \n\t"
+            "lwc1    %[temp4],  0(%[phi1])                        \n\t"
+            "mul.s   %[res2],   %[temp2],    %[temp2]             \n\t"
+            "lwc1    %[temp5],  4(%[phi1])                        \n\t"
+            "madd.s  %[res2],   %[res2],     %[temp3],  %[temp3]  \n\t"
+            "lwc1    %[temp6],  8(%[phi1])                        \n\t"
+            "div.s   %[res2],   %[res2],     %[c]                 \n\t"
+            "lwc1    %[temp0],  12(%[phi1])                       \n\t"
+            "sub.s   %[dk],     %[dk],       %[res2]              \n\t"
+
+            : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+              [temp6]"=&f"(temp6), [res2]"=&f"(res2), [dk]"=&f"(dk)
+            : [phi1]"r"(phi1), [c]"f"(c)
+            : "memory"
+        );
+
+        if (!dk) {
+            alpha_1[0] = 0;
+            alpha_1[1] = 0;
+        } else {
+            __asm__ volatile (
+                "mul.s   %[temp_real], %[temp4],     %[temp2]            \n\t"
+                "nmsub.s %[temp_real], %[temp_real], %[temp5], %[temp3]  \n\t"
+                "nmsub.s %[temp_real], %[temp_real], %[temp6], %[temp1]  \n\t"
+                "mul.s   %[temp_im],   %[temp4],     %[temp3]            \n\t"
+                "madd.s  %[temp_im],   %[temp_im],   %[temp5], %[temp2]  \n\t"
+                "nmsub.s %[temp_im],   %[temp_im],   %[temp0], %[temp1]  \n\t"
+                "div.s   %[temp_real], %[temp_real], %[dk]               \n\t"
+                "div.s   %[temp_im],   %[temp_im],   %[dk]               \n\t"
+                "swc1    %[temp_real], 0(%[alpha_1])                     \n\t"
+                "swc1    %[temp_im],   4(%[alpha_1])                     \n\t"
+
+                : [temp_real]"=&f" (temp_real), [temp_im]"=&f"(temp_im)
+                : [phi1]"r"(phi1), [temp0]"f"(temp0), [temp1]"f"(temp1),
+                  [temp2]"f"(temp2), [temp3]"f"(temp3), [temp4]"f"(temp4),
+                  [temp5]"f"(temp5), [temp6]"f"(temp6),
+                  [alpha_1]"r"(alpha_1), [dk]"f"(dk)
+                : "memory"
+            );
+        }
+
+        if (!phi1[4]) {
+            alpha_0[0] = 0;
+            alpha_0[1] = 0;
+        } else {
+            __asm__ volatile (
+                "lwc1    %[temp6],     0(%[alpha_1])                     \n\t"
+                "lwc1    %[temp7],     4(%[alpha_1])                     \n\t"
+                "mul.s   %[temp_real], %[temp6],     %[temp2]            \n\t"
+                "add.s   %[temp_real], %[temp_real], %[temp4]            \n\t"
+                "madd.s  %[temp_real], %[temp_real], %[temp7], %[temp3]  \n\t"
+                "mul.s   %[temp_im],   %[temp7],     %[temp2]            \n\t"
+                "add.s   %[temp_im],   %[temp_im],   %[temp5]            \n\t"
+                "nmsub.s %[temp_im],   %[temp_im],   %[temp6], %[temp3]  \n\t"
+                "div.s   %[temp_real], %[temp_real], %[temp1]            \n\t"
+                "div.s   %[temp_im],   %[temp_im],   %[temp1]            \n\t"
+                "neg.s   %[temp_real], %[temp_real]                      \n\t"
+                "neg.s   %[temp_im],   %[temp_im]                        \n\t"
+                "swc1    %[temp_real], 0(%[alpha_0])                     \n\t"
+                "swc1    %[temp_im],   4(%[alpha_0])                     \n\t"
+
+                : [temp_real]"=&f"(temp_real), [temp_im]"=&f"(temp_im),
+                  [temp6]"=&f"(temp6), [temp7]"=&f"(temp7),
+                  [res1]"=&f"(res1), [res2]"=&f"(res2)
+                : [alpha_1]"r"(alpha_1), [alpha_0]"r"(alpha_0),
+                  [temp0]"f"(temp0), [temp1]"f"(temp1), [temp2]"f"(temp2),
+                  [temp3]"f"(temp3), [temp4]"f"(temp4), [temp5]"f"(temp5)
+                : "memory"
+            );
+        }
+
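+        /* compute |alpha1[k]|^2 and |alpha0[k]|^2 for the stability check below */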
+        __asm__ volatile (
+            "lwc1    %[temp1],      0(%[alpha_1])                           \n\t"
+            "lwc1    %[temp2],      4(%[alpha_1])                           \n\t"
+            "lwc1    %[temp_real],  0(%[alpha_0])                           \n\t"
+            "lwc1    %[temp_im],    4(%[alpha_0])                           \n\t"
+            "mul.s   %[res1],       %[temp1],      %[temp1]                 \n\t"
+            "madd.s  %[res1],       %[res1],       %[temp2],    %[temp2]    \n\t"
+            "mul.s   %[res2],       %[temp_real],  %[temp_real]             \n\t"
+            "madd.s  %[res2],       %[res2],       %[temp_im],  %[temp_im]  \n\t"
+
+            : [temp_real]"=&f"(temp_real), [temp_im]"=&f"(temp_im),
+              [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [res1]"=&f"(res1), [res2]"=&f"(res2)
+            : [alpha_1]"r"(alpha_1), [alpha_0]"r"(alpha_0)
+            : "memory"
+        );
+
+        if (res1 >= 16.0f || res2 >= 16.0f) {
+            alpha_1[0] = 0;
+            alpha_1[1] = 0;
+            alpha_0[0] = 0;
+            alpha_0[1] = 0;
+        }
+    }
+}
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
+void ff_aacsbr_init_mips(AACSBRContext *c)
+{
+#if HAVE_INLINE_ASM
+    c->sbr_lf_gen            = sbr_lf_gen_mips;
+    c->sbr_x_gen             = sbr_x_gen_mips;
+#if HAVE_MIPSFPU
+    c->sbr_hf_inverse_filter = sbr_hf_inverse_filter_mips;
+    c->sbr_hf_assemble       = sbr_hf_assemble_mips;
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+}
diff --git a/libavcodec/mips/aacsbr_mips.h b/libavcodec/mips/aacsbr_mips.h
new file mode 100644
index 0000000..aca78d7
--- /dev/null
+++ b/libavcodec/mips/aacsbr_mips.h
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2012
+ *      MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors:  Djordje Pesut   (djordje at mips.com)
+ *           Mirjana Vulin   (mvulin at mips.com)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/aacsbr.c
+ */
+
+#ifndef AVCODEC_MIPS_AACSBR_FLOAT_H
+#define AVCODEC_MIPS_AACSBR_FLOAT_H
+
+#include "libavcodec/aac.h"
+#include "libavcodec/sbr.h"
+
+#if HAVE_INLINE_ASM
+static void sbr_qmf_analysis_mips(DSPContext *dsp, FFTContext *mdct,
+                             SBRDSPContext *sbrdsp, const float *in, float *x,
+                             float z[320], float W[2][32][32][2], int buf_idx)
+{
+    int i;
+    float *w0;
+    float *w1;
+    int temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+
+    w0 = x;
+    w1 = x + 1024;
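+    /* shift the analysis filterbank state: copy 288 floats from x + 1024 down to x */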
+    for(i = 0; i < 36; i++)
+    {
+        /* loop unrolled 8 times */
+        __asm__ volatile(
+            "lw      %[temp0],   0(%[w1])         \n\t"
+            "lw      %[temp1],   4(%[w1])         \n\t"
+            "lw      %[temp2],   8(%[w1])         \n\t"
+            "lw      %[temp3],   12(%[w1])        \n\t"
+            "lw      %[temp4],   16(%[w1])        \n\t"
+            "lw      %[temp5],   20(%[w1])        \n\t"
+            "lw      %[temp6],   24(%[w1])        \n\t"
+            "lw      %[temp7],   28(%[w1])        \n\t"
+            "sw      %[temp0],   0(%[w0])         \n\t"
+            "sw      %[temp1],   4(%[w0])         \n\t"
+            "sw      %[temp2],   8(%[w0])         \n\t"
+            "sw      %[temp3],   12(%[w0])        \n\t"
+            "sw      %[temp4],   16(%[w0])        \n\t"
+            "sw      %[temp5],   20(%[w0])        \n\t"
+            "sw      %[temp6],   24(%[w0])        \n\t"
+            "sw      %[temp7],   28(%[w0])        \n\t"
+            "addiu   %[w0],      %[w0],     32    \n\t"
+            "addiu   %[w1],      %[w1],     32    \n\t"
+
+            : [w0]"+r"(w0), [w1]"+r"(w1),
+              [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+              [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+              [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+              [temp6]"=&r"(temp6), [temp7]"=&r"(temp7)
+            :
+            : "memory"
+        );
+    }
+
+    w0 = x + 288;
+    w1 = (float*)in;
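+    /* append the 1024 new input samples at x + 288 */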
+    for(i = 0; i < 128; i++)
+    {
+        /* loop unrolled 8 times */
+        __asm__ volatile(
+            "lw       %[temp0],    0(%[w1])        \n\t"
+            "lw       %[temp1],    4(%[w1])        \n\t"
+            "lw       %[temp2],    8(%[w1])        \n\t"
+            "lw       %[temp3],    12(%[w1])       \n\t"
+            "lw       %[temp4],    16(%[w1])       \n\t"
+            "lw       %[temp5],    20(%[w1])       \n\t"
+            "lw       %[temp6],    24(%[w1])       \n\t"
+            "lw       %[temp7],    28(%[w1])       \n\t"
+            "sw       %[temp0],    0(%[w0])        \n\t"
+            "sw       %[temp1],    4(%[w0])        \n\t"
+            "sw       %[temp2],    8(%[w0])        \n\t"
+            "sw       %[temp3],    12(%[w0])       \n\t"
+            "sw       %[temp4],    16(%[w0])       \n\t"
+            "sw       %[temp5],    20(%[w0])       \n\t"
+            "sw       %[temp6],    24(%[w0])       \n\t"
+            "sw       %[temp7],    28(%[w0])       \n\t"
+            "addiu    %[w0],       %[w0],     32   \n\t"
+            "addiu    %[w1],       %[w1],     32   \n\t"
+
+            : [w0]"+r"(w0), [w1]"+r"(w1),
+              [temp0]"=&r"(temp0), [temp1]"=&r"(temp1),
+              [temp2]"=&r"(temp2), [temp3]"=&r"(temp3),
+              [temp4]"=&r"(temp4), [temp5]"=&r"(temp5),
+              [temp6]"=&r"(temp6), [temp7]"=&r"(temp7)
+            :
+            : "memory"
+        );
+    }
+
+    for (i = 0; i < 32; i++) { // numTimeSlots*RATE = 16*2 as 960 sample frames
+                               // are not supported
+        dsp->vector_fmul_reverse(z, sbr_qmf_window_ds, x, 320);
+        sbrdsp->sum64x5(z);
+        sbrdsp->qmf_pre_shuffle(z);
+        mdct->imdct_half(mdct, z, z+64);
+        sbrdsp->qmf_post_shuffle(W[buf_idx][i], z);
+        x += 32;
+    }
+}
+
+#if HAVE_MIPSFPU
+static void sbr_qmf_synthesis_mips(DSPContext *dsp, FFTContext *mdct,
+                              SBRDSPContext *sbrdsp, AVFloatDSPContext *fdsp,
+                              float *out, float X[2][38][64],
+                              float mdct_buf[2][64],
+                              float *v0, int *v_off, const unsigned int div)
+{
+    int i, n;
+    const float *sbr_qmf_window = div ? sbr_qmf_window_ds : sbr_qmf_window_us;
+    const int step = 128 >> div;
+    float *v;
+    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7, temp8, temp9, temp10, temp11, temp12, temp13;
+    float temp14, temp15, temp16, temp17, temp18, temp19;
+    float *vv0, *s0, *dst;
+    dst = out;
+
+    for (i = 0; i < 32; i++) {
+        if (*v_off < step) {
+            int saved_samples = (1280 - 128) >> div;
+            memcpy(&v0[SBR_SYNTHESIS_BUF_SIZE - saved_samples], v0, saved_samples * sizeof(float));
+            *v_off = SBR_SYNTHESIS_BUF_SIZE - saved_samples - step;
+        } else {
+            *v_off -= step;
+        }
+        v = v0 + *v_off;
+        if (div) {
+            for (n = 0; n < 32; n++) {
+                X[0][i][   n] = -X[0][i][n];
+                X[0][i][32+n] =  X[1][i][31-n];
+            }
+            mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
+            sbrdsp->qmf_deint_neg(v, mdct_buf[0]);
+        } else {
+            sbrdsp->neg_odd_64(X[1][i]);
+            mdct->imdct_half(mdct, mdct_buf[0], X[0][i]);
+            mdct->imdct_half(mdct, mdct_buf[1], X[1][i]);
+            sbrdsp->qmf_deint_bfly(v, mdct_buf[1], mdct_buf[0]);
+        }
+
+        if(div == 0)
+        {
+            float *v0_end;
+            vv0 = v;
+            v0_end = v + 60;
+            s0 = (float*)sbr_qmf_window;
+
+            /* 10 calls of function vector_fmul_add merged into one loop
+               and loop unrolled 4 times */
+            __asm__ volatile(
+                ".set    push                                           \n\t"
+                ".set    noreorder                                      \n\t"
+                "lwc1    %[temp4],   0(%[v0])                           \n\t"
+                "lwc1    %[temp5],   0(%[s0])                           \n\t"
+                "lwc1    %[temp6],   4(%[v0])                           \n\t"
+                "lwc1    %[temp7],   4(%[s0])                           \n\t"
+                "lwc1    %[temp8],   8(%[v0])                           \n\t"
+                "lwc1    %[temp9],   8(%[s0])                           \n\t"
+                "lwc1    %[temp10],  12(%[v0])                          \n\t"
+                "lwc1    %[temp11],  12(%[s0])                          \n\t"
+                "lwc1    %[temp12],  768(%[v0])                         \n\t"
+                "lwc1    %[temp13],  256(%[s0])                         \n\t"
+                "lwc1    %[temp14],  772(%[v0])                         \n\t"
+                "lwc1    %[temp15],  260(%[s0])                         \n\t"
+                "lwc1    %[temp16],  776(%[v0])                         \n\t"
+                "lwc1    %[temp17],  264(%[s0])                         \n\t"
+                "lwc1    %[temp18],  780(%[v0])                         \n\t"
+                "lwc1    %[temp19],  268(%[s0])                         \n\t"
+            "1:                                                         \n\t"
+                "mul.s   %[temp0],   %[temp4],   %[temp5]               \n\t"
+                "lwc1    %[temp4],   1024(%[v0])                        \n\t"
+                "mul.s   %[temp1],   %[temp6],   %[temp7]               \n\t"
+                "lwc1    %[temp5],   512(%[s0])                         \n\t"
+                "mul.s   %[temp2],   %[temp8],   %[temp9]               \n\t"
+                "lwc1    %[temp6],   1028(%[v0])                        \n\t"
+                "mul.s   %[temp3],   %[temp10],  %[temp11]              \n\t"
+                "lwc1    %[temp7],   516(%[s0])                         \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   1032(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   520(%[s0])                         \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  1036(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  524(%[s0])                         \n\t"
+                "lwc1    %[temp12],  1792(%[v0])                        \n\t"
+                "lwc1    %[temp13],  768(%[s0])                         \n\t"
+                "lwc1    %[temp14],  1796(%[v0])                        \n\t"
+                "lwc1    %[temp15],  772(%[s0])                         \n\t"
+                "lwc1    %[temp16],  1800(%[v0])                        \n\t"
+                "lwc1    %[temp17],  776(%[s0])                         \n\t"
+                "lwc1    %[temp18],  1804(%[v0])                        \n\t"
+                "lwc1    %[temp19],  780(%[s0])                         \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp4],   %[temp5]   \n\t"
+                "lwc1    %[temp4],   2048(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp6],   %[temp7]   \n\t"
+                "lwc1    %[temp5],   1024(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp8],   %[temp9]   \n\t"
+                "lwc1    %[temp6],   2052(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp10],  %[temp11]  \n\t"
+                "lwc1    %[temp7],   1028(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   2056(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   1032(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  2060(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  1036(%[s0])                        \n\t"
+                "lwc1    %[temp12],  2816(%[v0])                        \n\t"
+                "lwc1    %[temp13],  1280(%[s0])                        \n\t"
+                "lwc1    %[temp14],  2820(%[v0])                        \n\t"
+                "lwc1    %[temp15],  1284(%[s0])                        \n\t"
+                "lwc1    %[temp16],  2824(%[v0])                        \n\t"
+                "lwc1    %[temp17],  1288(%[s0])                        \n\t"
+                "lwc1    %[temp18],  2828(%[v0])                        \n\t"
+                "lwc1    %[temp19],  1292(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp4],   %[temp5]   \n\t"
+                "lwc1    %[temp4],   3072(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp6],   %[temp7]   \n\t"
+                "lwc1    %[temp5],   1536(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp8],   %[temp9]   \n\t"
+                "lwc1    %[temp6],   3076(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp10],  %[temp11]  \n\t"
+                "lwc1    %[temp7],   1540(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   3080(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   1544(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  3084(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  1548(%[s0])                        \n\t"
+                "lwc1    %[temp12],  3840(%[v0])                        \n\t"
+                "lwc1    %[temp13],  1792(%[s0])                        \n\t"
+                "lwc1    %[temp14],  3844(%[v0])                        \n\t"
+                "lwc1    %[temp15],  1796(%[s0])                        \n\t"
+                "lwc1    %[temp16],  3848(%[v0])                        \n\t"
+                "lwc1    %[temp17],  1800(%[s0])                        \n\t"
+                "lwc1    %[temp18],  3852(%[v0])                        \n\t"
+                "lwc1    %[temp19],  1804(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp4],   %[temp5]   \n\t"
+                "lwc1    %[temp4],   4096(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp6],   %[temp7]   \n\t"
+                "lwc1    %[temp5],   2048(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp8],   %[temp9]   \n\t"
+                "lwc1    %[temp6],   4100(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp10],  %[temp11]  \n\t"
+                "lwc1    %[temp7],   2052(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   4104(%[v0])                        \n\t"
+                "addiu   %[dst],     %[dst],     16                     \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   2056(%[s0])                        \n\t"
+                "addiu   %[s0],      %[s0],      16                     \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  4108(%[v0])                        \n\t"
+                "addiu   %[v0],      %[v0],      16                     \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  2044(%[s0])                        \n\t"
+                "lwc1    %[temp12],  4848(%[v0])                        \n\t"
+                "lwc1    %[temp13],  2288(%[s0])                        \n\t"
+                "lwc1    %[temp14],  4852(%[v0])                        \n\t"
+                "lwc1    %[temp15],  2292(%[s0])                        \n\t"
+                "lwc1    %[temp16],  4856(%[v0])                        \n\t"
+                "lwc1    %[temp17],  2296(%[s0])                        \n\t"
+                "lwc1    %[temp18],  4860(%[v0])                        \n\t"
+                "lwc1    %[temp19],  2300(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp4],   %[temp5]   \n\t"
+                "lwc1    %[temp4],   0(%[v0])                           \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp6],   %[temp7]   \n\t"
+                "lwc1    %[temp5],   0(%[s0])                           \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp8],   %[temp9]   \n\t"
+                "lwc1    %[temp6],   4(%[v0])                           \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp10],  %[temp11]  \n\t"
+                "lwc1    %[temp7],   4(%[s0])                           \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   8(%[v0])                           \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   8(%[s0])                           \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  12(%[v0])                          \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  12(%[s0])                          \n\t"
+                "lwc1    %[temp12],  768(%[v0])                         \n\t"
+                "lwc1    %[temp13],  256(%[s0])                         \n\t"
+                "lwc1    %[temp14],  772(%[v0])                         \n\t"
+                "lwc1    %[temp15],  260(%[s0])                         \n\t"
+                "lwc1    %[temp16],  776(%[v0])                         \n\t"
+                "lwc1    %[temp17],  264(%[s0])                         \n\t"
+                "lwc1    %[temp18],  780(%[v0])                         \n\t"
+                "lwc1    %[temp19],  268(%[s0])                         \n\t"
+                "swc1    %[temp0],   -16(%[dst])                        \n\t"
+                "swc1    %[temp1],   -12(%[dst])                        \n\t"
+                "swc1    %[temp2],   -8(%[dst])                         \n\t"
+                "bne     %[v0],      %[v0_end],  1b                     \n\t"
+                " swc1   %[temp3],   -4(%[dst])                         \n\t"
+                "mul.s   %[temp0],   %[temp4],   %[temp5]               \n\t"
+                "lwc1    %[temp4],   1024(%[v0])                        \n\t"
+                "mul.s   %[temp1],   %[temp6],   %[temp7]               \n\t"
+                "lwc1    %[temp5],   512(%[s0])                         \n\t"
+                "mul.s   %[temp2],   %[temp8],   %[temp9]               \n\t"
+                "lwc1    %[temp6],   1028(%[v0])                        \n\t"
+                "mul.s   %[temp3],   %[temp10],  %[temp11]              \n\t"
+                "lwc1    %[temp7],   516(%[s0])                         \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   1032(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   520(%[s0])                         \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  1036(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  524(%[s0])                         \n\t"
+                "lwc1    %[temp12],  1792(%[v0])                        \n\t"
+                "lwc1    %[temp13],  768(%[s0])                         \n\t"
+                "lwc1    %[temp14],  1796(%[v0])                        \n\t"
+                "lwc1    %[temp15],  772(%[s0])                         \n\t"
+                "lwc1    %[temp16],  1800(%[v0])                        \n\t"
+                "lwc1    %[temp17],  776(%[s0])                         \n\t"
+                "lwc1    %[temp18],  1804(%[v0])                        \n\t"
+                "lwc1    %[temp19],  780(%[s0])                         \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp4],   %[temp5]   \n\t"
+                "lwc1    %[temp4],   2048(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp6],   %[temp7]   \n\t"
+                "lwc1    %[temp5],   1024(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp8],   %[temp9]   \n\t"
+                "lwc1    %[temp6],   2052(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp10],  %[temp11]  \n\t"
+                "lwc1    %[temp7],   1028(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   2056(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   1032(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  2060(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  1036(%[s0])                        \n\t"
+                "lwc1    %[temp12],  2816(%[v0])                        \n\t"
+                "lwc1    %[temp13],  1280(%[s0])                        \n\t"
+                "lwc1    %[temp14],  2820(%[v0])                        \n\t"
+                "lwc1    %[temp15],  1284(%[s0])                        \n\t"
+                "lwc1    %[temp16],  2824(%[v0])                        \n\t"
+                "lwc1    %[temp17],  1288(%[s0])                        \n\t"
+                "lwc1    %[temp18],  2828(%[v0])                        \n\t"
+                "lwc1    %[temp19],  1292(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp4],   %[temp5]   \n\t"
+                "lwc1    %[temp4],   3072(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp6],   %[temp7]   \n\t"
+                "lwc1    %[temp5],   1536(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp8],   %[temp9]   \n\t"
+                "lwc1    %[temp6],   3076(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp10],  %[temp11]  \n\t"
+                "lwc1    %[temp7],   1540(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   3080(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   1544(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  3084(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  1548(%[s0])                        \n\t"
+                "lwc1    %[temp12],  3840(%[v0])                        \n\t"
+                "lwc1    %[temp13],  1792(%[s0])                        \n\t"
+                "lwc1    %[temp14],  3844(%[v0])                        \n\t"
+                "lwc1    %[temp15],  1796(%[s0])                        \n\t"
+                "lwc1    %[temp16],  3848(%[v0])                        \n\t"
+                "lwc1    %[temp17],  1800(%[s0])                        \n\t"
+                "lwc1    %[temp18],  3852(%[v0])                        \n\t"
+                "lwc1    %[temp19],  1804(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp4],   %[temp5]   \n\t"
+                "lwc1    %[temp4],   4096(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp6],   %[temp7]   \n\t"
+                "lwc1    %[temp5],   2048(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp8],   %[temp9]   \n\t"
+                "lwc1    %[temp6],   4100(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp10],  %[temp11]  \n\t"
+                "lwc1    %[temp7],   2052(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "lwc1    %[temp8],   4104(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "lwc1    %[temp9],   2056(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "lwc1    %[temp10],  4108(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "lwc1    %[temp11],  2060(%[s0])                        \n\t"
+                "lwc1    %[temp12],  4864(%[v0])                        \n\t"
+                "lwc1    %[temp13],  2304(%[s0])                        \n\t"
+                "lwc1    %[temp14],  4868(%[v0])                        \n\t"
+                "lwc1    %[temp15],  2308(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp4],   %[temp5]   \n\t"
+                "lwc1    %[temp16],  4872(%[v0])                        \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp6],   %[temp7]   \n\t"
+                "lwc1    %[temp17],  2312(%[s0])                        \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp8],   %[temp9]   \n\t"
+                "lwc1    %[temp18],  4876(%[v0])                        \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp10],  %[temp11]  \n\t"
+                "lwc1    %[temp19],  2316(%[s0])                        \n\t"
+                "madd.s  %[temp0],   %[temp0],   %[temp12],  %[temp13]  \n\t"
+                "addiu   %[dst],     %[dst],     16                     \n\t"
+                "madd.s  %[temp1],   %[temp1],   %[temp14],  %[temp15]  \n\t"
+                "madd.s  %[temp2],   %[temp2],   %[temp16],  %[temp17]  \n\t"
+                "madd.s  %[temp3],   %[temp3],   %[temp18],  %[temp19]  \n\t"
+                "swc1    %[temp0],   -16(%[dst])                        \n\t"
+                "swc1    %[temp1],   -12(%[dst])                        \n\t"
+                "swc1    %[temp2],   -8(%[dst])                         \n\t"
+                "swc1    %[temp3],   -4(%[dst])                         \n\t"
+                ".set    pop                                            \n\t"
+
+                : [dst]"+r"(dst), [v0]"+r"(vv0), [s0]"+r"(s0),
+                  [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+                  [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+                  [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+                  [temp9]"=&f"(temp9), [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+                  [temp12]"=&f"(temp12), [temp13]"=&f"(temp13), [temp14]"=&f"(temp14),
+                  [temp15]"=&f"(temp15), [temp16]"=&f"(temp16), [temp17]"=&f"(temp17),
+                  [temp18]"=&f"(temp18), [temp19]"=&f"(temp19)
+                : [v0_end]"r"(v0_end)
+                : "memory"
+            );
+        } else {
+            fdsp->vector_fmul   (out, v                , sbr_qmf_window                       , 64 >> div);
+            dsp->vector_fmul_add(out, v + ( 192 >> div), sbr_qmf_window + ( 64 >> div), out   , 64 >> div);
+            dsp->vector_fmul_add(out, v + ( 256 >> div), sbr_qmf_window + (128 >> div), out   , 64 >> div);
+            dsp->vector_fmul_add(out, v + ( 448 >> div), sbr_qmf_window + (192 >> div), out   , 64 >> div);
+            dsp->vector_fmul_add(out, v + ( 512 >> div), sbr_qmf_window + (256 >> div), out   , 64 >> div);
+            dsp->vector_fmul_add(out, v + ( 704 >> div), sbr_qmf_window + (320 >> div), out   , 64 >> div);
+            dsp->vector_fmul_add(out, v + ( 768 >> div), sbr_qmf_window + (384 >> div), out   , 64 >> div);
+            dsp->vector_fmul_add(out, v + ( 960 >> div), sbr_qmf_window + (448 >> div), out   , 64 >> div);
+            dsp->vector_fmul_add(out, v + (1024 >> div), sbr_qmf_window + (512 >> div), out   , 64 >> div);
+            dsp->vector_fmul_add(out, v + (1216 >> div), sbr_qmf_window + (576 >> div), out   , 64 >> div);
+            out += 64 >> div;
+        }
+    }
+}
+
+#define sbr_qmf_analysis sbr_qmf_analysis_mips
+#define sbr_qmf_synthesis sbr_qmf_synthesis_mips
+
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
+#endif /* AVCODEC_MIPS_AACSBR_FLOAT_H */
diff --git a/libavcodec/mips/dsputil_mips.c b/libavcodec/mips/dsputil_mips.c
index 172cbfa..3f9b61f 100644
--- a/libavcodec/mips/dsputil_mips.c
+++ b/libavcodec/mips/dsputil_mips.c
@@ -160,6 +160,45 @@ static void vector_fmul_window_mips(float *dst, const float *src0,
     }
 }
 
+/* Multiply src0[] by src1[] read back-to-front:
+ * dst[i] = src0[i] * src1[len - 1 - i], four elements per loop iteration. */
+static void vector_fmul_reverse_mips(float *dst, const float *src0,
+                                     const float *src1, int len)
+{
+    int i;
+    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+    src1 += len - 1;
+
+    for (i = 0; i < (len >> 2); i++) {
+        /* loop unrolled 4 times */
+        __asm__ volatile(
+            "lwc1      %[temp0],     0(%[src0])                 \n\t"
+            "lwc1      %[temp1],     0(%[src1])                 \n\t"
+            "lwc1      %[temp2],     4(%[src0])                 \n\t"
+            "lwc1      %[temp3],     -4(%[src1])                \n\t"
+            "lwc1      %[temp4],     8(%[src0])                 \n\t"
+            "lwc1      %[temp5],     -8(%[src1])                \n\t"
+            "lwc1      %[temp6],     12(%[src0])                \n\t"
+            "lwc1      %[temp7],     -12(%[src1])               \n\t"
+            "mul.s     %[temp0],     %[temp1],     %[temp0]     \n\t"
+            "mul.s     %[temp2],     %[temp3],     %[temp2]     \n\t"
+            "mul.s     %[temp4],     %[temp5],     %[temp4]     \n\t"
+            "mul.s     %[temp6],     %[temp7],     %[temp6]     \n\t"
+            "addiu     %[src0],      %[src0],      16           \n\t"
+            "addiu     %[src1],      %[src1],      -16          \n\t"
+            "addiu     %[dst],       %[dst],       16           \n\t"
+            "swc1      %[temp0],     -16(%[dst])                \n\t"
+            "swc1      %[temp2],     -12(%[dst])                \n\t"
+            "swc1      %[temp4],     -8(%[dst])                 \n\t"
+            "swc1      %[temp6],     -4(%[dst])                 \n\t"
+
+            : [dst]"+r"(dst), [src0]"+r"(src0), [src1]"+r"(src1),
+              [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),[temp2]"=&f"(temp2),
+              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
+            :
+            : "memory"
+        );
+    }
+}
+
 static void butterflies_float_mips(float *av_restrict v1, float *av_restrict v2,
                                 int len)
 {
@@ -243,6 +282,7 @@ av_cold void ff_dsputil_init_mips( DSPContext* c, AVCodecContext *avctx )
 {
 #if HAVE_INLINE_ASM
     c->vector_fmul_window = vector_fmul_window_mips;
+    c->vector_fmul_reverse = vector_fmul_reverse_mips;
     c->butterflies_float   = butterflies_float_mips;
 #endif /* HAVE_INLINE_ASM */
 }
diff --git a/libavcodec/mips/sbrdsp_mips.c b/libavcodec/mips/sbrdsp_mips.c
new file mode 100644
index 0000000..d4460ba
--- /dev/null
+++ b/libavcodec/mips/sbrdsp_mips.c
@@ -0,0 +1,940 @@
+/*
+ * Copyright (c) 2012
+ *      MIPS Technologies, Inc., California.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Authors:  Darko Laus      (darko at mips.com)
+ *           Djordje Pesut   (djordje at mips.com)
+ *           Mirjana Vulin   (mvulin at mips.com)
+ *
+ * AAC Spectral Band Replication decoding functions optimized for MIPS
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Reference: libavcodec/sbrdsp.c
+ */
+
+#include "config.h"
+#include "libavcodec/sbrdsp.h"
+
+#if HAVE_INLINE_ASM
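+/* Flip the IEEE-754 sign bit of the odd-indexed floats in x[0..63] with an
+ * integer XOR against 0x80000000; four elements are negated per iteration. */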
+static void sbr_neg_odd_64_mips(float *x)
+{
+    int Temp1, Temp2, Temp3, Temp4, Temp5;
+    float *x1    = &x[1];
+    float *x_end = x1 + 64;
+
+    /* loop unrolled 4 times */
+    __asm__ volatile (
+        "lui    %[Temp5],   0x8000                  \n\t"
+    "1:                                             \n\t"
+        "lw     %[Temp1],   0(%[x1])                \n\t"
+        "lw     %[Temp2],   8(%[x1])                \n\t"
+        "lw     %[Temp3],   16(%[x1])               \n\t"
+        "lw     %[Temp4],   24(%[x1])               \n\t"
+        "xor    %[Temp1],   %[Temp1],   %[Temp5]    \n\t"
+        "xor    %[Temp2],   %[Temp2],   %[Temp5]    \n\t"
+        "xor    %[Temp3],   %[Temp3],   %[Temp5]    \n\t"
+        "xor    %[Temp4],   %[Temp4],   %[Temp5]    \n\t"
+        "sw     %[Temp1],   0(%[x1])                \n\t"
+        "sw     %[Temp2],   8(%[x1])                \n\t"
+        "sw     %[Temp3],   16(%[x1])               \n\t"
+        "sw     %[Temp4],   24(%[x1])               \n\t"
+        "addiu  %[x1],      %[x1],      32          \n\t"
+        "bne    %[x1],      %[x_end],   1b          \n\t"
+
+        : [Temp1]"=&r"(Temp1), [Temp2]"=&r"(Temp2),
+          [Temp3]"=&r"(Temp3), [Temp4]"=&r"(Temp4),
+          [Temp5]"=&r"(Temp5), [x1]"+r"(x1)
+        : [x_end]"r"(x_end)
+        : "memory"
+    );
+}
+
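+/* Reorder and sign-flip the analysis QMF samples in z[]; MIPS counterpart of
+ * sbr_qmf_pre_shuffle() in libavcodec/sbrdsp.c, unrolled five times. */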
+static void sbr_qmf_pre_shuffle_mips(float *z)
+{
+    int Temp1, Temp2, Temp3, Temp4, Temp5, Temp6;
+    float *z1 = &z[66];
+    float *z2 = &z[59];
+    float *z3 = &z[2];
+    float *z4 = z1 + 60;
+
+    /* loop unrolled 5 times */
+    __asm__ volatile (
+        "lui    %[Temp6],   0x8000                  \n\t"
+    "1:                                             \n\t"
+        "lw     %[Temp1],   0(%[z2])                \n\t"
+        "lw     %[Temp2],   4(%[z2])                \n\t"
+        "lw     %[Temp3],   8(%[z2])                \n\t"
+        "lw     %[Temp4],   12(%[z2])               \n\t"
+        "lw     %[Temp5],   16(%[z2])               \n\t"
+        "xor    %[Temp1],   %[Temp1],   %[Temp6]    \n\t"
+        "xor    %[Temp2],   %[Temp2],   %[Temp6]    \n\t"
+        "xor    %[Temp3],   %[Temp3],   %[Temp6]    \n\t"
+        "xor    %[Temp4],   %[Temp4],   %[Temp6]    \n\t"
+        "xor    %[Temp5],   %[Temp5],   %[Temp6]    \n\t"
+        "addiu  %[z2],      %[z2],      -20         \n\t"
+        "sw     %[Temp1],   32(%[z1])               \n\t"
+        "sw     %[Temp2],   24(%[z1])               \n\t"
+        "sw     %[Temp3],   16(%[z1])               \n\t"
+        "sw     %[Temp4],   8(%[z1])                \n\t"
+        "sw     %[Temp5],   0(%[z1])                \n\t"
+        "lw     %[Temp1],   0(%[z3])                \n\t"
+        "lw     %[Temp2],   4(%[z3])                \n\t"
+        "lw     %[Temp3],   8(%[z3])                \n\t"
+        "lw     %[Temp4],   12(%[z3])               \n\t"
+        "lw     %[Temp5],   16(%[z3])               \n\t"
+        "sw     %[Temp1],   4(%[z1])                \n\t"
+        "sw     %[Temp2],   12(%[z1])               \n\t"
+        "sw     %[Temp3],   20(%[z1])               \n\t"
+        "sw     %[Temp4],   28(%[z1])               \n\t"
+        "sw     %[Temp5],   36(%[z1])               \n\t"
+        "addiu  %[z3],      %[z3],      20          \n\t"
+        "addiu  %[z1],      %[z1],      40          \n\t"
+        "bne    %[z1],      %[z4],      1b          \n\t"
+        "lw     %[Temp1],   132(%[z])               \n\t"
+        "lw     %[Temp2],   128(%[z])               \n\t"
+        "lw     %[Temp3],   0(%[z])                 \n\t"
+        "lw     %[Temp4],   4(%[z])                 \n\t"
+        "xor    %[Temp1],   %[Temp1],   %[Temp6]    \n\t"
+        "sw     %[Temp1],   504(%[z])               \n\t"
+        "sw     %[Temp2],   508(%[z])               \n\t"
+        "sw     %[Temp3],   256(%[z])               \n\t"
+        "sw     %[Temp4],   260(%[z])               \n\t"
+
+        : [Temp1]"=&r"(Temp1), [Temp2]"=&r"(Temp2),
+          [Temp3]"=&r"(Temp3), [Temp4]"=&r"(Temp4),
+          [Temp5]"=&r"(Temp5), [Temp6]"=&r"(Temp6),
+          [z1]"+r"(z1), [z2]"+r"(z2), [z3]"+r"(z3)
+        : [z4]"r"(z4), [z]"r"(z)
+        : "memory"
+    );
+}
+
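+/* Pack the FFT output into complex pairs: W[k][0] = -z[63 - k] and
+ * W[k][1] = z[k] for k = 0..31, four pairs per iteration. */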
+static void sbr_qmf_post_shuffle_mips(float W[32][2], const float *z)
+{
+    int Temp1, Temp2, Temp3, Temp4, Temp5;
+    float *W_ptr = (float *)W;
+    float *z1    = (float *)z;
+    float *z2    = (float *)&z[60];
+    float *z_end = z1 + 32;
+
+    /* loop unrolled 4 times */
+    __asm__ volatile (
+        "lui    %[Temp5],   0x8000                  \n\t"
+    "1:                                             \n\t"
+        "lw     %[Temp1],   0(%[z2])                \n\t"
+        "lw     %[Temp2],   4(%[z2])                \n\t"
+        "lw     %[Temp3],   8(%[z2])                \n\t"
+        "lw     %[Temp4],   12(%[z2])               \n\t"
+        "xor    %[Temp1],   %[Temp1],   %[Temp5]    \n\t"
+        "xor    %[Temp2],   %[Temp2],   %[Temp5]    \n\t"
+        "xor    %[Temp3],   %[Temp3],   %[Temp5]    \n\t"
+        "xor    %[Temp4],   %[Temp4],   %[Temp5]    \n\t"
+        "addiu  %[z2],      %[z2],      -16         \n\t"
+        "sw     %[Temp1],   24(%[W_ptr])            \n\t"
+        "sw     %[Temp2],   16(%[W_ptr])            \n\t"
+        "sw     %[Temp3],   8(%[W_ptr])             \n\t"
+        "sw     %[Temp4],   0(%[W_ptr])             \n\t"
+        "lw     %[Temp1],   0(%[z1])                \n\t"
+        "lw     %[Temp2],   4(%[z1])                \n\t"
+        "lw     %[Temp3],   8(%[z1])                \n\t"
+        "lw     %[Temp4],   12(%[z1])               \n\t"
+        "sw     %[Temp1],   4(%[W_ptr])             \n\t"
+        "sw     %[Temp2],   12(%[W_ptr])            \n\t"
+        "sw     %[Temp3],   20(%[W_ptr])            \n\t"
+        "sw     %[Temp4],   28(%[W_ptr])            \n\t"
+        "addiu  %[z1],      %[z1],      16          \n\t"
+        "addiu  %[W_ptr],   %[W_ptr],   32          \n\t"
+        "bne    %[z1],      %[z_end],   1b          \n\t"
+
+        : [Temp1]"=&r"(Temp1), [Temp2]"=&r"(Temp2),
+          [Temp3]"=&r"(Temp3), [Temp4]"=&r"(Temp4),
+          [Temp5]"=&r"(Temp5), [z1]"+r"(z1),
+          [z2]"+r"(z2), [W_ptr]"+r"(W_ptr)
+        : [z_end]"r"(z_end)
+        : "memory"
+    );
+}
+
+#if HAVE_MIPSFPU
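+/* In-place sum of five strided blocks:
+ * z[k] += z[k + 64] + z[k + 128] + z[k + 192] + z[k + 256] for k = 0..63. */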
+static void sbr_sum64x5_mips(float *z)
+{
+    int k;
+    float *z1;
+    float f1, f2, f3, f4, f5, f6, f7, f8;
+    for (k = 0; k < 64; k += 8) {
+
+        z1 = &z[k];
+
+        /* loop unrolled 8 times */
+        __asm__ volatile (
+            "lwc1   $f0,    0(%[z1])        \n\t"
+            "lwc1   $f1,    256(%[z1])      \n\t"
+            "lwc1   $f2,    4(%[z1])        \n\t"
+            "lwc1   $f3,    260(%[z1])      \n\t"
+            "lwc1   $f4,    8(%[z1])        \n\t"
+            "add.s  %[f1],  $f0,    $f1     \n\t"
+            "lwc1   $f5,    264(%[z1])      \n\t"
+            "add.s  %[f2],  $f2,    $f3     \n\t"
+            "lwc1   $f6,    12(%[z1])       \n\t"
+            "lwc1   $f7,    268(%[z1])      \n\t"
+            "add.s  %[f3],  $f4,    $f5     \n\t"
+            "lwc1   $f8,    16(%[z1])       \n\t"
+            "lwc1   $f9,    272(%[z1])      \n\t"
+            "add.s  %[f4],  $f6,    $f7     \n\t"
+            "lwc1   $f10,   20(%[z1])       \n\t"
+            "lwc1   $f11,   276(%[z1])      \n\t"
+            "add.s  %[f5],  $f8,    $f9     \n\t"
+            "lwc1   $f12,   24(%[z1])       \n\t"
+            "lwc1   $f13,   280(%[z1])      \n\t"
+            "add.s  %[f6],  $f10,   $f11    \n\t"
+            "lwc1   $f14,   28(%[z1])       \n\t"
+            "lwc1   $f15,   284(%[z1])      \n\t"
+            "add.s  %[f7],  $f12,   $f13    \n\t"
+            "lwc1   $f0,    512(%[z1])      \n\t"
+            "lwc1   $f1,    516(%[z1])      \n\t"
+            "add.s  %[f8],  $f14,   $f15    \n\t"
+            "lwc1   $f2,    520(%[z1])      \n\t"
+            "add.s  %[f1],  %[f1],  $f0     \n\t"
+            "add.s  %[f2],  %[f2],  $f1     \n\t"
+            "lwc1   $f3,    524(%[z1])      \n\t"
+            "add.s  %[f3],  %[f3],  $f2     \n\t"
+            "lwc1   $f4,    528(%[z1])      \n\t"
+            "lwc1   $f5,    532(%[z1])      \n\t"
+            "add.s  %[f4],  %[f4],  $f3     \n\t"
+            "lwc1   $f6,    536(%[z1])      \n\t"
+            "add.s  %[f5],  %[f5],  $f4     \n\t"
+            "add.s  %[f6],  %[f6],  $f5     \n\t"
+            "lwc1   $f7,    540(%[z1])      \n\t"
+            "add.s  %[f7],  %[f7],  $f6     \n\t"
+            "lwc1   $f0,    768(%[z1])      \n\t"
+            "lwc1   $f1,    772(%[z1])      \n\t"
+            "add.s  %[f8],  %[f8],  $f7     \n\t"
+            "lwc1   $f2,    776(%[z1])      \n\t"
+            "add.s  %[f1],  %[f1],  $f0     \n\t"
+            "add.s  %[f2],  %[f2],  $f1     \n\t"
+            "lwc1   $f3,    780(%[z1])      \n\t"
+            "add.s  %[f3],  %[f3],  $f2     \n\t"
+            "lwc1   $f4,    784(%[z1])      \n\t"
+            "lwc1   $f5,    788(%[z1])      \n\t"
+            "add.s  %[f4],  %[f4],  $f3     \n\t"
+            "lwc1   $f6,    792(%[z1])      \n\t"
+            "add.s  %[f5],  %[f5],  $f4     \n\t"
+            "add.s  %[f6],  %[f6],  $f5     \n\t"
+            "lwc1   $f7,    796(%[z1])      \n\t"
+            "add.s  %[f7],  %[f7],  $f6     \n\t"
+            "lwc1   $f0,    1024(%[z1])     \n\t"
+            "lwc1   $f1,    1028(%[z1])     \n\t"
+            "add.s  %[f8],  %[f8],  $f7     \n\t"
+            "lwc1   $f2,    1032(%[z1])     \n\t"
+            "add.s  %[f1],  %[f1],  $f0     \n\t"
+            "add.s  %[f2],  %[f2],  $f1     \n\t"
+            "lwc1   $f3,    1036(%[z1])     \n\t"
+            "add.s  %[f3],  %[f3],  $f2     \n\t"
+            "lwc1   $f4,    1040(%[z1])     \n\t"
+            "lwc1   $f5,    1044(%[z1])     \n\t"
+            "add.s  %[f4],  %[f4],  $f3     \n\t"
+            "lwc1   $f6,    1048(%[z1])     \n\t"
+            "add.s  %[f5],  %[f5],  $f4     \n\t"
+            "add.s  %[f6],  %[f6],  $f5     \n\t"
+            "lwc1   $f7,    1052(%[z1])     \n\t"
+            "add.s  %[f7],  %[f7],  $f6     \n\t"
+            "swc1   %[f1],  0(%[z1])        \n\t"
+            "swc1   %[f2],  4(%[z1])        \n\t"
+            "add.s  %[f8],  %[f8],  $f7     \n\t"
+            "swc1   %[f3],  8(%[z1])        \n\t"
+            "swc1   %[f4],  12(%[z1])       \n\t"
+            "swc1   %[f5],  16(%[z1])       \n\t"
+            "swc1   %[f6],  20(%[z1])       \n\t"
+            "swc1   %[f7],  24(%[z1])       \n\t"
+            "swc1   %[f8],  28(%[z1])       \n\t"
+
+            : [f1]"=&f"(f1), [f2]"=&f"(f2), [f3]"=&f"(f3),
+              [f4]"=&f"(f4), [f5]"=&f"(f5), [f6]"=&f"(f6),
+              [f7]"=&f"(f7), [f8]"=&f"(f8)
+            : [z1]"r"(z1)
+            : "$f0", "$f1", "$f2", "$f3", "$f4", "$f5",
+              "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
+              "$f12", "$f13", "$f14", "$f15",
+              "memory"
+        );
+    }
+}
+
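+/* Return the energy of n complex samples, i.e. the sum of re^2 + im^2,
+ * accumulated in two interleaved partial sums (sum0 and sum1). */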
+static float sbr_sum_square_mips(float (*x)[2], int n)
+{
+    float sum0 = 0.0f, sum1 = 0.0f;
+    float *p_x;
+    float temp0, temp1, temp2, temp3;
+    float *loop_end;
+    p_x = &x[0][0];
+    loop_end = p_x + (n >> 1)*4 - 4;
+
+    __asm__ volatile (
+        ".set      push                                             \n\t"
+        ".set      noreorder                                        \n\t"
+        "lwc1      %[temp0],   0(%[p_x])                            \n\t"
+        "lwc1      %[temp1],   4(%[p_x])                            \n\t"
+        "lwc1      %[temp2],   8(%[p_x])                            \n\t"
+        "lwc1      %[temp3],   12(%[p_x])                           \n\t"
+    "1:                                                             \n\t"
+        "addiu     %[p_x],     %[p_x],       16                     \n\t"
+        "madd.s    %[sum0],    %[sum0],      %[temp0],   %[temp0]   \n\t"
+        "lwc1      %[temp0],   0(%[p_x])                            \n\t"
+        "madd.s    %[sum1],    %[sum1],      %[temp1],   %[temp1]   \n\t"
+        "lwc1      %[temp1],   4(%[p_x])                            \n\t"
+        "madd.s    %[sum0],    %[sum0],      %[temp2],   %[temp2]   \n\t"
+        "lwc1      %[temp2],   8(%[p_x])                            \n\t"
+        "madd.s    %[sum1],    %[sum1],      %[temp3],   %[temp3]   \n\t"
+        "bne       %[p_x],     %[loop_end],  1b                     \n\t"
+        " lwc1     %[temp3],   12(%[p_x])                           \n\t"
+        "madd.s    %[sum0],    %[sum0],      %[temp0],   %[temp0]   \n\t"
+        "madd.s    %[sum1],    %[sum1],      %[temp1],   %[temp1]   \n\t"
+        "madd.s    %[sum0],    %[sum0],      %[temp2],   %[temp2]   \n\t"
+        "madd.s    %[sum1],    %[sum1],      %[temp3],   %[temp3]   \n\t"
+        ".set      pop                                              \n\t"
+
+        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+          [temp3]"=&f"(temp3), [sum0]"+f"(sum0), [sum1]"+f"(sum1),
+          [p_x]"+r"(p_x)
+        : [loop_end]"r"(loop_end)
+        : "memory"
+    );
+    return sum0 + sum1;
+}
+
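+/* Deinterleaving butterfly: v[i] = src0[i] - src1[63 - i] and
+ * v[127 - i] = src0[i] + src1[63 - i], 16 butterflies per iteration. */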
+static void sbr_qmf_deint_bfly_mips(float *v, const float *src0, const float *src1)
+{
+    int i;
+    float temp0, temp1, temp2, temp3, temp4, temp5;
+    float temp6, temp7, temp8, temp9, temp10, temp11;
+    float *v0 = v;
+    float *v1 = &v[127];
+    float *psrc0 = (float*)src0;
+    float *psrc1 = (float*)&src1[63];
+
+    for (i = 0; i < 4; i++) {
+
+        /* loop unrolled 16 times */
+        __asm__ volatile(
+            "lwc1       %[temp0],   0(%[src0])             \n\t"
+            "lwc1       %[temp1],   0(%[src1])             \n\t"
+            "lwc1       %[temp3],   4(%[src0])             \n\t"
+            "lwc1       %[temp4],   -4(%[src1])            \n\t"
+            "lwc1       %[temp6],   8(%[src0])             \n\t"
+            "lwc1       %[temp7],   -8(%[src1])            \n\t"
+            "lwc1       %[temp9],   12(%[src0])            \n\t"
+            "lwc1       %[temp10],  -12(%[src1])           \n\t"
+            "add.s      %[temp2],   %[temp0],   %[temp1]   \n\t"
+            "add.s      %[temp5],   %[temp3],   %[temp4]   \n\t"
+            "add.s      %[temp8],   %[temp6],   %[temp7]   \n\t"
+            "add.s      %[temp11],  %[temp9],   %[temp10]  \n\t"
+            "sub.s      %[temp0],   %[temp0],   %[temp1]   \n\t"
+            "sub.s      %[temp3],   %[temp3],   %[temp4]   \n\t"
+            "sub.s      %[temp6],   %[temp6],   %[temp7]   \n\t"
+            "sub.s      %[temp9],   %[temp9],   %[temp10]  \n\t"
+            "swc1       %[temp2],   0(%[v1])               \n\t"
+            "swc1       %[temp0],   0(%[v0])               \n\t"
+            "swc1       %[temp5],   -4(%[v1])              \n\t"
+            "swc1       %[temp3],   4(%[v0])               \n\t"
+            "swc1       %[temp8],   -8(%[v1])              \n\t"
+            "swc1       %[temp6],   8(%[v0])               \n\t"
+            "swc1       %[temp11],  -12(%[v1])             \n\t"
+            "swc1       %[temp9],   12(%[v0])              \n\t"
+            "lwc1       %[temp0],   16(%[src0])            \n\t"
+            "lwc1       %[temp1],   -16(%[src1])           \n\t"
+            "lwc1       %[temp3],   20(%[src0])            \n\t"
+            "lwc1       %[temp4],   -20(%[src1])           \n\t"
+            "lwc1       %[temp6],   24(%[src0])            \n\t"
+            "lwc1       %[temp7],   -24(%[src1])           \n\t"
+            "lwc1       %[temp9],   28(%[src0])            \n\t"
+            "lwc1       %[temp10],  -28(%[src1])           \n\t"
+            "add.s      %[temp2],   %[temp0],   %[temp1]   \n\t"
+            "add.s      %[temp5],   %[temp3],   %[temp4]   \n\t"
+            "add.s      %[temp8],   %[temp6],   %[temp7]   \n\t"
+            "add.s      %[temp11],  %[temp9],   %[temp10]  \n\t"
+            "sub.s      %[temp0],   %[temp0],   %[temp1]   \n\t"
+            "sub.s      %[temp3],   %[temp3],   %[temp4]   \n\t"
+            "sub.s      %[temp6],   %[temp6],   %[temp7]   \n\t"
+            "sub.s      %[temp9],   %[temp9],   %[temp10]  \n\t"
+            "swc1       %[temp2],   -16(%[v1])             \n\t"
+            "swc1       %[temp0],   16(%[v0])              \n\t"
+            "swc1       %[temp5],   -20(%[v1])             \n\t"
+            "swc1       %[temp3],   20(%[v0])              \n\t"
+            "swc1       %[temp8],   -24(%[v1])             \n\t"
+            "swc1       %[temp6],   24(%[v0])              \n\t"
+            "swc1       %[temp11],  -28(%[v1])             \n\t"
+            "swc1       %[temp9],   28(%[v0])              \n\t"
+            "lwc1       %[temp0],   32(%[src0])            \n\t"
+            "lwc1       %[temp1],   -32(%[src1])           \n\t"
+            "lwc1       %[temp3],   36(%[src0])            \n\t"
+            "lwc1       %[temp4],   -36(%[src1])           \n\t"
+            "lwc1       %[temp6],   40(%[src0])            \n\t"
+            "lwc1       %[temp7],   -40(%[src1])           \n\t"
+            "lwc1       %[temp9],   44(%[src0])            \n\t"
+            "lwc1       %[temp10],  -44(%[src1])           \n\t"
+            "add.s      %[temp2],   %[temp0],   %[temp1]   \n\t"
+            "add.s      %[temp5],   %[temp3],   %[temp4]   \n\t"
+            "add.s      %[temp8],   %[temp6],   %[temp7]   \n\t"
+            "add.s      %[temp11],  %[temp9],   %[temp10]  \n\t"
+            "sub.s      %[temp0],   %[temp0],   %[temp1]   \n\t"
+            "sub.s      %[temp3],   %[temp3],   %[temp4]   \n\t"
+            "sub.s      %[temp6],   %[temp6],   %[temp7]   \n\t"
+            "sub.s      %[temp9],   %[temp9],   %[temp10]  \n\t"
+            "swc1       %[temp2],   -32(%[v1])             \n\t"
+            "swc1       %[temp0],   32(%[v0])              \n\t"
+            "swc1       %[temp5],   -36(%[v1])             \n\t"
+            "swc1       %[temp3],   36(%[v0])              \n\t"
+            "swc1       %[temp8],   -40(%[v1])             \n\t"
+            "swc1       %[temp6],   40(%[v0])              \n\t"
+            "swc1       %[temp11],  -44(%[v1])             \n\t"
+            "swc1       %[temp9],   44(%[v0])              \n\t"
+            "lwc1       %[temp0],   48(%[src0])            \n\t"
+            "lwc1       %[temp1],   -48(%[src1])           \n\t"
+            "lwc1       %[temp3],   52(%[src0])            \n\t"
+            "lwc1       %[temp4],   -52(%[src1])           \n\t"
+            "lwc1       %[temp6],   56(%[src0])            \n\t"
+            "lwc1       %[temp7],   -56(%[src1])           \n\t"
+            "lwc1       %[temp9],   60(%[src0])            \n\t"
+            "lwc1       %[temp10],  -60(%[src1])           \n\t"
+            "add.s      %[temp2],   %[temp0],   %[temp1]   \n\t"
+            "add.s      %[temp5],   %[temp3],   %[temp4]   \n\t"
+            "add.s      %[temp8],   %[temp6],   %[temp7]   \n\t"
+            "add.s      %[temp11],  %[temp9],   %[temp10]  \n\t"
+            "sub.s      %[temp0],   %[temp0],   %[temp1]   \n\t"
+            "sub.s      %[temp3],   %[temp3],   %[temp4]   \n\t"
+            "sub.s      %[temp6],   %[temp6],   %[temp7]   \n\t"
+            "sub.s      %[temp9],   %[temp9],   %[temp10]  \n\t"
+            "swc1       %[temp2],   -48(%[v1])             \n\t"
+            "swc1       %[temp0],   48(%[v0])              \n\t"
+            "swc1       %[temp5],   -52(%[v1])             \n\t"
+            "swc1       %[temp3],   52(%[v0])              \n\t"
+            "swc1       %[temp8],   -56(%[v1])             \n\t"
+            "swc1       %[temp6],   56(%[v0])              \n\t"
+            "swc1       %[temp11],  -60(%[v1])             \n\t"
+            "swc1       %[temp9],   60(%[v0])              \n\t"
+            "addiu      %[src0],    %[src0],    64         \n\t"
+            "addiu      %[src1],    %[src1],    -64        \n\t"
+            "addiu      %[v0],      %[v0],      64         \n\t"
+            "addiu      %[v1],      %[v1],      -64        \n\t"
+
+            : [v0]"+r"(v0), [v1]"+r"(v1), [src0]"+r"(psrc0), [src1]"+r"(psrc1),
+              [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+              [temp9]"=&f"(temp9), [temp10]"=&f"(temp10), [temp11]"=&f"(temp11)
+            :
+            :"memory"
+        );
+    }
+}
+
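+/* Accumulate the lag-0, lag-1 and lag-2 autocorrelation sums of x[] and store
+ * the results in phi[][][]; MIPS counterpart of the autocorrelation in
+ * libavcodec/sbrdsp.c. */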
+static void sbr_autocorrelate_mips(const float x[40][2], float phi[3][2][2])
+{
+    int i;
+    float real_sum_0 = 0.0f;
+    float real_sum_1 = 0.0f;
+    float real_sum_2 = 0.0f;
+    float imag_sum_1 = 0.0f;
+    float imag_sum_2 = 0.0f;
+    float *p_x, *p_phi;
+    float temp0, temp1, temp2, temp3, temp4, temp5, temp6;
+    float temp7, temp_r, temp_r1, temp_r2, temp_r3, temp_r4;
+    p_x = (float*)&x[0][0];
+    p_phi = &phi[0][0][0];
+
+    __asm__ volatile (
+        "lwc1    %[temp0],      8(%[p_x])                           \n\t"
+        "lwc1    %[temp1],      12(%[p_x])                          \n\t"
+        "lwc1    %[temp2],      16(%[p_x])                          \n\t"
+        "lwc1    %[temp3],      20(%[p_x])                          \n\t"
+        "lwc1    %[temp4],      24(%[p_x])                          \n\t"
+        "lwc1    %[temp5],      28(%[p_x])                          \n\t"
+        "mul.s   %[temp_r],     %[temp1],      %[temp1]             \n\t"
+        "mul.s   %[temp_r1],    %[temp1],      %[temp3]             \n\t"
+        "mul.s   %[temp_r2],    %[temp1],      %[temp2]             \n\t"
+        "mul.s   %[temp_r3],    %[temp1],      %[temp5]             \n\t"
+        "mul.s   %[temp_r4],    %[temp1],      %[temp4]             \n\t"
+        "madd.s  %[temp_r],     %[temp_r],     %[temp0],  %[temp0]  \n\t"
+        "madd.s  %[temp_r1],    %[temp_r1],    %[temp0],  %[temp2]  \n\t"
+        "msub.s  %[temp_r2],    %[temp_r2],    %[temp0],  %[temp3]  \n\t"
+        "madd.s  %[temp_r3],    %[temp_r3],    %[temp0],  %[temp4]  \n\t"
+        "msub.s  %[temp_r4],    %[temp_r4],    %[temp0],  %[temp5]  \n\t"
+        "add.s   %[real_sum_0], %[real_sum_0], %[temp_r]            \n\t"
+        "add.s   %[real_sum_1], %[real_sum_1], %[temp_r1]           \n\t"
+        "add.s   %[imag_sum_1], %[imag_sum_1], %[temp_r2]           \n\t"
+        "add.s   %[real_sum_2], %[real_sum_2], %[temp_r3]           \n\t"
+        "add.s   %[imag_sum_2], %[imag_sum_2], %[temp_r4]           \n\t"
+        "addiu   %[p_x],        %[p_x],        8                    \n\t"
+
+        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+          [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+          [real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
+          [imag_sum_1]"+f"(imag_sum_1), [real_sum_2]"+f"(real_sum_2),
+          [temp_r]"=&f"(temp_r), [temp_r1]"=&f"(temp_r1), [temp_r2]"=&f"(temp_r2),
+          [temp_r3]"=&f"(temp_r3), [temp_r4]"=&f"(temp_r4),
+          [p_x]"+r"(p_x), [imag_sum_2]"+f"(imag_sum_2)
+        :
+        : "memory"
+    );
+
+    for (i = 0; i < 12; i++) {
+        __asm__ volatile (
+            "lwc1    %[temp0],      8(%[p_x])                           \n\t"
+            "lwc1    %[temp1],      12(%[p_x])                          \n\t"
+            "lwc1    %[temp2],      16(%[p_x])                          \n\t"
+            "lwc1    %[temp3],      20(%[p_x])                          \n\t"
+            "lwc1    %[temp4],      24(%[p_x])                          \n\t"
+            "lwc1    %[temp5],      28(%[p_x])                          \n\t"
+            "mul.s   %[temp_r],     %[temp1],      %[temp1]             \n\t"
+            "mul.s   %[temp_r1],    %[temp1],      %[temp3]             \n\t"
+            "mul.s   %[temp_r2],    %[temp1],      %[temp2]             \n\t"
+            "mul.s   %[temp_r3],    %[temp1],      %[temp5]             \n\t"
+            "mul.s   %[temp_r4],    %[temp1],      %[temp4]             \n\t"
+            "madd.s  %[temp_r],     %[temp_r],     %[temp0],  %[temp0]  \n\t"
+            "madd.s  %[temp_r1],    %[temp_r1],    %[temp0],  %[temp2]  \n\t"
+            "msub.s  %[temp_r2],    %[temp_r2],    %[temp0],  %[temp3]  \n\t"
+            "madd.s  %[temp_r3],    %[temp_r3],    %[temp0],  %[temp4]  \n\t"
+            "msub.s  %[temp_r4],    %[temp_r4],    %[temp0],  %[temp5]  \n\t"
+            "add.s   %[real_sum_0], %[real_sum_0], %[temp_r]            \n\t"
+            "add.s   %[real_sum_1], %[real_sum_1], %[temp_r1]           \n\t"
+            "add.s   %[imag_sum_1], %[imag_sum_1], %[temp_r2]           \n\t"
+            "add.s   %[real_sum_2], %[real_sum_2], %[temp_r3]           \n\t"
+            "add.s   %[imag_sum_2], %[imag_sum_2], %[temp_r4]           \n\t"
+            "lwc1    %[temp0],      32(%[p_x])                          \n\t"
+            "lwc1    %[temp1],      36(%[p_x])                          \n\t"
+            "mul.s   %[temp_r],     %[temp3],      %[temp3]             \n\t"
+            "mul.s   %[temp_r1],    %[temp3],      %[temp5]             \n\t"
+            "mul.s   %[temp_r2],    %[temp3],      %[temp4]             \n\t"
+            "mul.s   %[temp_r3],    %[temp3],      %[temp1]             \n\t"
+            "mul.s   %[temp_r4],    %[temp3],      %[temp0]             \n\t"
+            "madd.s  %[temp_r],     %[temp_r],     %[temp2],  %[temp2]  \n\t"
+            "madd.s  %[temp_r1],    %[temp_r1],    %[temp2],  %[temp4]  \n\t"
+            "msub.s  %[temp_r2],    %[temp_r2],    %[temp2],  %[temp5]  \n\t"
+            "madd.s  %[temp_r3],    %[temp_r3],    %[temp2],  %[temp0]  \n\t"
+            "msub.s  %[temp_r4],    %[temp_r4],    %[temp2],  %[temp1]  \n\t"
+            "add.s   %[real_sum_0], %[real_sum_0], %[temp_r]            \n\t"
+            "add.s   %[real_sum_1], %[real_sum_1], %[temp_r1]           \n\t"
+            "add.s   %[imag_sum_1], %[imag_sum_1], %[temp_r2]           \n\t"
+            "add.s   %[real_sum_2], %[real_sum_2], %[temp_r3]           \n\t"
+            "add.s   %[imag_sum_2], %[imag_sum_2], %[temp_r4]           \n\t"
+            "lwc1    %[temp2],      40(%[p_x])                          \n\t"
+            "lwc1    %[temp3],      44(%[p_x])                          \n\t"
+            "mul.s   %[temp_r],     %[temp5],      %[temp5]             \n\t"
+            "mul.s   %[temp_r1],    %[temp5],      %[temp1]             \n\t"
+            "mul.s   %[temp_r2],    %[temp5],      %[temp0]             \n\t"
+            "mul.s   %[temp_r3],    %[temp5],      %[temp3]             \n\t"
+            "mul.s   %[temp_r4],    %[temp5],      %[temp2]             \n\t"
+            "madd.s  %[temp_r],     %[temp_r],     %[temp4],  %[temp4]  \n\t"
+            "madd.s  %[temp_r1],    %[temp_r1],    %[temp4],  %[temp0]  \n\t"
+            "msub.s  %[temp_r2],    %[temp_r2],    %[temp4],  %[temp1]  \n\t"
+            "madd.s  %[temp_r3],    %[temp_r3],    %[temp4],  %[temp2]  \n\t"
+            "msub.s  %[temp_r4],    %[temp_r4],    %[temp4],  %[temp3]  \n\t"
+            "add.s   %[real_sum_0], %[real_sum_0], %[temp_r]            \n\t"
+            "add.s   %[real_sum_1], %[real_sum_1], %[temp_r1]           \n\t"
+            "add.s   %[imag_sum_1], %[imag_sum_1], %[temp_r2]           \n\t"
+            "add.s   %[real_sum_2], %[real_sum_2], %[temp_r3]           \n\t"
+            "add.s   %[imag_sum_2], %[imag_sum_2], %[temp_r4]           \n\t"
+            "addiu   %[p_x],        %[p_x],        24                   \n\t"
+
+            : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+              [real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
+              [imag_sum_1]"+f"(imag_sum_1), [real_sum_2]"+f"(real_sum_2),
+              [temp_r]"=&f"(temp_r), [temp_r1]"=&f"(temp_r1),
+              [temp_r2]"=&f"(temp_r2), [temp_r3]"=&f"(temp_r3),
+              [temp_r4]"=&f"(temp_r4), [p_x]"+r"(p_x),
+              [imag_sum_2]"+f"(imag_sum_2)
+            :
+            : "memory"
+        );
+    }
+    __asm__ volatile (
+        "lwc1    %[temp0],    -296(%[p_x])                        \n\t"
+        "lwc1    %[temp1],    -292(%[p_x])                        \n\t"
+        "lwc1    %[temp2],    8(%[p_x])                           \n\t"
+        "lwc1    %[temp3],    12(%[p_x])                          \n\t"
+        "lwc1    %[temp4],    -288(%[p_x])                        \n\t"
+        "lwc1    %[temp5],    -284(%[p_x])                        \n\t"
+        "lwc1    %[temp6],    -280(%[p_x])                        \n\t"
+        "lwc1    %[temp7],    -276(%[p_x])                        \n\t"
+        "madd.s  %[temp_r],   %[real_sum_0], %[temp0],  %[temp0]  \n\t"
+        "madd.s  %[temp_r1],  %[real_sum_0], %[temp2],  %[temp2]  \n\t"
+        "madd.s  %[temp_r2],  %[real_sum_1], %[temp0],  %[temp4]  \n\t"
+        "madd.s  %[temp_r3],  %[imag_sum_1], %[temp0],  %[temp5]  \n\t"
+        "madd.s  %[temp_r],   %[temp_r],     %[temp1],  %[temp1]  \n\t"
+        "madd.s  %[temp_r1],  %[temp_r1],    %[temp3],  %[temp3]  \n\t"
+        "madd.s  %[temp_r2],  %[temp_r2],    %[temp1],  %[temp5]  \n\t"
+        "nmsub.s %[temp_r3],  %[temp_r3],    %[temp1],  %[temp4]  \n\t"
+        "lwc1    %[temp4],    16(%[p_x])                          \n\t"
+        "lwc1    %[temp5],    20(%[p_x])                          \n\t"
+        "swc1    %[temp_r],   40(%[p_phi])                        \n\t"
+        "swc1    %[temp_r1],  16(%[p_phi])                        \n\t"
+        "swc1    %[temp_r2],  24(%[p_phi])                        \n\t"
+        "swc1    %[temp_r3],  28(%[p_phi])                        \n\t"
+        "madd.s  %[temp_r],   %[real_sum_1], %[temp2],  %[temp4]  \n\t"
+        "madd.s  %[temp_r1],  %[imag_sum_1], %[temp2],  %[temp5]  \n\t"
+        "madd.s  %[temp_r2],  %[real_sum_2], %[temp0],  %[temp6]  \n\t"
+        "madd.s  %[temp_r3],  %[imag_sum_2], %[temp0],  %[temp7]  \n\t"
+        "madd.s  %[temp_r],   %[temp_r],     %[temp3],  %[temp5]  \n\t"
+        "nmsub.s %[temp_r1],  %[temp_r1],    %[temp3],  %[temp4]  \n\t"
+        "madd.s  %[temp_r2],  %[temp_r2],    %[temp1],  %[temp7]  \n\t"
+        "nmsub.s %[temp_r3],  %[temp_r3],    %[temp1],  %[temp6]  \n\t"
+        "swc1    %[temp_r],   0(%[p_phi])                         \n\t"
+        "swc1    %[temp_r1],  4(%[p_phi])                         \n\t"
+        "swc1    %[temp_r2],  8(%[p_phi])                         \n\t"
+        "swc1    %[temp_r3],  12(%[p_phi])                        \n\t"
+
+        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+          [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+          [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp_r]"=&f"(temp_r),
+          [real_sum_0]"+f"(real_sum_0), [real_sum_1]"+f"(real_sum_1),
+          [real_sum_2]"+f"(real_sum_2), [imag_sum_1]"+f"(imag_sum_1),
+          [temp_r2]"=&f"(temp_r2), [temp_r3]"=&f"(temp_r3),
+          [temp_r1]"=&f"(temp_r1), [p_phi]"+r"(p_phi),
+          [imag_sum_2]"+f"(imag_sum_2)
+        : [p_x]"r"(p_x)
+        : "memory"
+    );
+}
+
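+/* High-frequency generation: X_high[i] = X_low[i] plus a complex two-tap
+ * prediction from X_low[i - 1] and X_low[i - 2], weighted by alpha0 * bw and
+ * alpha1 * bw * bw. */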
+static void sbr_hf_gen_mips(float (*X_high)[2], const float (*X_low)[2],
+                         const float alpha0[2], const float alpha1[2],
+                         float bw, int start, int end)
+{
+    float alpha[4];
+    int i;
+    float *p_x_low = (float*)&X_low[0][0] + 2*start;
+    float *p_x_high = &X_high[0][0] + 2*start;
+    float temp0, temp1, temp2, temp3, temp4, temp5, temp6;
+    float temp7, temp8, temp9, temp10, temp11, temp12;
+
+    alpha[0] = alpha1[0] * bw * bw;
+    alpha[1] = alpha1[1] * bw * bw;
+    alpha[2] = alpha0[0] * bw;
+    alpha[3] = alpha0[1] * bw;
+
+    for (i = start; i < end; i++) {
+        __asm__ volatile (
+            "lwc1    %[temp0],    -16(%[p_x_low])                        \n\t"
+            "lwc1    %[temp1],    -12(%[p_x_low])                        \n\t"
+            "lwc1    %[temp2],    -8(%[p_x_low])                         \n\t"
+            "lwc1    %[temp3],    -4(%[p_x_low])                         \n\t"
+            "lwc1    %[temp5],    0(%[p_x_low])                          \n\t"
+            "lwc1    %[temp6],    4(%[p_x_low])                          \n\t"
+            "lwc1    %[temp7],    0(%[alpha])                            \n\t"
+            "lwc1    %[temp8],    4(%[alpha])                            \n\t"
+            "lwc1    %[temp9],    8(%[alpha])                            \n\t"
+            "lwc1    %[temp10],   12(%[alpha])                           \n\t"
+            "addiu   %[p_x_high], %[p_x_high],     8                     \n\t"
+            "addiu   %[p_x_low],  %[p_x_low],      8                     \n\t"
+            "mul.s   %[temp11],   %[temp1],        %[temp8]              \n\t"
+            "msub.s  %[temp11],   %[temp11],       %[temp0],  %[temp7]   \n\t"
+            "madd.s  %[temp11],   %[temp11],       %[temp2],  %[temp9]   \n\t"
+            "nmsub.s %[temp11],   %[temp11],       %[temp3],  %[temp10]  \n\t"
+            "add.s   %[temp11],   %[temp11],       %[temp5]              \n\t"
+            "swc1    %[temp11],   -8(%[p_x_high])                        \n\t"
+            "mul.s   %[temp12],   %[temp1],        %[temp7]              \n\t"
+            "madd.s  %[temp12],   %[temp12],       %[temp0],  %[temp8]   \n\t"
+            "madd.s  %[temp12],   %[temp12],       %[temp3],  %[temp9]   \n\t"
+            "madd.s  %[temp12],   %[temp12],       %[temp2],  %[temp10]  \n\t"
+            "add.s   %[temp12],   %[temp12],       %[temp6]              \n\t"
+            "swc1    %[temp12],   -4(%[p_x_high])                        \n\t"
+
+            : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
+              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7), [temp8]"=&f"(temp8),
+              [temp9]"=&f"(temp9), [temp10]"=&f"(temp10), [temp11]"=&f"(temp11),
+              [temp12]"=&f"(temp12), [p_x_high]"+r"(p_x_high),
+              [p_x_low]"+r"(p_x_low)
+            : [alpha]"r"(alpha)
+            : "memory"
+        );
+    }
+}
+
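+/* Gain filtering: Y[m] = X_high[m][ixh] * g_filt[m] for m = 0..m_max - 1;
+ * the 320-byte stride steps through the rows of X_high[m][40][2]. */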
+static void sbr_hf_g_filt_mips(float (*Y)[2], const float (*X_high)[40][2],
+                            const float *g_filt, int m_max, intptr_t ixh)
+{
+    float *p_y, *p_x, *p_g;
+    float temp0, temp1, temp2;
+    int loop_end;
+
+    p_g = (float*)&g_filt[0];
+    p_y = &Y[0][0];
+    p_x = (float*)&X_high[0][ixh][0];
+    loop_end = (int)((int*)p_g + m_max);
+
+    __asm__ volatile(
+        ".set    push                                \n\t"
+        ".set    noreorder                           \n\t"
+    "1:                                              \n\t"
+        "lwc1    %[temp0],   0(%[p_g])               \n\t"
+        "lwc1    %[temp1],   0(%[p_x])               \n\t"
+        "lwc1    %[temp2],   4(%[p_x])               \n\t"
+        "mul.s   %[temp1],   %[temp1],     %[temp0]  \n\t"
+        "mul.s   %[temp2],   %[temp2],     %[temp0]  \n\t"
+        "addiu   %[p_g],     %[p_g],       4         \n\t"
+        "addiu   %[p_x],     %[p_x],       320       \n\t"
+        "swc1    %[temp1],   0(%[p_y])               \n\t"
+        "swc1    %[temp2],   4(%[p_y])               \n\t"
+        "bne     %[p_g],     %[loop_end],  1b        \n\t"
+        " addiu  %[p_y],     %[p_y],       8         \n\t"
+        ".set    pop                                 \n\t"
+
+        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
+          [temp2]"=&f"(temp2), [p_x]"+r"(p_x),
+          [p_y]"+r"(p_y), [p_g]"+r"(p_g)
+        : [loop_end]"r"(loop_end)
+        : "memory"
+    );
+}
+
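+/* Noise/sinusoid injection, phase 0: advance the noise table index, add
+ * s_m[m] to the real part of Y[m] and, when s_m[m] is zero, add q_filt[m]
+ * scaled by the selected ff_sbr_noise_table entry to both parts instead. */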
+static void sbr_hf_apply_noise_0_mips(float (*Y)[2], const float *s_m,
+                                 const float *q_filt, int noise,
+                                 int kx, int m_max)
+{
+    int m;
+
+    for (m = 0; m < m_max; m++) {
+
+        float *Y1 = &Y[m][0];
+        float *ff_table;
+        float y0, y1, temp1, temp2, temp4, temp5;
+        int temp0, temp3;
+        const float *s_m1 = &s_m[m];
+        const float *q_filt1 = &q_filt[m];
+
+        __asm__ volatile(
+            "lwc1    %[y0],       0(%[Y1])                                    \n\t"
+            "lwc1    %[temp1],    0(%[s_m1])                                  \n\t"
+            "addiu   %[noise],    %[noise],              1                    \n\t"
+            "andi    %[noise],    %[noise],              0x1ff                \n\t"
+            "sll     %[temp0],    %[noise], 3                                 \n\t"
+            "addu    %[ff_table], %[ff_sbr_noise_table], %[temp0]             \n\t"
+            "add.s   %[y0],       %[y0],                 %[temp1]             \n\t"
+            "mfc1    %[temp3],    %[temp1]                                    \n\t"
+            "bne     %[temp3],    $0,                    1f                   \n\t"
+            "lwc1    %[y1],       4(%[Y1])                                    \n\t"
+            "lwc1    %[temp2],    0(%[q_filt1])                               \n\t"
+            "lwc1    %[temp4],    0(%[ff_table])                              \n\t"
+            "lwc1    %[temp5],    4(%[ff_table])                              \n\t"
+            "madd.s  %[y0],       %[y0],                 %[temp2],  %[temp4]  \n\t"
+            "madd.s  %[y1],       %[y1],                 %[temp2],  %[temp5]  \n\t"
+            "swc1    %[y1],       4(%[Y1])                                    \n\t"
+        "1:                                                                   \n\t"
+            "swc1    %[y0],       0(%[Y1])                                    \n\t"
+
+            : [ff_table]"=&r"(ff_table), [y0]"=&f"(y0), [y1]"=&f"(y1),
+              [temp0]"=&r"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [temp3]"=&r"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
+            : [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
+              [Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1)
+            : "memory"
+        );
+    }
+}
+
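+/* Phase 1: as above, but the sinusoid s_m[m] is added to the imaginary part
+ * with a sign (phi_sign) that alternates with m. */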
+static void sbr_hf_apply_noise_1_mips(float (*Y)[2], const float *s_m,
+                                 const float *q_filt, int noise,
+                                 int kx, int m_max)
+{
+    float y0, y1, temp1, temp2, temp4, temp5;
+    int temp0, temp3, m;
+    float phi_sign = 1 - 2 * (kx & 1);
+
+    for (m = 0; m < m_max; m++) {
+
+        float *ff_table;
+        float *Y1 = &Y[m][0];
+        const float *s_m1 = &s_m[m];
+        const float *q_filt1 = &q_filt[m];
+
+        __asm__ volatile(
+            "lwc1   %[y1],       4(%[Y1])                                     \n\t"
+            "lwc1   %[temp1],    0(%[s_m1])                                   \n\t"
+            "lw     %[temp3],    0(%[s_m1])                                   \n\t"
+            "addiu  %[noise],    %[noise],               1                    \n\t"
+            "andi   %[noise],    %[noise],               0x1ff                \n\t"
+            "sll    %[temp0],    %[noise],               3                    \n\t"
+            "addu   %[ff_table], %[ff_sbr_noise_table], %[temp0]              \n\t"
+            "madd.s %[y1],       %[y1],                 %[temp1], %[phi_sign] \n\t"
+            "bne    %[temp3],    $0,                    1f                    \n\t"
+            "lwc1   %[y0],       0(%[Y1])                                     \n\t"
+            "lwc1   %[temp2],    0(%[q_filt1])                                \n\t"
+            "lwc1   %[temp4],    0(%[ff_table])                               \n\t"
+            "lwc1   %[temp5],    4(%[ff_table])                               \n\t"
+            "madd.s %[y0],       %[y0],                 %[temp2], %[temp4]    \n\t"
+            "madd.s %[y1],       %[y1],                 %[temp2], %[temp5]    \n\t"
+            "swc1   %[y0],       0(%[Y1])                                     \n\t"
+        "1:                                                                   \n\t"
+            "swc1   %[y1],       4(%[Y1])                                     \n\t"
+
+            : [ff_table] "=&r" (ff_table), [y0] "=&f" (y0), [y1] "=&f" (y1),
+              [temp0] "=&r" (temp0), [temp1] "=&f" (temp1), [temp2] "=&f" (temp2),
+              [temp3] "=&r" (temp3), [temp4] "=&f" (temp4), [temp5] "=&f" (temp5)
+            : [ff_sbr_noise_table] "r" (ff_sbr_noise_table), [noise] "r" (noise),
+              [Y1] "r" (Y1), [s_m1] "r" (s_m1), [q_filt1] "r" (q_filt1),
+              [phi_sign] "f" (phi_sign)
+            : "memory"
+        );
+        phi_sign = -phi_sign;
+    }
+}
+
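+/* Phase 2: the sinusoid s_m[m] is subtracted from the real part; the noise
+ * branch is identical to phase 0. */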
+static void sbr_hf_apply_noise_2_mips(float (*Y)[2], const float *s_m,
+                                 const float *q_filt, int noise,
+                                 int kx, int m_max)
+{
+    int m;
+    float *ff_table;
+    float y0, y1, temp0, temp1, temp2, temp3, temp4, temp5;
+
+    for (m = 0; m < m_max; m++) {
+
+        float *Y1 = &Y[m][0];
+        const float *s_m1 = &s_m[m];
+        const float *q_filt1 = &q_filt[m];
+
+        __asm__ volatile(
+            "lwc1   %[y0],       0(%[Y1])                                  \n\t"
+            "lwc1   %[temp1],    0(%[s_m1])                                \n\t"
+            "addiu  %[noise],    %[noise],              1                  \n\t"
+            "andi   %[noise],    %[noise],              0x1ff              \n\t"
+            "sll    %[temp0],    %[noise],              3                  \n\t"
+            "addu   %[ff_table], %[ff_sbr_noise_table], %[temp0]           \n\t"
+            "sub.s  %[y0],       %[y0],                 %[temp1]           \n\t"
+            "mfc1   %[temp3],    %[temp1]                                  \n\t"
+            "bne    %[temp3],    $0,                    1f                 \n\t"
+            "lwc1   %[y1],       4(%[Y1])                                  \n\t"
+            "lwc1   %[temp2],    0(%[q_filt1])                             \n\t"
+            "lwc1   %[temp4],    0(%[ff_table])                            \n\t"
+            "lwc1   %[temp5],    4(%[ff_table])                            \n\t"
+            "madd.s %[y0],       %[y0],                 %[temp2], %[temp4] \n\t"
+            "madd.s %[y1],       %[y1],                 %[temp2], %[temp5] \n\t"
+            "swc1   %[y1],       4(%[Y1])                                  \n\t"
+        "1:                                                                \n\t"
+            "swc1   %[y0],       0(%[Y1])                                  \n\t"
+
+            : [temp0]"=&r"(temp0), [ff_table]"=&r"(ff_table), [y0]"=&f"(y0),
+              [y1]"=&f"(y1), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [temp3]"=&r"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
+            : [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
+              [Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1)
+            : "memory"
+        );
+    }
+}
+
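+/* For each band m: when s_m[m] is non-zero, s_m[m] * phi_sign is subtracted
+ * from Y[m][1] (phi_sign alternates in sign, starting from 1 - 2 * (kx & 1));
+ * otherwise q_filt[m] * ff_sbr_noise_table[noise] is added to both
+ * components.  The noise index advances as (noise + 1) & 0x1ff per band. */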
+static void sbr_hf_apply_noise_3_mips(float (*Y)[2], const float *s_m,
+                                 const float *q_filt, int noise,
+                                 int kx, int m_max)
+{
+    float phi_sign = 1 - 2 * (kx & 1);
+    int m;
+
+    for (m = 0; m < m_max; m++) {
+
+        float *Y1 = &Y[m][0];
+        float *ff_table;
+        float y0, y1, temp1, temp2, temp4, temp5;
+        int temp0, temp3;
+        const float *s_m1 = &s_m[m];
+        const float *q_filt1 = &q_filt[m];
+
+        __asm__ volatile(
+            "lwc1    %[y1],       4(%[Y1])                                     \n\t"
+            "lwc1    %[temp1],    0(%[s_m1])                                   \n\t"
+            "addiu   %[noise],    %[noise],              1                     \n\t"
+            "andi    %[noise],    %[noise],              0x1ff                 \n\t"
+            "sll     %[temp0],    %[noise],              3                     \n\t"
+            "addu    %[ff_table], %[ff_sbr_noise_table], %[temp0]              \n\t"
+            "nmsub.s %[y1],       %[y1],                 %[temp1], %[phi_sign] \n\t"
+            "mfc1    %[temp3],    %[temp1]                                     \n\t"
+            "bne     %[temp3],    $0,                    1f                    \n\t"
+            "lwc1    %[y0],       0(%[Y1])                                     \n\t"
+            "lwc1    %[temp2],    0(%[q_filt1])                                \n\t"
+            "lwc1    %[temp4],    0(%[ff_table])                               \n\t"
+            "lwc1    %[temp5],    4(%[ff_table])                               \n\t"
+            "madd.s  %[y0],       %[y0],                 %[temp2], %[temp4]    \n\t"
+            "madd.s  %[y1],       %[y1],                 %[temp2], %[temp5]    \n\t"
+            "swc1    %[y0],       0(%[Y1])                                     \n\t"
+            "1:                                                                \n\t"
+            "swc1    %[y1],       4(%[Y1])                                     \n\t"
+
+            : [ff_table]"=&r"(ff_table), [y0]"=&f"(y0), [y1]"=&f"(y1),
+              [temp0]"=&r"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
+              [temp3]"=&r"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5)
+            : [ff_sbr_noise_table]"r"(ff_sbr_noise_table), [noise]"r"(noise),
+              [Y1]"r"(Y1), [s_m1]"r"(s_m1), [q_filt1]"r"(q_filt1),
+              [phi_sign]"f"(phi_sign)
+            : "memory"
+        );
+        phi_sign = -phi_sign;
+    }
+}
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+
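+/* Install the MIPS-optimized SBR DSP routines; the entries guarded by
+ * HAVE_MIPSFPU additionally require the MIPS floating-point unit. */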
+void ff_sbrdsp_init_mips(SBRDSPContext *s)
+{
+#if HAVE_INLINE_ASM
+    s->neg_odd_64 = sbr_neg_odd_64_mips;
+    s->qmf_pre_shuffle = sbr_qmf_pre_shuffle_mips;
+    s->qmf_post_shuffle = sbr_qmf_post_shuffle_mips;
+#if HAVE_MIPSFPU
+    s->sum64x5 = sbr_sum64x5_mips;
+    s->sum_square = sbr_sum_square_mips;
+    s->qmf_deint_bfly = sbr_qmf_deint_bfly_mips;
+    s->autocorrelate = sbr_autocorrelate_mips;
+    s->hf_gen = sbr_hf_gen_mips;
+    s->hf_g_filt = sbr_hf_g_filt_mips;
+
+    s->hf_apply_noise[0] = sbr_hf_apply_noise_0_mips;
+    s->hf_apply_noise[1] = sbr_hf_apply_noise_1_mips;
+    s->hf_apply_noise[2] = sbr_hf_apply_noise_2_mips;
+    s->hf_apply_noise[3] = sbr_hf_apply_noise_3_mips;
+#endif /* HAVE_MIPSFPU */
+#endif /* HAVE_INLINE_ASM */
+}
diff --git a/libavcodec/sbr.h b/libavcodec/sbr.h
index 1e41396..e28fccd 100644
--- a/libavcodec/sbr.h
+++ b/libavcodec/sbr.h
@@ -34,6 +34,8 @@
 #include "aacps.h"
 #include "sbrdsp.h"
 
+typedef struct AACContext AACContext;
+
 /**
  * Spectral Band Replication header - spectrum parameters that invoke a reset if they differ from the previous header.
  */
@@ -108,10 +110,31 @@ typedef struct SBRData {
     /** @} */
 } SBRData;
 
+typedef struct SpectralBandReplication SpectralBandReplication;
+
+/**
+ * aacsbr function pointers
+ */
+typedef struct AACSBRContext {
+    int (*sbr_lf_gen)(AACContext *ac, SpectralBandReplication *sbr,
+                      float X_low[32][40][2], const float W[2][32][32][2],
+                      int buf_idx);
+    void (*sbr_hf_assemble)(float Y1[38][64][2],
+                            const float X_high[64][40][2],
+                            SpectralBandReplication *sbr, SBRData *ch_data,
+                            const int e_a[2]);
+    int (*sbr_x_gen)(SpectralBandReplication *sbr, float X[2][38][64],
+                     const float Y0[38][64][2], const float Y1[38][64][2],
+                     const float X_low[32][40][2], int ch);
+    void (*sbr_hf_inverse_filter)(SBRDSPContext *dsp,
+                                  float (*alpha0)[2], float (*alpha1)[2],
+                                  const float X_low[32][40][2], int k0);
+} AACSBRContext;
+
 /**
  * Spectral Band Replication
  */
-typedef struct SpectralBandReplication {
+struct SpectralBandReplication {
     int                sample_rate;
     int                start;
     int                reset;
@@ -184,6 +207,7 @@ typedef struct SpectralBandReplication {
     FFTContext         mdct_ana;
     FFTContext         mdct;
     SBRDSPContext      dsp;
-} SpectralBandReplication;
+    AACSBRContext      c;
+};
 
 #endif /* AVCODEC_SBR_H */
diff --git a/libavcodec/sbrdsp.c b/libavcodec/sbrdsp.c
index 781ec83..6fede79 100644
--- a/libavcodec/sbrdsp.c
+++ b/libavcodec/sbrdsp.c
@@ -245,4 +245,6 @@ av_cold void ff_sbrdsp_init(SBRDSPContext *s)
         ff_sbrdsp_init_arm(s);
     if (ARCH_X86)
         ff_sbrdsp_init_x86(s);
+    if (ARCH_MIPS)
+        ff_sbrdsp_init_mips(s);
 }
diff --git a/libavcodec/sbrdsp.h b/libavcodec/sbrdsp.h
index 07235c6..3831135 100644
--- a/libavcodec/sbrdsp.h
+++ b/libavcodec/sbrdsp.h
@@ -47,5 +47,6 @@ extern const float ff_sbr_noise_table[][2];
 void ff_sbrdsp_init(SBRDSPContext *s);
 void ff_sbrdsp_init_arm(SBRDSPContext *s);
 void ff_sbrdsp_init_x86(SBRDSPContext *s);
+void ff_sbrdsp_init_mips(SBRDSPContext *s);
 
 #endif /* AVCODEC_SBRDSP_H */
-- 
1.7.3.4


