[FFmpeg-devel] [PATCH] Altivec split-radix FFT

Loren Merritt lorenm
Tue Sep 29 21:18:49 CEST 2009


On Fri, 18 Sep 2009, Måns Rullgård wrote:
> On Fri, 18 Sep 2009, Loren Merritt wrote:
>
>> Now without Apple workarounds.
>>
>> diff --git a/configure b/configure
>> index 6aa6177..fdb6ade 100755
>> --- a/configure
>> +++ b/configure
>> @@ -933,6 +933,7 @@ HAVE_LIST="
>>      fast_cmov
>>      fast_unaligned
>>      fork
>> +    fsf_gas
>>      gethrtime
>>      GetProcessTimes
>>      getrusage
>> @@ -2108,6 +2109,7 @@ elif enabled mips; then
>>  elif enabled ppc; then
>>
>>      check_asm dcbzl     '"dcbzl 0, 1"'
>> +    check_asm fsf_gas   '"add 0, 0, 0"'
>>      check_asm ppc4xx    '"maclhw r10, r11, r12"'
>>      check_asm xform_asm '"lwzx 0, %y0" :: "Z"(*(int*)0)'
>
> This is the syntax the IBM PPC documentation uses, so branding it fsf
> seems a bit wrong.

changed.
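
For reference, the probe itself is tiny: check_asm just asks the compiler
to assemble the given instruction string and enables the HAVE_* flag only
if that succeeds. A rough sketch of what gets compiled (the exact configure
wrapper differs slightly; the helper name below is illustrative):

    /* If the assembler dialect rejects bare register numbers such as
     * "add 0, 0, 0", this fails to build and the feature stays disabled. */
    static void probe_bare_register_syntax(void)
    {
        __asm__ volatile ("add 0, 0, 0");
    }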

>> diff --git a/libavcodec/fft.c b/libavcodec/fft.c
>> index c827139..52bc73a 100644
>> --- a/libavcodec/fft.c
>> +++ b/libavcodec/fft.c
>> @@ -89,7 +89,7 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
>>      s->split_radix = 1;
>>
>>      if (ARCH_ARM)     ff_fft_init_arm(s);
>> -    if (HAVE_ALTIVEC) ff_fft_init_altivec(s);
>> +    if (HAVE_ALTIVEC && HAVE_FSF_GAS) ff_fft_init_altivec(s);
>>      if (HAVE_MMX)     ff_fft_init_mmx(s);
>>
>>      if (s->split_radix) {
>
> I'd rather hide this condition inside ff_fft_init_altivec().

changed.
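
That is, ff_fft_init_altivec() is now always compiled and called, and the
assembler-support check is internal to it; the init function in the
attached patch reduces to:

    av_cold void ff_fft_init_altivec(FFTContext *s)
    {
    #if HAVE_IBM_GAS
        s->fft_calc = ff_fft_calc_altivec;
    #endif
    }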

>> diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
>> index 83b2b7f..235f205 100644
>> --- a/libavcodec/ppc/fft_altivec.c
>> +++ b/libavcodec/ppc/fft_altivec.c
>> [...]
>> +extern FFTSample ff_cos_16[];
>
> Isn't this declared in some header file?  If not, it should be.
>
>> +// pointers to functions. but unlike function pointers on some PPC ABIs, these aren't function descriptors.
>
> What does that mean?  Not saying it's wrong, I just don't understand it.

<malc> ff_fft_dispatch_altivec[s->nbits-2] forces gcc to assume it's TOC
  and leads to funny results such as the ones guys on the ML have mentioned.
<malc> the table should be declared as one just containing plain pointers
  (void * for instance) so that the compiler won't try to be smart about it.
<pengvado> function pointers are different from normal pointers on ppc?
<malc> on AIX PPC32 and PPC64 (any ABI) those point to a function
  descriptor.
<malc> which is basically a pair of function address and a TOC
  corresponding to said function.
<malc> same is true for IA64 i think.
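
A minimal sketch of what a function descriptor looks like on those ABIs
(the layout below is illustrative only; the exact fields and their order
are ABI-specific), and why the patch declares the table as plain pointers:

    /* On AIX-style PPC ABIs a "function pointer" refers to a descriptor,
     * roughly: */
    struct fdesc {
        void *entry; /* address of the actual code */
        void *toc;   /* TOC pointer to load before calling */
        /* some ABIs add an environment pointer as a third word */
    };

    /* The asm file stores raw code addresses, so the C side declares the
     * dispatch table as plain pointers and never lets the compiler treat
     * the entries as function pointers (as done in the patch): */
    extern void *ff_fft_dispatch_altivec[2][15];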

>>      s->fft_permute = ff_fft_permute_c;
>>      s->fft_calc    = ff_fft_calc_c;
>>      s->imdct_calc  = ff_imdct_calc_c;
>>      s->imdct_half  = ff_imdct_half_c;
>> -    s->mdct_calc   = ff_mdct_calc_c;
>
> I think that line should stay.

changed.

>> +static void ff_imdct_half_altivec(MDCTContext *s, FFTSample *output, const FFTSample *input)
>> +{
>> +    int j, k;
>> +    int n = 1 << s->nbits;
>> +    int n4 = n >> 2;
>> +    int n8 = n >> 3;
>> +    int n32 = n >> 5;
>> +    const uint16_t *revtabj = s->fft.revtab;
>> +    const uint16_t *revtabk = s->fft.revtab+n4;
>> +    const vec_f *tcos = (const vec_f*)(s->tcos+n8);
>> +    const vec_f *tsin = (const vec_f*)(s->tsin+n8);
>> +    const vec_f *pin = (const vec_f*)(input+n4);
>> +    vec_f *pout = (vec_f*)(output+n4);
>
> Why the intrinsics here?

Because gcc doesn't screw it up. I tried an asm version, but it's 100 
lines longer and not at all faster.
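
To illustrate why the intrinsic version stays short: gcc's vector
extensions let the complex rotations be written almost like scalar code.
A minimal sketch of the post-rotation multiply, the same arithmetic as the
CMULB macro in patch 3 (the helper name and wrapper are mine, and it
assumes vec_f is the "vector float" typedef from types_altivec.h):

    /* One complex multiply over 4 lanes; d0/d1 are the two vectors that
     * CMULB then feeds to vec_perm for reordering. */
    static inline void cmul_post(vec_f re, vec_f im, vec_f cos, vec_f sin,
                                 vec_f *d0, vec_f *d1)
    {
        *d0 = im*sin - re*cos;
        *d1 = re*sin + im*cos;
    }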

--Loren Merritt
-------------- next part --------------
From 2e1bbb7292236c504a5f617d439258ce5ceec789 Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado@akuvian.org>
Date: Sat, 22 Aug 2009 11:22:09 +0100
Subject: [PATCH 1/3] altivec split-radix FFT
 1.8x faster than altivec radix-2 on a G4
 8% faster vorbis decoding

---
 configure                      |    2 +
 libavcodec/Makefile            |    4 +-
 libavcodec/dsputil.h           |   13 ++
 libavcodec/ppc/asm.S           |   59 ++++++++
 libavcodec/ppc/fft_altivec.c   |  156 +++++++-------------
 libavcodec/ppc/fft_altivec_s.S |  322 ++++++++++++++++++++++++++++++++++++++++
 libavcodec/ppc/types_altivec.h |    1 +
 7 files changed, 456 insertions(+), 101 deletions(-)
 create mode 100644 libavcodec/ppc/asm.S
 create mode 100644 libavcodec/ppc/fft_altivec_s.S

diff --git a/configure b/configure
index b174965..7b0d998 100755
--- a/configure
+++ b/configure
@@ -936,6 +936,7 @@ HAVE_LIST="
     gethrtime
     GetProcessTimes
     getrusage
+    ibm_gas
     inet_aton
     inline_asm
     isatty
@@ -2116,6 +2117,7 @@ elif enabled mips; then
 elif enabled ppc; then
 
     check_asm dcbzl     '"dcbzl 0, 1"'
+    check_asm ibm_gas   '"add 0, 0, 0"'
     check_asm ppc4xx    '"maclhw r10, r11, r12"'
     check_asm xform_asm '"lwzx 0, %y0" :: "Z"(*(int*)0)'
 
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 8aabeb9..e3c325c 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -523,6 +523,9 @@ OBJS-$(ARCH_BFIN)                      += bfin/dsputil_bfin.o           \
 
 OBJS-$(ARCH_PPC)                       += ppc/dsputil_ppc.o             \
 
+ALTIVEC-OBJS-FFT-$(HAVE_IBM_GAS)       += ppc/fft_altivec_s.o
+ALTIVEC-OBJS-$(CONFIG_FFT)             += ppc/fft_altivec.o             \
+                                          $(ALTIVEC-OBJS-FFT-yes)
 ALTIVEC-OBJS-$(CONFIG_H264_DECODER)    += ppc/h264_altivec.o
 ALTIVEC-OBJS-$(CONFIG_VC1_DECODER)     += ppc/vc1dsp_altivec.o
 ALTIVEC-OBJS-$(CONFIG_VP3_DECODER)     += ppc/vp3dsp_altivec.o
@@ -532,7 +535,6 @@ ALTIVEC-OBJS-$(CONFIG_VP6_DECODER)     += ppc/vp3dsp_altivec.o
 OBJS-$(HAVE_ALTIVEC)                   += ppc/check_altivec.o           \
                                           ppc/dsputil_altivec.o         \
                                           ppc/fdct_altivec.o            \
-                                          ppc/fft_altivec.o             \
                                           ppc/float_altivec.o           \
                                           ppc/gmc_altivec.o             \
                                           ppc/idct_altivec.o            \
diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index 58d5b49..6c612c7 100644
--- a/libavcodec/dsputil.h
+++ b/libavcodec/dsputil.h
@@ -742,6 +742,19 @@ typedef struct FFTContext {
 #define FF_MDCT_PERM_INTERLEAVE 1
 } FFTContext;
 
+extern FFTSample ff_cos_16[8];
+extern FFTSample ff_cos_32[16];
+extern FFTSample ff_cos_64[32];
+extern FFTSample ff_cos_128[64];
+extern FFTSample ff_cos_256[128];
+extern FFTSample ff_cos_512[256];
+extern FFTSample ff_cos_1024[512];
+extern FFTSample ff_cos_2048[1024];
+extern FFTSample ff_cos_4096[2048];
+extern FFTSample ff_cos_8192[4096];
+extern FFTSample ff_cos_16384[8192];
+extern FFTSample ff_cos_32768[16384];
+extern FFTSample ff_cos_65536[32768];
 extern FFTSample* const ff_cos_tabs[13];
 
 /**
diff --git a/libavcodec/ppc/asm.S b/libavcodec/ppc/asm.S
new file mode 100644
index 0000000..f272710
--- /dev/null
+++ b/libavcodec/ppc/asm.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2009 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+.macro DEFINE_REG n
+.equiv r\n, \n
+.equiv f\n, \n
+.equiv v\n, \n
+.endm
+DEFINE_REG 0
+DEFINE_REG 1
+DEFINE_REG 2
+DEFINE_REG 3
+DEFINE_REG 4
+DEFINE_REG 5
+DEFINE_REG 6
+DEFINE_REG 7
+DEFINE_REG 8
+DEFINE_REG 9
+DEFINE_REG 10
+DEFINE_REG 11
+DEFINE_REG 12
+DEFINE_REG 13
+DEFINE_REG 14
+DEFINE_REG 15
+DEFINE_REG 16
+DEFINE_REG 17
+DEFINE_REG 18
+DEFINE_REG 19
+DEFINE_REG 20
+DEFINE_REG 21
+DEFINE_REG 22
+DEFINE_REG 23
+DEFINE_REG 24
+DEFINE_REG 25
+DEFINE_REG 26
+DEFINE_REG 27
+DEFINE_REG 28
+DEFINE_REG 29
+DEFINE_REG 30
+DEFINE_REG 31
diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
index 83b2b7f..a4fb86d 100644
--- a/libavcodec/ppc/fft_altivec.c
+++ b/libavcodec/ppc/fft_altivec.c
@@ -1,8 +1,7 @@
 /*
  * FFT/IFFT transforms
  * AltiVec-enabled
- * Copyright (c) 2003 Romain Dolbeau <romain at dolbeau.org>
- * Based on code Copyright (c) 2002 Fabrice Bellard
+ * Copyright (c) 2009 Loren Merritt
  *
  * This file is part of FFmpeg.
  *
@@ -23,6 +22,7 @@
 #include "libavcodec/dsputil.h"
 #include "dsputil_ppc.h"
 #include "util_altivec.h"
+#include "types_altivec.h"
 /**
  * Do a complex FFT with the parameters defined in ff_fft_init(). The
  * input data must be permuted before with s->revtab table. No
@@ -30,112 +30,68 @@
  * AltiVec-enabled
  * This code assumes that the 'z' pointer is 16 bytes-aligned
  * It also assumes all FFTComplex are 8 bytes-aligned pair of float
- * The code is exactly the same as the SSE version, except
- * that successive MUL + ADD/SUB have been merged into
- * fused multiply-add ('vec_madd' in altivec)
  */
-void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
-{
-POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6);
-    register const vector float vczero = (const vector float)vec_splat_u32(0.);
-
-    int ln = s->nbits;
-    int j, np, np2;
-    int nblocks, nloops;
-    register FFTComplex *p, *q;
-    FFTComplex *cptr, *cptr1;
-    int k;
-
-POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6);
-
-    np = 1 << ln;
-
-    {
-        vector float *r, a, b, a1, c1, c2;
-
-        r = (vector float *)&z[0];
-
-        c1 = vcii(p,p,n,n);
-
-        if (s->inverse) {
-            c2 = vcii(p,p,n,p);
-        } else {
-            c2 = vcii(p,p,p,n);
-        }
-
-        j = (np >> 2);
-        do {
-            a = vec_ld(0, r);
-            a1 = vec_ld(sizeof(vector float), r);
-
-            b = vec_perm(a,a,vcprmle(1,0,3,2));
-            a = vec_madd(a,c1,b);
-            /* do the pass 0 butterfly */
-
-            b = vec_perm(a1,a1,vcprmle(1,0,3,2));
-            b = vec_madd(a1,c1,b);
-            /* do the pass 0 butterfly */
 
-            /* multiply third by -i */
-            b = vec_perm(b,b,vcprmle(2,3,1,0));
+#if HAVE_IBM_GAS
+// Pointers to functions. Not using function pointer syntax, because
+// that involves an extra level of indirection on some PPC ABIs.
+extern void *ff_fft_dispatch_altivec[2][15];
 
-            /* do the pass 1 butterfly */
-            vec_st(vec_madd(b,c2,a), 0, r);
-            vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r);
-
-            r += 2;
-        } while (--j != 0);
+// Convert from simd order to C order.
+static void swizzle(vec_f *z, int n)
+{
+    int i;
+    n >>= 1;
+    for (i = 0; i < n; i += 2) {
+        vec_f re = z[i];
+        vec_f im = z[i+1];
+        z[i]   = vec_mergeh(re, im);
+        z[i+1] = vec_mergel(re, im);
     }
-    /* pass 2 .. ln-1 */
-
-    nblocks = np >> 3;
-    nloops = 1 << 2;
-    np2 = np >> 1;
-
-    cptr1 = s->exptab1;
-    do {
-        p = z;
-        q = z + nloops;
-        j = nblocks;
-        do {
-            cptr = cptr1;
-            k = nloops >> 1;
-            do {
-                vector float a,b,c,t1;
-
-                a = vec_ld(0, (float*)p);
-                b = vec_ld(0, (float*)q);
-
-                /* complex mul */
-                c = vec_ld(0, (float*)cptr);
-                /*  cre*re cim*re */
-                t1 = vec_madd(c, vec_perm(b,b,vcprmle(2,2,0,0)),vczero);
-                c = vec_ld(sizeof(vector float), (float*)cptr);
-                /*  -cim*im cre*im */
-                b = vec_madd(c, vec_perm(b,b,vcprmle(3,3,1,1)),t1);
-
-                /* butterfly */
-                vec_st(vec_add(a,b), 0, (float*)p);
-                vec_st(vec_sub(a,b), 0, (float*)q);
-
-                p += 2;
-                q += 2;
-                cptr += 4;
-            } while (--k);
-
-            p += nloops;
-            q += nloops;
-        } while (--j);
-        cptr1 += nloops * 2;
-        nblocks = nblocks >> 1;
-        nloops = nloops << 1;
-    } while (nblocks != 0);
+}
 
-POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
+void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
+{
+    register vec_f  v14 __asm__("v14") = {0,0,0,0};
+    register vec_f  v15 __asm__("v15") = *(const vec_f*)ff_cos_16;
+    register vec_f  v16 __asm__("v16") = {0, 0.38268343, M_SQRT1_2, 0.92387953};
+    register vec_f  v17 __asm__("v17") = {-M_SQRT1_2, M_SQRT1_2, M_SQRT1_2,-M_SQRT1_2};
+    register vec_f  v18 __asm__("v18") = { M_SQRT1_2, M_SQRT1_2, M_SQRT1_2, M_SQRT1_2};
+    register vec_u8 v19 __asm__("v19") = vcprm(s0,3,2,1);
+    register vec_u8 v20 __asm__("v20") = vcprm(0,1,s2,s1);
+    register vec_u8 v21 __asm__("v21") = vcprm(2,3,s0,s3);
+    register vec_u8 v22 __asm__("v22") = vcprm(2,s3,3,s2);
+    register vec_u8 v23 __asm__("v23") = vcprm(0,1,s0,s1);
+    register vec_u8 v24 __asm__("v24") = vcprm(2,3,s2,s3);
+    register vec_u8 v25 __asm__("v25") = vcprm(2,3,0,1);
+    register vec_u8 v26 __asm__("v26") = vcprm(1,2,s3,s0);
+    register vec_u8 v27 __asm__("v27") = vcprm(0,3,s2,s1);
+    register vec_u8 v28 __asm__("v28") = vcprm(0,2,s1,s3);
+    register vec_u8 v29 __asm__("v29") = vcprm(1,3,s0,s2);
+    register FFTSample *const*cos_tabs __asm__("r12") = ff_cos_tabs;
+    register FFTComplex *zarg __asm__("r3") = z;
+    __asm__(
+        "mtctr %0 \n"
+        "li   9,16 \n"
+        "subi 1,1,%1 \n"
+        "bctrl \n"
+        "addi 1,1,%1 \n"
+        ::"r"(ff_fft_dispatch_altivec[1][s->nbits-2]), "i"(12*sizeof(void*)),
+          "r"(zarg), "r"(cos_tabs),
+          "v"(v14),"v"(v15),"v"(v16),"v"(v17),"v"(v18),"v"(v19),"v"(v20),"v"(v21),
+          "v"(v22),"v"(v23),"v"(v24),"v"(v25),"v"(v26),"v"(v27),"v"(v28),"v"(v29)
+        : "lr","ctr","r0","r4","r5","r6","r7","r8","r9","r10","r11",
+          "v0","v1","v2","v3","v4","v5","v6","v7","v8","v9","v10","v11","v12","v13"
+    );
+    if (s->nbits <= 4)
+        swizzle((vec_f*)z, 1<<s->nbits);
 }
+#endif // HAVE_IBM_GAS
 
 av_cold void ff_fft_init_altivec(FFTContext *s)
 {
+#if HAVE_IBM_GAS
     s->fft_calc = ff_fft_calc_altivec;
-    s->split_radix = 0;
+    s->split_radix = 1;
+#endif
 }
diff --git a/libavcodec/ppc/fft_altivec_s.S b/libavcodec/ppc/fft_altivec_s.S
new file mode 100644
index 0000000..d6717b5
--- /dev/null
+++ b/libavcodec/ppc/fft_altivec_s.S
@@ -0,0 +1,322 @@
+/*
+ * FFT transform with Altivec optimizations
+ * Copyright (c) 2009 Loren Merritt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+ * These functions are not individually interchangeable with the C versions.
+ * While C takes arrays of FFTComplex, Altivec leaves intermediate results
+ * in blocks as convenient to the vector size.
+ * i.e. {4x real, 4x imaginary, 4x real, ...}
+ *
+ * I ignore standard calling convention.
+ * Instead, the following registers are treated as global constants:
+ * v14: zero
+ * v15..v18: cosines
+ * v19..v29: permutations
+ * r9: 16
+ * r12: ff_cos_tabs
+ * and the rest are free for local use.
+ */
+
+#include "asm.S"
+
+.text
+
+.macro slwi ra, rb, imm
+    rlwinm \ra, \rb, \imm, 0, 31-\imm
+.endm
+
+.macro addi2 ra, imm // add 32-bit immediate
+.if \imm & 0xffff
+    addi \ra, \ra, \imm@l
+.endif
+.if (\imm+0x8000)>>16
+    addis \ra, \ra, \imm@ha
+.endif
+.endm
+
+#if _ARCH_PPC64
+#define PTR .quad
+.macro LOAD_PTR ra, rbase, offset
+    ld  \ra,(\offset)*8(\rbase)
+.endm
+.macro STORE_PTR ra, rbase, offset
+    std \ra,(\offset)*8(\rbase)
+.endm
+#else
+#define PTR .int
+.macro LOAD_PTR ra, rbase, offset
+    lwz \ra,(\offset)*4(\rbase)
+.endm
+.macro STORE_PTR ra, rbase, offset
+    stw \ra,(\offset)*4(\rbase)
+.endm
+#endif
+
+.macro FFT4 a0, a1, a2, a3 // in:0-1 out:2-3
+    vperm   \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
+    vperm   \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
+    vaddfp  \a0,\a2,\a3                         // {t1,t2,t6,t5}
+    vsubfp  \a1,\a2,\a3                         // {t3,t4,t8,t7}
+    vmrghw  \a2,\a0,\a1     // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
+    vperm   \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
+    vaddfp  \a0,\a2,\a3                         // {r0,r1,i0,i1}
+    vsubfp  \a1,\a2,\a3                         // {r2,r3,i2,i3}
+    vperm   \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
+    vperm   \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
+.endm
+
+.macro FFT4x2 a0, a1, b0, b1, a2, a3, b2, b3
+    vperm   \a2,\a0,\a1,v20 // vcprm(0,1,s2,s1) // {r0,i0,r3,i2}
+    vperm   \a3,\a0,\a1,v21 // vcprm(2,3,s0,s3) // {r1,i1,r2,i3}
+    vperm   \b2,\b0,\b1,v20
+    vperm   \b3,\b0,\b1,v21
+    vaddfp  \a0,\a2,\a3                         // {t1,t2,t6,t5}
+    vsubfp  \a1,\a2,\a3                         // {t3,t4,t8,t7}
+    vaddfp  \b0,\b2,\b3
+    vsubfp  \b1,\b2,\b3
+    vmrghw  \a2,\a0,\a1     // vcprm(0,s0,1,s1) // {t1,t3,t2,t4}
+    vperm   \a3,\a0,\a1,v22 // vcprm(2,s3,3,s2) // {t6,t7,t5,t8}
+    vmrghw  \b2,\b0,\b1
+    vperm   \b3,\b0,\b1,v22
+    vaddfp  \a0,\a2,\a3                         // {r0,r1,i0,i1}
+    vsubfp  \a1,\a2,\a3                         // {r2,r3,i2,i3}
+    vaddfp  \b0,\b2,\b3
+    vsubfp  \b1,\b2,\b3
+    vperm   \a2,\a0,\a1,v23 // vcprm(0,1,s0,s1) // {r0,r1,r2,r3}
+    vperm   \a3,\a0,\a1,v24 // vcprm(2,3,s2,s3) // {i0,i1,i2,i3}
+    vperm   \b2,\b0,\b1,v23
+    vperm   \b3,\b0,\b1,v24
+.endm
+
+.macro FFT8 a0, a1, b0, b1, a2, a3, b2, b3, b4 // in,out:a0-b1
+    vmrghw  \b2,\b0,\b1     // vcprm(0,s0,1,s1) // {r4,r6,i4,i6}
+    vmrglw  \b3,\b0,\b1     // vcprm(2,s2,3,s3) // {r5,r7,i5,i7}
+    vperm   \a2,\a0,\a1,v20         // FFT4 ...
+    vperm   \a3,\a0,\a1,v21
+    vaddfp  \b0,\b2,\b3                         // {t1,t3,t2,t4}
+    vsubfp  \b1,\b2,\b3                         // {r5,r7,i5,i7}
+    vperm   \b4,\b1,\b1,v25 // vcprm(2,3,0,1)   // {i5,i7,r5,r7}
+    vaddfp  \a0,\a2,\a3
+    vsubfp  \a1,\a2,\a3
+    vmaddfp \b1,\b1,v17,v14 // * {-1,1,1,-1}/sqrt(2)
+    vmaddfp \b1,\b4,v18,\b1 // * { 1,1,1,1 }/sqrt(2) // {t8,ta,t7,t9}
+    vmrghw  \a2,\a0,\a1
+    vperm   \a3,\a0,\a1,v22
+    vperm   \b2,\b0,\b1,v26 // vcprm(1,2,s3,s0) // {t3,t2,t9,t8}
+    vperm   \b3,\b0,\b1,v27 // vcprm(0,3,s2,s1) // {t1,t4,t7,ta}
+    vaddfp  \a0,\a2,\a3
+    vsubfp  \a1,\a2,\a3
+    vaddfp  \b0,\b2,\b3                         // {t1,t2,t9,ta}
+    vsubfp  \b1,\b2,\b3                         // {t6,t5,tc,tb}
+    vperm   \a2,\a0,\a1,v23
+    vperm   \a3,\a0,\a1,v24
+    vperm   \b2,\b0,\b1,v28 // vcprm(0,2,s1,s3) // {t1,t9,t5,tb}
+    vperm   \b3,\b0,\b1,v29 // vcprm(1,3,s0,s2) // {t2,ta,t6,tc}
+    vsubfp  \b0,\a2,\b2                         // {r4,r5,r6,r7}
+    vsubfp  \b1,\a3,\b3                         // {i4,i5,i6,i7}
+    vaddfp  \a0,\a2,\b2                         // {r0,r1,r2,r3}
+    vaddfp  \a1,\a3,\b3                         // {i0,i1,i2,i3}
+.endm
+
+.macro BF d0,d1,s0,s1
+    vsubfp  \d1,\s0,\s1
+    vaddfp  \d0,\s0,\s1
+.endm
+
+fft4_altivec:
+    lvx    v0, 0,r3
+    lvx    v1,r9,r3
+    FFT4   v0,v1,v2,v3
+    stvx   v2, 0,r3
+    stvx   v3,r9,r3
+    blr
+
+fft8_altivec:
+    addi   r4,r3,32
+    lvx    v0, 0,r3
+    lvx    v1,r9,r3
+    lvx    v2, 0,r4
+    lvx    v3,r9,r4
+    FFT8   v0,v1,v2,v3,v4,v5,v6,v7,v8
+    stvx   v0, 0,r3
+    stvx   v1,r9,r3
+    stvx   v2, 0,r4
+    stvx   v3,r9,r4
+    blr
+
+fft16_altivec:
+    addi   r5,r3,64
+    addi   r6,r3,96
+    addi   r4,r3,32
+    lvx    v0, 0,r5
+    lvx    v1,r9,r5
+    lvx    v2, 0,r6
+    lvx    v3,r9,r6
+    FFT4x2 v0,v1,v2,v3,v4,v5,v6,v7
+    lvx    v0, 0,r3
+    lvx    v1,r9,r3
+    lvx    v2, 0,r4
+    lvx    v3,r9,r4
+    FFT8   v0,v1,v2,v3,v8,v9,v10,v11,v12
+    vmaddfp   v8,v4,v15,v14 // r2*wre
+    vmaddfp   v9,v5,v15,v14 // i2*wre
+    vmaddfp  v10,v6,v15,v14 // r3*wre
+    vmaddfp  v11,v7,v15,v14 // i3*wre
+    vmaddfp   v8,v5,v16,v8  // i2*wim
+    vnmsubfp  v9,v4,v16,v9  // r2*wim
+    vnmsubfp v10,v7,v16,v10 // i3*wim
+    vmaddfp  v11,v6,v16,v11 // r3*wim
+    BF     v10,v12,v10,v8
+    BF     v11,v13,v9,v11
+    BF     v0,v4,v0,v10
+    BF     v3,v7,v3,v12
+    stvx   v0, 0,r3
+    stvx   v4, 0,r5
+    stvx   v3,r9,r4
+    stvx   v7,r9,r6
+    BF     v1,v5,v1,v11
+    BF     v2,v6,v2,v13
+    stvx   v1,r9,r3
+    stvx   v5,r9,r5
+    stvx   v2, 0,r4
+    stvx   v6, 0,r6
+    blr
+
+// void pass(float *z, float *wre, int n)
+.macro PASS interleave, suffix
+fft_pass\suffix\()_altivec:
+    mtctr  5
+    slwi   r0,r5,4
+    slwi   r7,r5,6   // o2
+    slwi   r5,r5,5   // o1
+    add   r10,r5,r7  // o3
+    add    r0,r4,r0  // wim
+    addi   r6,r5,16  // o1+16
+    addi   r8,r7,16  // o2+16
+    addi  r11,r10,16 // o3+16
+1:
+    lvx    v8, 0,r4  // wre
+    lvx   v10, 0,r0  // wim
+    sub    r0,r0,r9
+    lvx    v9, 0,r0
+    vperm  v9,v9,v10,v19   // vcprm(s0,3,2,1) => wim[0 .. -3]
+    lvx    v4,r3,r7        // r2 = z[o2]
+    lvx    v5,r3,r8        // i2 = z[o2+16]
+    lvx    v6,r3,r10       // r3 = z[o3]
+    lvx    v7,r3,r11       // i3 = z[o3+16]
+    vmaddfp  v10,v4,v8,v14 // r2*wre
+    vmaddfp  v11,v5,v8,v14 // i2*wre
+    vmaddfp  v12,v6,v8,v14 // r3*wre
+    vmaddfp  v13,v7,v8,v14 // i3*wre
+    lvx    v0, 0,r3        // r0 = z[0]
+    lvx    v3,r3,r6        // i1 = z[o1+16]
+    vmaddfp  v10,v5,v9,v10 // i2*wim
+    vnmsubfp v11,v4,v9,v11 // r2*wim
+    vnmsubfp v12,v7,v9,v12 // i3*wim
+    vmaddfp  v13,v6,v9,v13 // r3*wim
+    lvx    v1,r3,r9        // i0 = z[16]
+    lvx    v2,r3,r5        // r1 = z[o1]
+    BF     v12,v8,v12,v10
+    BF     v13,v9,v11,v13
+    BF     v0,v4,v0,v12
+    BF     v3,v7,v3,v8
+.if !\interleave
+    stvx   v0, 0,r3
+    stvx   v4,r3,r7
+    stvx   v3,r3,r6
+    stvx   v7,r3,r11
+.endif
+    BF     v1,v5,v1,v13
+    BF     v2,v6,v2,v9
+.if !\interleave
+    stvx   v1,r3,r9
+    stvx   v2,r3,r5
+    stvx   v5,r3,r8
+    stvx   v6,r3,r10
+.else
+    vmrghw v8,v0,v1
+    vmrglw v9,v0,v1
+    stvx   v8, 0,r3
+    stvx   v9,r3,r9
+    vmrghw v8,v2,v3
+    vmrglw v9,v2,v3
+    stvx   v8,r3,r5
+    stvx   v9,r3,r6
+    vmrghw v8,v4,v5
+    vmrglw v9,v4,v5
+    stvx   v8,r3,r7
+    stvx   v9,r3,r8
+    vmrghw v8,v6,v7
+    vmrglw v9,v6,v7
+    stvx   v8,r3,r10
+    stvx   v9,r3,r11
+.endif
+    addi   r3,r3,32
+    addi   r4,r4,16
+    bdnz 1b
+    sub    r3,r3,r5
+    blr
+.endm
+
+.macro DECL_FFT suffix, bits, n, n2, n4
+fft\n\suffix\()_altivec:
+    mflr  r0
+    STORE_PTR r0,r1,\bits-5
+    bl    fft\n2\()_altivec
+    addi2 r3,\n*4
+    bl    fft\n4\()_altivec
+    addi2 r3,\n*2
+    bl    fft\n4\()_altivec
+    addi2 r3,\n*-6
+    LOAD_PTR r0,r1,\bits-5
+    LOAD_PTR r4,r12,\bits-4
+    mtlr  r0
+    li    r5,\n/16
+    b     fft_pass\suffix\()_altivec
+.endm
+
+.macro DECL_FFTS interleave, suffix
+PASS \interleave, \suffix
+DECL_FFT \suffix,5,32,16,8
+DECL_FFT \suffix,6,64,32,16
+DECL_FFT \suffix,7,128,64,32
+DECL_FFT \suffix,8,256,128,64
+DECL_FFT \suffix,9,512,256,128
+DECL_FFT \suffix,10,1024,512,256
+DECL_FFT \suffix,11,2048,1024,512
+DECL_FFT \suffix,12,4096,2048,1024
+DECL_FFT \suffix,13,8192,4096,2048
+DECL_FFT \suffix,14,16384,8192,4096
+DECL_FFT \suffix,15,32768,16384,8192
+DECL_FFT \suffix,16,65536,32768,16384
+.rodata
+.global ff_fft_dispatch\suffix\()_altivec
+ff_fft_dispatch\suffix\()_altivec:
+PTR fft4_altivec, fft8_altivec, fft16_altivec, fft32\suffix\()_altivec,\
+    fft64\suffix\()_altivec, fft128\suffix\()_altivec, fft256\suffix\()_altivec,\
+    fft512\suffix\()_altivec, fft1024\suffix\()_altivec, fft2048\suffix\()_altivec,\
+    fft4096\suffix\()_altivec, fft8192\suffix\()_altivec, fft16384\suffix\()_altivec,\
+    fft32768\suffix\()_altivec, fft65536\suffix\()_altivec
+.text
+.endm
+
+DECL_FFTS 0
+DECL_FFTS 1, _interleave
diff --git a/libavcodec/ppc/types_altivec.h b/libavcodec/ppc/types_altivec.h
index 2870e83..36b6e1f 100644
--- a/libavcodec/ppc/types_altivec.h
+++ b/libavcodec/ppc/types_altivec.h
@@ -30,6 +30,7 @@
 #define vec_s16 vector signed short
 #define vec_u32 vector unsigned int
 #define vec_s32 vector signed int
+#define vec_f   vector float
 
 /***********************************************************************
  * Null vector
-- 
1.6.4.4

-------------- next part --------------
From e01bbf582ecd5b981fb58af0df1632a9670fa2b1 Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado@akuvian.org>
Date: Wed, 26 Aug 2009 09:44:32 +0100
Subject: [PATCH 2/3] remove vestiges of radix-2 FFT

---
 libavcodec/dsputil.h         |    3 -
 libavcodec/fft.c             |   82 +++---------------------------------------------------
 libavcodec/ppc/fft_altivec.c |    1 -
 3 files changed, 5 insertions(+), 81 deletions(-)

diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index 6c612c7..1c7fb99 100644
--- a/libavcodec/dsputil.h
+++ b/libavcodec/dsputil.h
@@ -723,8 +723,6 @@ typedef struct FFTContext {
     int nbits;
     int inverse;
     uint16_t *revtab;
-    FFTComplex *exptab;
-    FFTComplex *exptab1; /* only used by SSE code */
     FFTComplex *tmp_buf;
     int mdct_size; /* size of MDCT (i.e. number of input data * 2) */
     int mdct_bits; /* n = 2^nbits */
@@ -736,7 +734,6 @@ typedef struct FFTContext {
     void (*imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
     void (*imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
     void (*mdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
-    int split_radix;
     int permutation;
 #define FF_MDCT_PERM_NONE       0
 #define FF_MDCT_PERM_INTERLEAVE 1
diff --git a/libavcodec/fft.c b/libavcodec/fft.c
index c827139..f9280b1 100644
--- a/libavcodec/fft.c
+++ b/libavcodec/fft.c
@@ -60,39 +60,31 @@ static int split_radix_permutation(int i, int n, int inverse)
 
 av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
 {
-    int i, j, m, n;
-    float alpha, c1, s1, s2;
-    int av_unused has_vectors;
+    int i, j, n;
 
     if (nbits < 2 || nbits > 16)
         goto fail;
     s->nbits = nbits;
     n = 1 << nbits;
 
-    s->tmp_buf = NULL;
-    s->exptab  = av_malloc((n / 2) * sizeof(FFTComplex));
-    if (!s->exptab)
-        goto fail;
     s->revtab = av_malloc(n * sizeof(uint16_t));
     if (!s->revtab)
         goto fail;
+    s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
+    if (!s->tmp_buf)
+        goto fail;
     s->inverse = inverse;
 
-    s2 = inverse ? 1.0 : -1.0;
-
     s->fft_permute = ff_fft_permute_c;
     s->fft_calc    = ff_fft_calc_c;
     s->imdct_calc  = ff_imdct_calc_c;
     s->imdct_half  = ff_imdct_half_c;
     s->mdct_calc   = ff_mdct_calc_c;
-    s->exptab1     = NULL;
-    s->split_radix = 1;
 
     if (ARCH_ARM)     ff_fft_init_arm(s);
     if (HAVE_ALTIVEC) ff_fft_init_altivec(s);
     if (HAVE_MMX)     ff_fft_init_mmx(s);
 
-    if (s->split_radix) {
         for(j=4; j<=nbits; j++) {
             int m = 1<<j;
             double freq = 2*M_PI/m;
@@ -104,91 +96,27 @@ av_cold int ff_fft_init(FFTContext *s, int nbits, int inverse)
         }
         for(i=0; i<n; i++)
             s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] = i;
-        s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
-    } else {
-        int np, nblocks, np2, l;
-        FFTComplex *q;
-
-        for(i=0; i<(n/2); i++) {
-            alpha = 2 * M_PI * (float)i / (float)n;
-            c1 = cos(alpha);
-            s1 = sin(alpha) * s2;
-            s->exptab[i].re = c1;
-            s->exptab[i].im = s1;
-        }
-
-        np = 1 << nbits;
-        nblocks = np >> 3;
-        np2 = np >> 1;
-        s->exptab1 = av_malloc(np * 2 * sizeof(FFTComplex));
-        if (!s->exptab1)
-            goto fail;
-        q = s->exptab1;
-        do {
-            for(l = 0; l < np2; l += 2 * nblocks) {
-                *q++ = s->exptab[l];
-                *q++ = s->exptab[l + nblocks];
-
-                q->re = -s->exptab[l].im;
-                q->im = s->exptab[l].re;
-                q++;
-                q->re = -s->exptab[l + nblocks].im;
-                q->im = s->exptab[l + nblocks].re;
-                q++;
-            }
-            nblocks = nblocks >> 1;
-        } while (nblocks != 0);
-        av_freep(&s->exptab);
-
-        /* compute bit reverse table */
-        for(i=0;i<n;i++) {
-            m=0;
-            for(j=0;j<nbits;j++) {
-                m |= ((i >> j) & 1) << (nbits-j-1);
-            }
-            s->revtab[i]=m;
-        }
-    }
 
     return 0;
  fail:
     av_freep(&s->revtab);
-    av_freep(&s->exptab);
-    av_freep(&s->exptab1);
     av_freep(&s->tmp_buf);
     return -1;
 }
 
 void ff_fft_permute_c(FFTContext *s, FFTComplex *z)
 {
-    int j, k, np;
-    FFTComplex tmp;
+    int j, np;
     const uint16_t *revtab = s->revtab;
     np = 1 << s->nbits;
-
-    if (s->tmp_buf) {
         /* TODO: handle split-radix permute in a more optimal way, probably in-place */
         for(j=0;j<np;j++) s->tmp_buf[revtab[j]] = z[j];
         memcpy(z, s->tmp_buf, np * sizeof(FFTComplex));
-        return;
-    }
-
-    /* reverse */
-    for(j=0;j<np;j++) {
-        k = revtab[j];
-        if (k < j) {
-            tmp = z[k];
-            z[k] = z[j];
-            z[j] = tmp;
-        }
-    }
 }
 
 av_cold void ff_fft_end(FFTContext *s)
 {
     av_freep(&s->revtab);
-    av_freep(&s->exptab);
-    av_freep(&s->exptab1);
     av_freep(&s->tmp_buf);
 }
 
diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
index a4fb86d..77e2a19 100644
--- a/libavcodec/ppc/fft_altivec.c
+++ b/libavcodec/ppc/fft_altivec.c
@@ -92,6 +92,5 @@ av_cold void ff_fft_init_altivec(FFTContext *s)
 {
 #if HAVE_IBM_GAS
     s->fft_calc = ff_fft_calc_altivec;
-    s->split_radix = 1;
 #endif
 }
-------------- next part --------------
From 62827a1e10486c801f0edec0fc2738598cff746c Mon Sep 17 00:00:00 2001
From: Loren Merritt <pengvado@akuvian.org>
Date: Wed, 26 Aug 2009 10:08:06 +0100
Subject: [PATCH 3/3] altivec imdct
 1.8x faster than C iMDCT (excluding the FFT part) on a G4
 10% faster vorbis decoding

---
 libavcodec/ppc/fft_altivec.c |  113 ++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 110 insertions(+), 3 deletions(-)

diff --git a/libavcodec/ppc/fft_altivec.c b/libavcodec/ppc/fft_altivec.c
index 77e2a19..697896b 100644
--- a/libavcodec/ppc/fft_altivec.c
+++ b/libavcodec/ppc/fft_altivec.c
@@ -50,7 +50,7 @@ static void swizzle(vec_f *z, int n)
     }
 }
 
-void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
+static av_always_inline void fft_dispatch(FFTContext *s, FFTComplex *z, int do_swizzle)
 {
     register vec_f  v14 __asm__("v14") = {0,0,0,0};
     register vec_f  v15 __asm__("v15") = *(const vec_f*)ff_cos_16;
@@ -76,21 +76,128 @@ void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
         "subi 1,1,%1 \n"
         "bctrl \n"
         "addi 1,1,%1 \n"
-        ::"r"(ff_fft_dispatch_altivec[1][s->nbits-2]), "i"(12*sizeof(void*)),
+        ::"r"(ff_fft_dispatch_altivec[do_swizzle][s->nbits-2]), "i"(12*sizeof(void*)),
           "r"(zarg), "r"(cos_tabs),
           "v"(v14),"v"(v15),"v"(v16),"v"(v17),"v"(v18),"v"(v19),"v"(v20),"v"(v21),
           "v"(v22),"v"(v23),"v"(v24),"v"(v25),"v"(v26),"v"(v27),"v"(v28),"v"(v29)
         : "lr","ctr","r0","r4","r5","r6","r7","r8","r9","r10","r11",
           "v0","v1","v2","v3","v4","v5","v6","v7","v8","v9","v10","v11","v12","v13"
     );
-    if (s->nbits <= 4)
+    if (do_swizzle && s->nbits <= 4)
         swizzle((vec_f*)z, 1<<s->nbits);
 }
+
+static void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z)
+{
+    fft_dispatch(s, z, 1);
+}
+
+static void ff_imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+    int j, k;
+    int n = 1 << s->mdct_bits;
+    int n4 = n >> 2;
+    int n8 = n >> 3;
+    int n32 = n >> 5;
+    const uint16_t *revtabj = s->revtab;
+    const uint16_t *revtabk = s->revtab+n4;
+    const vec_f *tcos = (const vec_f*)(s->tcos+n8);
+    const vec_f *tsin = (const vec_f*)(s->tsin+n8);
+    const vec_f *pin = (const vec_f*)(input+n4);
+    vec_f *pout = (vec_f*)(output+n4);
+
+    /* pre rotation */
+    k = n32-1;
+    do {
+        vec_f cos,sin,cos0,sin0,cos1,sin1,re,im,r0,i0,r1,i1,a,b,c,d;
+#define CMULA(p,o0,o1,o2,o3)\
+        a = pin[ k*2+p];                       /* { z[k].re,    z[k].im,    z[k+1].re,  z[k+1].im  } */\
+        b = pin[-k*2-p-1];                     /* { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im } */\
+        re = vec_perm(a, b, vcprm(0,2,s0,s2)); /* { z[k].re,    z[k+1].re,  z[-k-2].re, z[-k-1].re } */\
+        im = vec_perm(a, b, vcprm(s3,s1,3,1)); /* { z[-k-1].im, z[-k-2].im, z[k+1].im,  z[k].im    } */\
+        cos = vec_perm(cos0, cos1, vcprm(o0,o1,s##o2,s##o3)); /* { cos[k], cos[k+1], cos[-k-2], cos[-k-1] } */\
+        sin = vec_perm(sin0, sin1, vcprm(o0,o1,s##o2,s##o3));\
+        r##p = im*cos - re*sin;\
+        i##p = re*cos + im*sin;
+#define STORE2(v,dst)\
+        j = dst;\
+        vec_ste(v, 0, output+j*2);\
+        vec_ste(v, 4, output+j*2);
+#define STORE8(p)\
+        a = vec_perm(r##p, i##p, vcprm(0,s0,0,s0));\
+        b = vec_perm(r##p, i##p, vcprm(1,s1,1,s1));\
+        c = vec_perm(r##p, i##p, vcprm(2,s2,2,s2));\
+        d = vec_perm(r##p, i##p, vcprm(3,s3,3,s3));\
+        STORE2(a, revtabk[ p*2-4]);\
+        STORE2(b, revtabk[ p*2-3]);\
+        STORE2(c, revtabj[-p*2+2]);\
+        STORE2(d, revtabj[-p*2+3]);
+
+        cos0 = tcos[k];
+        sin0 = tsin[k];
+        cos1 = tcos[-k-1];
+        sin1 = tsin[-k-1];
+        CMULA(0, 0,1,2,3);
+        CMULA(1, 2,3,0,1);
+        STORE8(0);
+        STORE8(1);
+        revtabj += 4;
+        revtabk -= 4;
+        k--;
+    } while(k >= 0);
+
+    fft_dispatch(s, (FFTComplex*)output, 0);
+
+    /* post rotation + reordering */
+    j = -n32;
+    k = n32-1;
+    do {
+        vec_f cos,sin,re,im,a,b,c,d;
+#define CMULB(d0,d1,o)\
+        re = pout[o*2];\
+        im = pout[o*2+1];\
+        cos = tcos[o];\
+        sin = tsin[o];\
+        d0 = im*sin - re*cos;\
+        d1 = re*sin + im*cos;
+
+        CMULB(a,b,j);
+        CMULB(c,d,k);
+        pout[2*j]   = vec_perm(a, d, vcprm(0,s3,1,s2));
+        pout[2*j+1] = vec_perm(a, d, vcprm(2,s1,3,s0));
+        pout[2*k]   = vec_perm(c, b, vcprm(0,s3,1,s2));
+        pout[2*k+1] = vec_perm(c, b, vcprm(2,s1,3,s0));
+        j++;
+        k--;
+    } while(k >= 0);
+}
+
+static void ff_imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+    int k;
+    int n = 1 << s->mdct_bits;
+    int n4 = n >> 2;
+    int n16 = n >> 4;
+    vec_u32 sign = {1<<31,1<<31,1<<31,1<<31};
+    vec_u32 *p0 = (vec_u32*)(output+n4);
+    vec_u32 *p1 = (vec_u32*)(output+n4*3);
+
+    ff_imdct_half_altivec(s, output+n4, input);
+
+    for (k = 0; k < n16; k++) {
+        vec_u32 a = p0[k] ^ sign;
+        vec_u32 b = p1[-k-1];
+        p0[-k-1] = vec_perm(a, a, vcprm(3,2,1,0));
+        p1[k]    = vec_perm(b, b, vcprm(3,2,1,0));
+    }
+}
 #endif // HAVE_IBM_GAS
 
 av_cold void ff_fft_init_altivec(FFTContext *s)
 {
 #if HAVE_IBM_GAS
     s->fft_calc = ff_fft_calc_altivec;
+    s->imdct_calc = ff_imdct_calc_altivec;
+    s->imdct_half = ff_imdct_half_altivec;
 #endif
 }
-- 
1.6.4.4



