[FFmpeg-cvslog] r23019 - in branches/0.6: . libswscale libswscale/Makefile libswscale/bfin libswscale/bfin/internal_bfin.S libswscale/bfin/swscale_bfin.c libswscale/bfin/yuv2rgb_bfin.c libswscale/colorspace-test.c...

siretart subversion
Tue May 4 23:01:49 CEST 2010


Author: siretart
Date: Tue May  4 23:01:48 2010
New Revision: 23019

Log:
replace svn:externals on libswscale by a real copy

Added:
   branches/0.6/libswscale/
   branches/0.6/libswscale/Makefile
   branches/0.6/libswscale/bfin/
   branches/0.6/libswscale/bfin/internal_bfin.S
   branches/0.6/libswscale/bfin/swscale_bfin.c
   branches/0.6/libswscale/bfin/yuv2rgb_bfin.c
   branches/0.6/libswscale/colorspace-test.c
   branches/0.6/libswscale/libswscale.v
   branches/0.6/libswscale/mlib/
   branches/0.6/libswscale/mlib/yuv2rgb_mlib.c
   branches/0.6/libswscale/options.c
   branches/0.6/libswscale/ppc/
   branches/0.6/libswscale/ppc/swscale_altivec_template.c
   branches/0.6/libswscale/ppc/yuv2rgb_altivec.c
   branches/0.6/libswscale/rgb2rgb.c
   branches/0.6/libswscale/rgb2rgb.h
   branches/0.6/libswscale/rgb2rgb_template.c
   branches/0.6/libswscale/sparc/
   branches/0.6/libswscale/sparc/yuv2rgb_vis.c
   branches/0.6/libswscale/swscale-test.c
   branches/0.6/libswscale/swscale.c
   branches/0.6/libswscale/swscale.h
   branches/0.6/libswscale/swscale_internal.h
   branches/0.6/libswscale/swscale_template.c
   branches/0.6/libswscale/utils.c
   branches/0.6/libswscale/x86/
   branches/0.6/libswscale/x86/yuv2rgb_mmx.c
   branches/0.6/libswscale/x86/yuv2rgb_template.c
   branches/0.6/libswscale/x86/yuv2rgb_template2.c
   branches/0.6/libswscale/yuv2rgb.c
Modified:
   branches/0.6/   (props changed)

Added: branches/0.6/libswscale/Makefile
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/Makefile	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,22 @@
+include $(SUBDIR)../config.mak
+
+NAME = swscale
+FFLIBS = avutil
+
+HEADERS = swscale.h
+
+OBJS = options.o rgb2rgb.o swscale.o utils.o yuv2rgb.o
+
+OBJS-$(ARCH_BFIN)          +=  bfin/internal_bfin.o     \
+                               bfin/swscale_bfin.o      \
+                               bfin/yuv2rgb_bfin.o
+OBJS-$(CONFIG_MLIB)        +=  mlib/yuv2rgb_mlib.o
+OBJS-$(HAVE_ALTIVEC)       +=  ppc/yuv2rgb_altivec.o
+OBJS-$(HAVE_MMX)           +=  x86/yuv2rgb_mmx.o
+OBJS-$(HAVE_VIS)           +=  sparc/yuv2rgb_vis.o
+
+TESTPROGS = colorspace swscale
+
+DIRS = bfin mlib ppc sparc x86
+
+include $(SUBDIR)../subdir.mak

Added: branches/0.6/libswscale/bfin/internal_bfin.S
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/bfin/internal_bfin.S	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,606 @@
+/*
+ * Copyright (C) 2007 Marc Hoffman <marc.hoffman at analog.com>
+ *                    April 20, 2007
+ *
+ * Blackfin video color space converter operations
+ * convert I420 YV12 to RGB in various formats
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/*
+YUV420 to RGB565 conversion. This routine takes a YUV 420 planar macroblock
+and converts it to RGB565. R:5 bits, G:6 bits, B:5 bits, packed into shorts.
+
+
+The following calculation is used for the conversion:
+
+  r = clipz((y-oy)*cy  + crv*(v-128))
+  g = clipz((y-oy)*cy  + cgv*(v-128) + cgu*(u-128))
+  b = clipz((y-oy)*cy  + cbu*(u-128))
+
+y,u,v are prescaled by a factor of 4, i.e. left-shifted to gain precision.
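+
+As a rough editorial sketch only (not part of this commit), the per-pixel
+math looks like this in plain C; the fixed-point scaling and the final
+shift are left out, clipz is assumed to clamp to [0,255], and the
+function name is purely illustrative:
+
+    static inline int clipz(int x) { return x < 0 ? 0 : (x > 255 ? 255 : x); }
+
+    static void yuv2rgb_pixel(int y, int u, int v,
+                              int oy, int cy, int crv, int cgu, int cgv, int cbu,
+                              int *r, int *g, int *b)
+    {
+        int luma = (y - oy) * cy;                              // Y' = y*cy
+        *r = clipz(luma + crv * (v - 128));                    // R = Y' + crv*(Cr-128)
+        *g = clipz(luma + cgv * (v - 128) + cgu * (u - 128));  // G = Y' + cgv*(Cr-128) + cgu*(Cb-128)
+        *b = clipz(luma + cbu * (u - 128));                    // B = Y' + cbu*(Cb-128)
+    }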
+
+
+New factorization to eliminate the truncation error which was
+occurring due to the byteop3p.
+
+
+1) Use byteop16m to subtract quad bytes; this operates on U8 data, so
+   the offsets need to be renormalized to 8 bits.
+
+2) Scale operands up by a factor of 4, not 8, because Blackfin
+   multiplies include a shift.
+
+3) Compute into the accumulators cy*yx0, cy*yx1.
+
+4) Compute each of the linear equations:
+     r = clipz((y - oy) * cy  + crv * (v - 128))
+
+     g = clipz((y - oy) * cy  + cgv * (v - 128) + cgu * (u - 128))
+
+     b = clipz((y - oy) * cy  + cbu * (u - 128))
+
+   Reuse of the accumulators requires that we actually multiply
+   twice: once with an addition and a second time with a subtraction.
+
+   Because of this we need to compute the equations in the order R, B,
+   then G, saving the writes for B in the case of 24/32-bit color
+   formats.
+
+   API: yuv2rgb_kind (uint8_t *Y, uint8_t *U, uint8_t *V, int *out,
+                      int dW, uint32_t *coeffs);
+
+       A          B
+       ---        ---
+       i2 = cb    i3 = cr
+       i1 = coeff i0 = y
+
+Where the coeffs buffer has the following layout in memory:
+
+uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv;
+
+coeffs is a pointer to oy.
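+
+As an editorial illustration (not code from this commit), a caller could
+build the buffer as eleven consecutive 32-bit words: the ten listed
+above plus the gmask word read by the inner loops, which is what
+COEFF_LEN = 11*4 below accounts for. In this tree the words live
+consecutively in SwsContext starting at oy, which is why yuv2rgb_bfin.c
+simply passes &c->oy:
+
+    uint32_t coeffs[11] = {
+        oy, oc, 0,                      // oy, oc, zero
+        cy, crv, rmask,
+        cbu, bmask, cgu, cgv, gmask
+    };
+    ff_bfin_yuv2rgb565_line(Y, U, V, out, dW, coeffs);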
+
+The {rgb} masks are only utilized by the 555/565 packing algorithms. Note that
+data replication is used to simplify the internal algorithms for the
+dual-MAC architecture of the Blackfin.
+
+All routines are exported with _ff_bfin_ as a symbol prefix.
+
+Rough performance gain compared against -O3:
+
+2779809/1484290 187.28%
+
+which translates to ~33 c/pel to ~57 c/pel for the reference vs ~17.5
+c/pel for the optimized implementation. Not sure why there is such a
+huge variation in the reference code on Blackfin; I guess it must have
+to do with the memory system.
+*/
+
+#define mL3 .text
+#if defined(__FDPIC__) && CONFIG_SRAM
+#define mL1 .l1.text
+#else
+#define mL1 mL3
+#endif
+#define MEM mL1
+
+#define DEFUN(fname,where,interface) \
+        .section where;              \
+        .global _ff_bfin_ ## fname;  \
+        .type _ff_bfin_ ## fname, STT_FUNC; \
+        .align 8;                    \
+        _ff_bfin_ ## fname
+
+#define DEFUN_END(fname) \
+        .size _ff_bfin_ ## fname, . - _ff_bfin_ ## fname
+
+
+.text
+
+#define COEFF_LEN        11*4
+#define COEFF_REL_CY_OFF 4*4
+
+#define ARG_OUT   20
+#define ARG_W     24
+#define ARG_COEFF 28
+
+DEFUN(yuv2rgb565_line,MEM,
+   (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)):
+        link 0;
+        [--sp] = (r7:4);
+        p1 = [fp+ARG_OUT];
+        r3 = [fp+ARG_W];
+
+        i0 = r0;
+        i2 = r1;
+        i3 = r2;
+
+        r0 = [fp+ARG_COEFF];
+        i1 = r0;
+        b1 = i1;
+        l1 = COEFF_LEN;
+        m0 = COEFF_REL_CY_OFF;
+        p0 = r3;
+
+        r0   = [i0++];         // 2Y
+        r1.l = w[i2++];        // 2u
+        r1.h = w[i3++];        // 2v
+        p0 = p0>>2;
+
+        lsetup (.L0565, .L1565) lc0 = p0;
+
+        /*
+           uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv
+           r0 -- used to load 4ys
+           r1 -- used to load 2us,2vs
+           r4 -- y3,y2
+           r5 -- y1,y0
+           r6 -- u1,u0
+           r7 -- v1,v0
+        */
+                                                              r2=[i1++]; // oy
+.L0565:
+        /*
+        rrrrrrrr gggggggg bbbbbbbb
+         5432109876543210
+                    bbbbb >>3
+              gggggggg    <<3
+         rrrrrrrr         <<8
+         rrrrrggggggbbbbb
+        */
+        (r4,r5) = byteop16m (r1:0, r3:2)                   || r3=[i1++]; // oc
+        (r7,r6) = byteop16m (r1:0, r3:2) (r);
+        r5 = r5 << 2 (v);                                                // y1,y0
+        r4 = r4 << 2 (v);                                                // y3,y2
+        r6 = r6 << 2 (v)                                   || r0=[i1++]; // u1,u0, r0=zero
+        r7 = r7 << 2 (v)                                   || r1=[i1++]; // v1,v0  r1=cy
+        /* Y' = y*cy */
+        a1 = r1.h*r5.h, a0 = r1.l*r5.l                     || r1=[i1++]; // crv
+
+        /* R = Y+ crv*(Cr-128) */
+        r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+                a1 -= r1.h*r7.l,          a0 -= r1.l*r7.l  || r5=[i1++]; // rmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cbu
+        r2 = r2 >> 3 (v);
+        r3 = r2 & r5;
+
+        /* B = Y+ cbu*(Cb-128) */
+        r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l);
+                a1 -= r1.h*r6.l,          a0 -= r1.l*r6.l  || r5=[i1++]; // bmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cgu
+        r2 = r2 << 8 (v);
+        r2 = r2 & r5;
+        r3 = r3 | r2;
+
+        /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+                a1 += r1.h*r6.l,          a0 += r1.l*r6.l  || r1=[i1++]; // cgv
+        r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r5=[i1++m0]; // gmask
+        r2 = r2 << 3 (v);
+        r2 = r2 & r5;
+        r3 = r3 | r2;
+        [p1++]=r3                                          || r1=[i1++]; // cy
+
+        /* Y' = y*cy */
+
+        a1 = r1.h*r4.h, a0 = r1.l*r4.l                     || r1=[i1++]; // crv
+
+        /* R = Y+ crv*(Cr-128) */
+        r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h);
+                a1 -= r1.h*r7.h,          a0 -= r1.l*r7.h  || r5=[i1++]; // rmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cbu
+        r2 = r2 >> 3 (v);
+        r3 = r2 & r5;
+
+        /* B = Y+ cbu*(Cb-128) */
+        r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h);
+                a1 -= r1.h*r6.h,          a0 -= r1.l*r6.h  || r5=[i1++]; // bmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cgu
+        r2 = r2 << 8 (v);
+        r2 = r2 & r5;
+        r3 = r3 | r2;
+
+        /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+                a1 += r1.h*r6.h,          a0 += r1.l*r6.h  || r1=[i1++]; // cgv
+        r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h) || r5=[i1++]; // gmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r0   =  [i0++];        // 2Y
+        r2 = r2 << 3 (v)                                   || r1.l = w[i2++];        // 2u
+        r2 = r2 & r5;
+        r3 = r3 | r2;
+        [p1++]=r3                                          || r1.h = w[i3++];        // 2v
+.L1565:                                                       r2=[i1++]; // oy
+
+        l1 = 0;
+
+        (r7:4) = [sp++];
+        unlink;
+        rts;
+DEFUN_END(yuv2rgb565_line)
+
+DEFUN(yuv2rgb555_line,MEM,
+   (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)):
+        link 0;
+        [--sp] = (r7:4);
+        p1 = [fp+ARG_OUT];
+        r3 = [fp+ARG_W];
+
+        i0 = r0;
+        i2 = r1;
+        i3 = r2;
+
+        r0 = [fp+ARG_COEFF];
+        i1 = r0;
+        b1 = i1;
+        l1 = COEFF_LEN;
+        m0 = COEFF_REL_CY_OFF;
+        p0 = r3;
+
+        r0   = [i0++];         // 2Y
+        r1.l = w[i2++];        // 2u
+        r1.h = w[i3++];        // 2v
+        p0 = p0>>2;
+
+        lsetup (.L0555, .L1555) lc0 = p0;
+
+        /*
+           uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv
+           r0 -- used to load 4ys
+           r1 -- used to load 2us,2vs
+           r4 -- y3,y2
+           r5 -- y1,y0
+           r6 -- u1,u0
+           r7 -- v1,v0
+        */
+                                                              r2=[i1++]; // oy
+.L0555:
+        /*
+        rrrrrrrr gggggggg bbbbbbbb
+         5432109876543210
+                    bbbbb >>3
+               gggggggg   <<2
+          rrrrrrrr        <<7
+         xrrrrrgggggbbbbb
+        */
+
+        (r4,r5) = byteop16m (r1:0, r3:2)                   || r3=[i1++]; // oc
+        (r7,r6) = byteop16m (r1:0, r3:2) (r);
+        r5 = r5 << 2 (v);                                                // y1,y0
+        r4 = r4 << 2 (v);                                                // y3,y2
+        r6 = r6 << 2 (v)                                   || r0=[i1++]; // u1,u0, r0=zero
+        r7 = r7 << 2 (v)                                   || r1=[i1++]; // v1,v0  r1=cy
+        /* Y' = y*cy */
+        a1 = r1.h*r5.h, a0 = r1.l*r5.l                     || r1=[i1++]; // crv
+
+        /* R = Y+ crv*(Cr-128) */
+        r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+                a1 -= r1.h*r7.l,          a0 -= r1.l*r7.l  || r5=[i1++]; // rmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cbu
+        r2 = r2 >> 3 (v);
+        r3 = r2 & r5;
+
+        /* B = Y+ cbu*(Cb-128) */
+        r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l);
+                a1 -= r1.h*r6.l,          a0 -= r1.l*r6.l  || r5=[i1++]; // bmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cgu
+        r2 = r2 << 7 (v);
+        r2 = r2 & r5;
+        r3 = r3 | r2;
+
+        /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+                a1 += r1.h*r6.l,          a0 += r1.l*r6.l  || r1=[i1++]; // cgv
+        r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r5=[i1++m0]; // gmask
+        r2 = r2 << 2 (v);
+        r2 = r2 & r5;
+        r3 = r3 | r2;
+        [p1++]=r3                                          || r1=[i1++]; // cy
+
+        /* Y' = y*cy */
+
+        a1 = r1.h*r4.h, a0 = r1.l*r4.l                     || r1=[i1++]; // crv
+
+        /* R = Y+ crv*(Cr-128) */
+        r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h);
+                a1 -= r1.h*r7.h,          a0 -= r1.l*r7.h  || r5=[i1++]; // rmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cbu
+        r2 = r2 >> 3 (v);
+        r3 = r2 & r5;
+
+        /* B = Y+ cbu*(Cb-128) */
+        r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h);
+                a1 -= r1.h*r6.h,          a0 -= r1.l*r6.h  || r5=[i1++]; // bmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cgu
+        r2 = r2 << 7 (v);
+        r2 = r2 & r5;
+        r3 = r3 | r2;
+
+        /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+                a1 += r1.h*r6.h,          a0 += r1.l*r6.h  || r1=[i1++]; // cgv
+        r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h) || r5=[i1++]; // gmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r0=[i0++];     // 4Y
+        r2 = r2 << 2 (v)                                   || r1.l=w[i2++];  // 2u
+        r2 = r2 & r5;
+        r3 = r3 | r2;
+        [p1++]=r3                                          || r1.h=w[i3++]; // 2v
+
+.L1555:                                                       r2=[i1++]; // oy
+
+        l1 = 0;
+
+        (r7:4) = [sp++];
+        unlink;
+        rts;
+DEFUN_END(yuv2rgb555_line)
+
+DEFUN(yuv2rgb24_line,MEM,
+   (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)):
+        link 0;
+        [--sp] = (r7:4);
+        p1 = [fp+ARG_OUT];
+        r3 = [fp+ARG_W];
+        p2 = p1;
+        p2 += 3;
+
+        i0 = r0;
+        i2 = r1;
+        i3 = r2;
+
+        r0 = [fp+ARG_COEFF]; // coeff buffer
+        i1 = r0;
+        b1 = i1;
+        l1 = COEFF_LEN;
+        m0 = COEFF_REL_CY_OFF;
+        p0 = r3;
+
+        r0   = [i0++];         // 2Y
+        r1.l = w[i2++];        // 2u
+        r1.h = w[i3++];        // 2v
+        p0 = p0>>2;
+
+        lsetup (.L0888, .L1888) lc0 = p0;
+
+        /*
+           uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv
+           r0 -- used to load 4ys
+           r1 -- used to load 2us,2vs
+           r4 -- y3,y2
+           r5 -- y1,y0
+           r6 -- u1,u0
+           r7 -- v1,v0
+        */
+                                                              r2=[i1++]; // oy
+.L0888:
+        (r4,r5) = byteop16m (r1:0, r3:2)                   || r3=[i1++]; // oc
+        (r7,r6) = byteop16m (r1:0, r3:2) (r);
+        r5 = r5 << 2 (v);               // y1,y0
+        r4 = r4 << 2 (v);               // y3,y2
+        r6 = r6 << 2 (v) || r0=[i1++];  // u1,u0, r0=zero
+        r7 = r7 << 2 (v) || r1=[i1++];  // v1,v0  r1=cy
+
+        /* Y' = y*cy */
+        a1 = r1.h*r5.h, a0 = r1.l*r5.l                     || r1=[i1++]; // crv
+
+        /* R = Y+ crv*(Cr-128) */
+        r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+                a1 -= r1.h*r7.l,          a0 -= r1.l*r7.l  || r5=[i1++]; // rmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cbu
+        r2=r2>>16 || B[p1++]=r2;
+                     B[p2++]=r2;
+
+        /* B = Y+ cbu*(Cb-128) */
+        r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l);
+                a1 -= r1.h*r6.l,          a0 -= r1.l*r6.l  || r5=[i1++]; // bmask
+        r3 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cgu
+
+        /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+                a1 += r1.h*r6.l,          a0 += r1.l*r6.l  || r1=[i1++]; // cgv
+        r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r5=[i1++m0]; // gmask, oy,cy,zero
+
+        r2=r2>>16 || B[p1++]=r2;
+                     B[p2++]=r2;
+
+        r3=r3>>16 || B[p1++]=r3;
+                     B[p2++]=r3                            || r1=[i1++]; // cy
+
+        p1+=3;
+        p2+=3;
+        /* Y' = y*cy */
+        a1 = r1.h*r4.h, a0 = r1.l*r4.l                     || r1=[i1++]; // crv
+
+        /* R = Y+ crv*(Cr-128) */
+        r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h);
+                a1 -= r1.h*r7.h,          a0 -= r1.l*r7.h  || r5=[i1++]; // rmask
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cbu
+        r2=r2>>16 || B[p1++]=r2;
+        B[p2++]=r2;
+
+        /* B = Y+ cbu*(Cb-128) */
+        r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h);
+                a1 -= r1.h*r6.h,          a0 -= r1.l*r6.h  || r5=[i1++]; // bmask
+        r3 = byteop3p(r3:2, r1:0)(LO)                      || r1=[i1++]; // cgu
+
+        /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+                a1 += r1.h*r6.h,          a0 += r1.l*r6.h  || r1=[i1++]; // cgv
+        r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h);
+        r2 = byteop3p(r3:2, r1:0)(LO)                      || r5=[i1++]; // gmask
+        r2=r2>>16 || B[p1++]=r2 || r0 = [i0++];    // 4y
+                     B[p2++]=r2 || r1.l = w[i2++]; // 2u
+        r3=r3>>16 || B[p1++]=r3 || r1.h = w[i3++]; // 2v
+                     B[p2++]=r3 || r2=[i1++];      // oy
+
+        p1+=3;
+.L1888: p2+=3;
+
+        l1 = 0;
+
+        (r7:4) = [sp++];
+        unlink;
+        rts;
+DEFUN_END(yuv2rgb24_line)
+
+
+
+#define ARG_vdst        20
+#define ARG_width       24
+#define ARG_height      28
+#define ARG_lumStride   32
+#define ARG_chromStride 36
+#define ARG_srcStride   40
+
+DEFUN(uyvytoyv12, mL3,  (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                         long width, long height,
+                         long lumStride, long chromStride, long srcStride)):
+        link 0;
+        [--sp] = (r7:4,p5:4);
+
+        p0 = r1;       // Y top even
+
+        i2 = r2; // *u
+        r2 = [fp + ARG_vdst];
+        i3 = r2; // *v
+
+        r1 = [fp + ARG_srcStride];
+        r2 = r0 + r1;
+        r1 += -8;  // i0,i1 are pre-read; correct the stride for that
+        m0 = r1;
+
+        i0 = r0;  // uyvy_T even
+        i1 = r2;  // uyvy_B odd
+
+        p2 = [fp + ARG_lumStride];
+        p1 = p0 + p2;  // Y bot odd
+
+        p5 = [fp + ARG_width];
+        p4 = [fp + ARG_height];
+        r0 = p5;
+        p4 = p4 >> 1;
+        p5 = p5 >> 2;
+
+        r2 = [fp + ARG_chromStride];
+        r0 = r0 >> 1;
+        r2 = r2 - r0;
+        m1 = r2;
+
+        /*   I0,I1 - src input line pointers
+         *   p0,p1 - luma output line pointers
+         *   I2    - dstU
+         *   I3    - dstV
+         */
+
+        lsetup (0f, 1f) lc1 = p4;   // H/2
+0:        r0 = [i0++] || r2 = [i1++];
+          r1 = [i0++] || r3 = [i1++];
+          r4 = byteop1p(r1:0, r3:2);
+          r5 = byteop1p(r1:0, r3:2) (r);
+          lsetup (2f, 3f) lc0 = p5; // W/4
+2:          r0 = r0 >> 8(v);
+            r1 = r1 >> 8(v);
+            r2 = r2 >> 8(v);
+            r3 = r3 >> 8(v);
+            r0 = bytepack(r0, r1);
+            r2 = bytepack(r2, r3)         ||  [p0++] = r0;    // yyyy
+            r6 = pack(r5.l, r4.l)         ||  [p1++] = r2;    // yyyy
+            r7 = pack(r5.h, r4.h)         ||  r0 = [i0++] || r2 = [i1++];
+            r6 = bytepack(r6, r7)         ||  r1 = [i0++] || r3 = [i1++];
+            r4 = byteop1p(r1:0, r3:2)     ||  w[i2++] = r6.l; // uu
+3:          r5 = byteop1p(r1:0, r3:2) (r) ||  w[i3++] = r6.h; // vv
+
+          i0 += m0;
+          i1 += m0;
+          i2 += m1;
+          i3 += m1;
+          p0 = p0 + p2;
+1:        p1 = p1 + p2;
+
+        (r7:4,p5:4) = [sp++];
+        unlink;
+        rts;
+DEFUN_END(uyvytoyv12)
+
+DEFUN(yuyvtoyv12, mL3,  (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                         long width, long height,
+                         long lumStride, long chromStride, long srcStride)):
+        link 0;
+        [--sp] = (r7:4,p5:4);
+
+        p0 = r1;       // Y top even
+
+        i2 = r2; // *u
+        r2 = [fp + ARG_vdst];
+        i3 = r2; // *v
+
+        r1 = [fp + ARG_srcStride];
+        r2 = r0 + r1;
+        r1 += -8;  // i0,i1 are pre-read; correct the stride for that
+        m0 = r1;
+
+        i0 = r0;  // uyvy_T even
+        i1 = r2;  // uyvy_B odd
+
+        p2 = [fp + ARG_lumStride];
+        p1 = p0 + p2;  // Y bot odd
+
+        p5 = [fp + ARG_width];
+        p4 = [fp + ARG_height];
+        r0 = p5;
+        p4 = p4 >> 1;
+        p5 = p5 >> 2;
+
+        r2 = [fp + ARG_chromStride];
+        r0 = r0 >> 1;
+        r2 = r2 - r0;
+        m1 = r2;
+
+        /*   I0,I1 - src input line pointers
+         *   p0,p1 - luma output line pointers
+         *   I2    - dstU
+         *   I3    - dstV
+         */
+
+        lsetup (0f, 1f) lc1 = p4;   // H/2
+0:        r0 = [i0++] || r2 = [i1++];
+          r1 = [i0++] || r3 = [i1++];
+          r4 = bytepack(r0, r1);
+          r5 = bytepack(r2, r3);
+          lsetup (2f, 3f) lc0 = p5; // W/4
+2:          r0 = r0 >> 8(v) || [p0++] = r4;  // yyyy-even
+            r1 = r1 >> 8(v) || [p1++] = r5;  // yyyy-odd
+            r2 = r2 >> 8(v);
+            r3 = r3 >> 8(v);
+            r4 = byteop1p(r1:0, r3:2);
+            r5 = byteop1p(r1:0, r3:2) (r);
+            r6 = pack(r5.l, r4.l);
+            r7 = pack(r5.h, r4.h)         ||  r0 = [i0++] || r2 = [i1++];
+            r6 = bytepack(r6, r7)         ||  r1 = [i0++] || r3 = [i1++];
+            r4 = bytepack(r0, r1)         ||  w[i2++] = r6.l; // uu
+3:          r5 = bytepack(r2, r3)         ||  w[i3++] = r6.h; // vv
+
+          i0 += m0;
+          i1 += m0;
+          i2 += m1;
+          i3 += m1;
+          p0 = p0 + p2;
+1:        p1 = p1 + p2;
+
+        (r7:4,p5:4) = [sp++];
+        unlink;
+        rts;
+DEFUN_END(yuyvtoyv12)

Added: branches/0.6/libswscale/bfin/swscale_bfin.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/bfin/swscale_bfin.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2007 Marc Hoffman <marc.hoffman at analog.com>
+ *
+ * Blackfin software video scaler operations
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+#include "config.h"
+#include <unistd.h>
+#include "libswscale/rgb2rgb.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+
+#if defined (__FDPIC__) && CONFIG_SRAM
+#define L1CODE __attribute__ ((l1_text))
+#else
+#define L1CODE
+#endif
+
+int ff_bfin_uyvytoyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                       long width, long height,
+                       long lumStride, long chromStride, long srcStride) L1CODE;
+
+int ff_bfin_yuyvtoyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                       long width, long height,
+                       long lumStride, long chromStride, long srcStride) L1CODE;
+
+static int uyvytoyv12_unscaled(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    uint8_t *dsty = dst[0] + dstStride[0]*srcSliceY;
+    uint8_t *dstu = dst[1] + dstStride[1]*srcSliceY/2;
+    uint8_t *dstv = dst[2] + dstStride[2]*srcSliceY/2;
+    uint8_t *ip   = src[0] + srcStride[0]*srcSliceY;
+    int w         = dstStride[0];
+
+    ff_bfin_uyvytoyv12(ip, dsty, dstu, dstv, w, srcSliceH,
+                       dstStride[0], dstStride[1], srcStride[0]);
+
+    return srcSliceH;
+}
+
+static int yuyvtoyv12_unscaled(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    uint8_t *dsty = dst[0] + dstStride[0]*srcSliceY;
+    uint8_t *dstu = dst[1] + dstStride[1]*srcSliceY/2;
+    uint8_t *dstv = dst[2] + dstStride[2]*srcSliceY/2;
+    uint8_t *ip   = src[0] + srcStride[0]*srcSliceY;
+    int w         = dstStride[0];
+
+    ff_bfin_yuyvtoyv12(ip, dsty, dstu, dstv, w, srcSliceH,
+                       dstStride[0], dstStride[1], srcStride[0]);
+
+    return srcSliceH;
+}
+
+
+void ff_bfin_get_unscaled_swscale(SwsContext *c)
+{
+    SwsFunc swScale = c->swScale;
+    if (c->flags & SWS_CPU_CAPS_BFIN) {
+        if (c->dstFormat == PIX_FMT_YUV420P &&
+            c->srcFormat == PIX_FMT_UYVY422) {
+            av_log (NULL, AV_LOG_VERBOSE, "selecting Blackfin optimized uyvytoyv12_unscaled\n");
+            c->swScale = uyvytoyv12_unscaled;
+        }
+        if (c->dstFormat == PIX_FMT_YUV420P &&
+            c->srcFormat == PIX_FMT_YUYV422) {
+            av_log (NULL, AV_LOG_VERBOSE, "selecting Blackfin optimized yuyvtoyv12_unscaled\n");
+            c->swScale = yuyvtoyv12_unscaled;
+        }
+    }
+}

Added: branches/0.6/libswscale/bfin/yuv2rgb_bfin.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/bfin/yuv2rgb_bfin.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2007 Marc Hoffman <marc.hoffman at analog.com>
+ *
+ * Blackfin video color space converter operations
+ * convert I420 YV12 to RGB in various formats
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+#include "config.h"
+#include <unistd.h>
+#include "libswscale/rgb2rgb.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+
+#if defined(__FDPIC__) && CONFIG_SRAM
+#define L1CODE __attribute__ ((l1_text))
+#else
+#define L1CODE
+#endif
+
+void ff_bfin_yuv2rgb555_line(uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out,
+                             int w, uint32_t *coeffs) L1CODE;
+
+void ff_bfin_yuv2rgb565_line(uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out,
+                             int w, uint32_t *coeffs) L1CODE;
+
+void ff_bfin_yuv2rgb24_line(uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out,
+                            int w, uint32_t *coeffs) L1CODE;
+
+typedef void (* ltransform)(uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out,
+                            int w, uint32_t *coeffs);
+
+
+static void bfin_prepare_coefficients(SwsContext *c, int rgb, int masks)
+{
+    int oy;
+    oy      = c->yOffset&0xffff;
+    oy      = oy >> 3; // keep everything U8.0 for offset calculation
+
+    c->oc   = 128*0x01010101U;
+    c->oy   =  oy*0x01010101U;
+
+    /* copy 64bit vector coeffs down to 32bit vector coeffs */
+    c->cy  = c->yCoeff;
+    c->zero = 0;
+
+    if (rgb) {
+        c->crv = c->vrCoeff;
+        c->cbu = c->ubCoeff;
+        c->cgu = c->ugCoeff;
+        c->cgv = c->vgCoeff;
+    } else {
+        c->crv = c->ubCoeff;
+        c->cbu = c->vrCoeff;
+        c->cgu = c->vgCoeff;
+        c->cgv = c->ugCoeff;
+    }
+
+
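+    /* Each 16-bit channel mask is replicated into both halves of the
+       32-bit word (the 0x00010001U factor) so the dual-MAC line routines
+       can mask two packed pixels per word; see the data-replication note
+       in bfin/internal_bfin.S. */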
+    if (masks == 555) {
+        c->rmask = 0x001f * 0x00010001U;
+        c->gmask = 0x03e0 * 0x00010001U;
+        c->bmask = 0x7c00 * 0x00010001U;
+    } else if (masks == 565) {
+        c->rmask = 0x001f * 0x00010001U;
+        c->gmask = 0x07e0 * 0x00010001U;
+        c->bmask = 0xf800 * 0x00010001U;
+    }
+}
+
+static int core_yuv420_rgb(SwsContext *c,
+                           uint8_t **in, int *instrides,
+                           int srcSliceY, int srcSliceH,
+                           uint8_t **oplanes, int *outstrides,
+                           ltransform lcscf, int rgb, int masks)
+{
+    uint8_t *py,*pu,*pv,*op;
+    int w  = instrides[0];
+    int h2 = srcSliceH>>1;
+    int i;
+
+    bfin_prepare_coefficients(c, rgb, masks);
+
+    py = in[0];
+    pu = in[1+(1^rgb)];
+    pv = in[1+(0^rgb)];
+
+    op = oplanes[0] + srcSliceY*outstrides[0];
+
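+    /* 4:2:0 layout: each pass converts two luma rows against the same
+       chroma row, so the chroma pointers advance only once per pass. */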
+    for (i=0;i<h2;i++) {
+
+        lcscf(py, pu, pv, op, w, &c->oy);
+
+        py += instrides[0];
+        op += outstrides[0];
+
+        lcscf(py, pu, pv, op, w, &c->oy);
+
+        py += instrides[0];
+        pu += instrides[1];
+        pv += instrides[2];
+        op += outstrides[0];
+    }
+
+    return srcSliceH;
+}
+
+
+static int bfin_yuv420_rgb555(SwsContext *c,
+                              uint8_t **in, int *instrides,
+                              int srcSliceY, int srcSliceH,
+                              uint8_t **oplanes, int *outstrides)
+{
+    return core_yuv420_rgb(c, in, instrides, srcSliceY, srcSliceH, oplanes,
+                           outstrides, ff_bfin_yuv2rgb555_line, 1, 555);
+}
+
+static int bfin_yuv420_bgr555(SwsContext *c,
+                              uint8_t **in, int *instrides,
+                              int srcSliceY, int srcSliceH,
+                              uint8_t **oplanes, int *outstrides)
+{
+    return core_yuv420_rgb(c, in, instrides, srcSliceY, srcSliceH, oplanes,
+                           outstrides, ff_bfin_yuv2rgb555_line, 0, 555);
+}
+
+static int bfin_yuv420_rgb24(SwsContext *c,
+                             uint8_t **in, int *instrides,
+                             int srcSliceY, int srcSliceH,
+                             uint8_t **oplanes, int *outstrides)
+{
+    return core_yuv420_rgb(c, in, instrides, srcSliceY, srcSliceH, oplanes,
+                           outstrides, ff_bfin_yuv2rgb24_line, 1, 888);
+}
+
+static int bfin_yuv420_bgr24(SwsContext *c,
+                             uint8_t **in, int *instrides,
+                             int srcSliceY, int srcSliceH,
+                             uint8_t **oplanes, int *outstrides)
+{
+    return core_yuv420_rgb(c, in, instrides, srcSliceY, srcSliceH, oplanes,
+                           outstrides, ff_bfin_yuv2rgb24_line, 0, 888);
+}
+
+static int bfin_yuv420_rgb565(SwsContext *c,
+                              uint8_t **in, int *instrides,
+                              int srcSliceY, int srcSliceH,
+                              uint8_t **oplanes, int *outstrides)
+{
+    return core_yuv420_rgb(c, in, instrides, srcSliceY, srcSliceH, oplanes,
+                           outstrides, ff_bfin_yuv2rgb565_line, 1, 565);
+}
+
+static int bfin_yuv420_bgr565(SwsContext *c,
+                              uint8_t **in, int *instrides,
+                              int srcSliceY, int srcSliceH,
+                              uint8_t **oplanes, int *outstrides)
+{
+    return core_yuv420_rgb(c, in, instrides, srcSliceY, srcSliceH, oplanes,
+                           outstrides, ff_bfin_yuv2rgb565_line, 0, 565);
+}
+
+
+SwsFunc ff_yuv2rgb_get_func_ptr_bfin(SwsContext *c)
+{
+    SwsFunc f;
+
+    switch(c->dstFormat) {
+    case PIX_FMT_RGB555: f = bfin_yuv420_rgb555; break;
+    case PIX_FMT_BGR555: f = bfin_yuv420_bgr555; break;
+    case PIX_FMT_RGB565: f = bfin_yuv420_rgb565; break;
+    case PIX_FMT_BGR565: f = bfin_yuv420_bgr565; break;
+    case PIX_FMT_RGB24:  f = bfin_yuv420_rgb24;  break;
+    case PIX_FMT_BGR24:  f = bfin_yuv420_bgr24;  break;
+    default:
+        return 0;
+    }
+
+    av_log(c, AV_LOG_INFO, "BlackFin accelerated color space converter %s\n",
+           sws_format_name (c->dstFormat));
+
+    return f;
+}

Added: branches/0.6/libswscale/colorspace-test.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/colorspace-test.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2002 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <string.h>              /* for memset() */
+#include <unistd.h>
+#include <stdlib.h>
+#include <inttypes.h>
+
+#include "swscale.h"
+#include "rgb2rgb.h"
+
+#define SIZE 1000
+#define srcByte 0x55
+#define dstByte 0xBB
+
+#define FUNC(s,d,n) {s,d,#n,n}
+
+static int cpu_caps;
+
+static char *args_parse(int argc, char *argv[])
+{
+    int o;
+
+    while ((o = getopt(argc, argv, "m23")) != -1) {
+        switch (o) {
+        case 'm':
+            cpu_caps |= SWS_CPU_CAPS_MMX;
+            break;
+        case '2':
+            cpu_caps |= SWS_CPU_CAPS_MMX2;
+            break;
+        case '3':
+            cpu_caps |= SWS_CPU_CAPS_3DNOW;
+            break;
+        default:
+            av_log(NULL, AV_LOG_ERROR, "Unknown option %c\n", o);
+        }
+    }
+
+    return argv[optind];
+}
+
+int main(int argc, char **argv)
+{
+    int i, funcNum;
+    uint8_t *srcBuffer= (uint8_t*)av_malloc(SIZE);
+    uint8_t *dstBuffer= (uint8_t*)av_malloc(SIZE);
+    int failedNum=0;
+    int passedNum=0;
+
+    if (!srcBuffer || !dstBuffer)
+        return -1;
+
+    av_log(NULL, AV_LOG_INFO, "memory corruption test ...\n");
+    args_parse(argc, argv);
+    av_log(NULL, AV_LOG_INFO, "CPU capabilities forced to %x\n", cpu_caps);
+    sws_rgb2rgb_init(cpu_caps);
+
+    for(funcNum=0; ; funcNum++) {
+        struct func_info_s {
+            int src_bpp;
+            int dst_bpp;
+            const char *name;
+            void (*func)(const uint8_t *src, uint8_t *dst, long src_size);
+        } func_info[] = {
+            FUNC(2, 2, rgb15to16),
+            FUNC(2, 3, rgb15to24),
+            FUNC(2, 4, rgb15to32),
+            FUNC(2, 3, rgb16to24),
+            FUNC(2, 4, rgb16to32),
+            FUNC(3, 2, rgb24to15),
+            FUNC(3, 2, rgb24to16),
+            FUNC(3, 4, rgb24to32),
+            FUNC(4, 2, rgb32to15),
+            FUNC(4, 2, rgb32to16),
+            FUNC(4, 3, rgb32to24),
+            FUNC(2, 2, rgb16to15),
+            FUNC(2, 2, rgb15tobgr15),
+            FUNC(2, 2, rgb15tobgr16),
+            FUNC(2, 3, rgb15tobgr24),
+            FUNC(2, 4, rgb15tobgr32),
+            FUNC(2, 2, rgb16tobgr15),
+            FUNC(2, 2, rgb16tobgr16),
+            FUNC(2, 3, rgb16tobgr24),
+            FUNC(2, 4, rgb16tobgr32),
+            FUNC(3, 2, rgb24tobgr15),
+            FUNC(3, 2, rgb24tobgr16),
+            FUNC(3, 3, rgb24tobgr24),
+            FUNC(3, 4, rgb24tobgr32),
+            FUNC(4, 2, rgb32tobgr15),
+            FUNC(4, 2, rgb32tobgr16),
+            FUNC(4, 3, rgb32tobgr24),
+            FUNC(4, 4, rgb32tobgr32),
+            FUNC(0, 0, NULL)
+        };
+        int width;
+        int failed=0;
+        int srcBpp=0;
+        int dstBpp=0;
+
+        if (!func_info[funcNum].func) break;
+
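+        /* Strategy: fill the source with srcByte and the destination with
+         * dstByte, run the converter at various widths and offsets, and
+         * verify afterwards that the source and every destination byte
+         * outside the written region still hold their fill pattern. */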
+        av_log(NULL, AV_LOG_INFO,".");
+        memset(srcBuffer, srcByte, SIZE);
+
+        for(width=63; width>0; width--) {
+            int dstOffset;
+            for(dstOffset=128; dstOffset<196; dstOffset+=4) {
+                int srcOffset;
+                memset(dstBuffer, dstByte, SIZE);
+
+                for(srcOffset=128; srcOffset<196; srcOffset+=4) {
+                    uint8_t *src= srcBuffer+srcOffset;
+                    uint8_t *dst= dstBuffer+dstOffset;
+                    const char *name=NULL;
+
+                    if(failed) break; //don't fill the screen with shit ...
+
+                    srcBpp = func_info[funcNum].src_bpp;
+                    dstBpp = func_info[funcNum].dst_bpp;
+                    name   = func_info[funcNum].name;
+
+                    func_info[funcNum].func(src, dst, width*srcBpp);
+
+                    if(!srcBpp) break;
+
+                    for(i=0; i<SIZE; i++) {
+                        if(srcBuffer[i]!=srcByte) {
+                            av_log(NULL, AV_LOG_INFO, "src damaged at %d w:%d src:%d dst:%d %s\n",
+                                   i, width, srcOffset, dstOffset, name);
+                            failed=1;
+                            break;
+                        }
+                    }
+                    for(i=0; i<dstOffset; i++) {
+                        if(dstBuffer[i]!=dstByte) {
+                            av_log(NULL, AV_LOG_INFO, "dst damaged at %d w:%d src:%d dst:%d %s\n",
+                                   i, width, srcOffset, dstOffset, name);
+                            failed=1;
+                            break;
+                        }
+                    }
+                    for(i=dstOffset + width*dstBpp; i<SIZE; i++) {
+                        if(dstBuffer[i]!=dstByte) {
+                            av_log(NULL, AV_LOG_INFO, "dst damaged at %d w:%d src:%d dst:%d %s\n",
+                                   i, width, srcOffset, dstOffset, name);
+                            failed=1;
+                            break;
+                        }
+                    }
+                }
+            }
+        }
+        if(failed) failedNum++;
+        else if(srcBpp) passedNum++;
+    }
+
+    av_log(NULL, AV_LOG_INFO, "\n%d converters passed, %d converters randomly overwrote memory\n", passedNum, failedNum);
+    return failedNum;
+}

Added: branches/0.6/libswscale/libswscale.v
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/libswscale.v	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,4 @@
+LIBSWSCALE_$MAJOR {
+        global: swscale_*; sws_*; ff_*;
+        local: *;
+};

Added: branches/0.6/libswscale/mlib/yuv2rgb_mlib.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/mlib/yuv2rgb_mlib.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,88 @@
+/*
+ * software YUV to RGB converter using mediaLib
+ *
+ * Copyright (C) 2003 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <mlib_types.h>
+#include <mlib_status.h>
+#include <mlib_sys.h>
+#include <mlib_video.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#include "libswscale/swscale.h"
+
+static int mlib_YUV2ARGB420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dst[], int dstStride[])
+{
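+    /* mediaLib's 420 entry points expect vertically subsampled chroma;
+       for 4:2:2 input the chroma strides are doubled so that only every
+       other chroma line is read (editorial note, rationale assumed). */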
+    if(c->srcFormat == PIX_FMT_YUV422P) {
+        srcStride[1] *= 2;
+        srcStride[2] *= 2;
+    }
+
+    assert(srcStride[1] == srcStride[2]);
+
+    mlib_VideoColorYUV2ARGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
+                               srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+    return srcSliceH;
+}
+
+static int mlib_YUV2ABGR420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    if(c->srcFormat == PIX_FMT_YUV422P) {
+        srcStride[1] *= 2;
+        srcStride[2] *= 2;
+    }
+
+    assert(srcStride[1] == srcStride[2]);
+
+    mlib_VideoColorYUV2ABGR420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
+                               srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+    return srcSliceH;
+}
+
+static int mlib_YUV2RGB420_24(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                              int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    if(c->srcFormat == PIX_FMT_YUV422P) {
+        srcStride[1] *= 2;
+        srcStride[2] *= 2;
+    }
+
+    assert(srcStride[1] == srcStride[2]);
+
+    mlib_VideoColorYUV2RGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
+                              srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+    return srcSliceH;
+}
+
+
+SwsFunc ff_yuv2rgb_init_mlib(SwsContext *c)
+{
+    switch(c->dstFormat) {
+    case PIX_FMT_RGB24: return mlib_YUV2RGB420_24;
+    case PIX_FMT_BGR32: return mlib_YUV2ARGB420_32;
+    case PIX_FMT_RGB32: return mlib_YUV2ABGR420_32;
+    default: return NULL;
+    }
+}
+

Added: branches/0.6/libswscale/options.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/options.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avutil.h"
+#include "libavcodec/opt.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+
+static const char * sws_context_to_name(void * ptr)
+{
+    return "swscaler";
+}
+
+#define OFFSET(x) offsetof(SwsContext, x)
+#define DEFAULT 0
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+
+static const AVOption options[] = {
+    { "sws_flags", "scaler/cpu flags", OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, 0, UINT_MAX, VE, "sws_flags" },
+    { "fast_bilinear", "fast bilinear", 0, FF_OPT_TYPE_CONST, SWS_FAST_BILINEAR, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "bilinear", "bilinear", 0, FF_OPT_TYPE_CONST, SWS_BILINEAR, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "bicubic", "bicubic", 0, FF_OPT_TYPE_CONST, SWS_BICUBIC, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "experimental", "experimental", 0, FF_OPT_TYPE_CONST, SWS_X, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "neighbor", "nearest neighbor", 0, FF_OPT_TYPE_CONST, SWS_POINT, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "area", "averaging area", 0, FF_OPT_TYPE_CONST, SWS_AREA, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "bicublin", "luma bicubic, chroma bilinear", 0, FF_OPT_TYPE_CONST, SWS_BICUBLIN, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "gauss", "gaussian", 0, FF_OPT_TYPE_CONST, SWS_GAUSS, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "sinc", "sinc", 0, FF_OPT_TYPE_CONST, SWS_SINC, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "lanczos", "lanczos", 0, FF_OPT_TYPE_CONST, SWS_LANCZOS, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "spline", "natural bicubic spline", 0, FF_OPT_TYPE_CONST, SWS_SPLINE, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "print_info", "print info", 0, FF_OPT_TYPE_CONST, SWS_PRINT_INFO, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "accurate_rnd", "accurate rounding", 0, FF_OPT_TYPE_CONST, SWS_ACCURATE_RND, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "mmx", "MMX SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_MMX, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "mmx2", "MMX2 SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_MMX2, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "3dnow", "3DNOW SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_3DNOW, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "altivec", "AltiVec SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_ALTIVEC, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "bfin", "Blackfin SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_BFIN, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "full_chroma_int", "full chroma interpolation", 0 , FF_OPT_TYPE_CONST, SWS_FULL_CHR_H_INT, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "full_chroma_inp", "full chroma input", 0 , FF_OPT_TYPE_CONST, SWS_FULL_CHR_H_INP, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { "bitexact", "", 0 , FF_OPT_TYPE_CONST, SWS_BITEXACT, INT_MIN, INT_MAX, VE, "sws_flags" },
+    { NULL }
+};
+
+const AVClass sws_context_class = { "SWScaler", sws_context_to_name, options };

Added: branches/0.6/libswscale/ppc/swscale_altivec_template.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/ppc/swscale_altivec_template.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,545 @@
+/*
+ * AltiVec-enhanced yuv2yuvX
+ *
+ * Copyright (C) 2004 Romain Dolbeau <romain at dolbeau.org>
+ * based on the equivalent C code in swscale.c
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define vzero vec_splat_s32(0)
+
+static inline void
+altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW)
+{
+    register int i;
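+    /* vec_splat_u32() only accepts a 5-bit immediate (-16..15), so the
+       shift count of 19 is assembled as 10 + 9. */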
+    vector unsigned int altivec_vectorShiftInt19 =
+        vec_add(vec_splat_u32(10), vec_splat_u32(9));
+    if ((unsigned long)dest % 16) {
+        /* badly aligned store, we force store alignment */
+        /* and will handle load misalignment on val w/ vec_perm */
+        vector unsigned char perm1;
+        vector signed int v1;
+        for (i = 0 ; (i < dstW) &&
+            (((unsigned long)dest + i) % 16) ; i++) {
+                int t = val[i] >> 19;
+                dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+        }
+        perm1 = vec_lvsl(i << 2, val);
+        v1 = vec_ld(i << 2, val);
+        for ( ; i < (dstW - 15); i+=16) {
+            int offset = i << 2;
+            vector signed int v2 = vec_ld(offset + 16, val);
+            vector signed int v3 = vec_ld(offset + 32, val);
+            vector signed int v4 = vec_ld(offset + 48, val);
+            vector signed int v5 = vec_ld(offset + 64, val);
+            vector signed int v12 = vec_perm(v1, v2, perm1);
+            vector signed int v23 = vec_perm(v2, v3, perm1);
+            vector signed int v34 = vec_perm(v3, v4, perm1);
+            vector signed int v45 = vec_perm(v4, v5, perm1);
+
+            vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
+            vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
+            vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
+            vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
+            vector unsigned short vs1 = vec_packsu(vA, vB);
+            vector unsigned short vs2 = vec_packsu(vC, vD);
+            vector unsigned char vf = vec_packsu(vs1, vs2);
+            vec_st(vf, i, dest);
+            v1 = v5;
+        }
+    } else { // dest is properly aligned, great
+        for (i = 0; i < (dstW - 15); i+=16) {
+            int offset = i << 2;
+            vector signed int v1 = vec_ld(offset, val);
+            vector signed int v2 = vec_ld(offset + 16, val);
+            vector signed int v3 = vec_ld(offset + 32, val);
+            vector signed int v4 = vec_ld(offset + 48, val);
+            vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
+            vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
+            vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
+            vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
+            vector unsigned short vs1 = vec_packsu(v5, v6);
+            vector unsigned short vs2 = vec_packsu(v7, v8);
+            vector unsigned char vf = vec_packsu(vs1, vs2);
+            vec_st(vf, i, dest);
+        }
+    }
+    for ( ; i < dstW ; i++) {
+        int t = val[i] >> 19;
+        dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+    }
+}
+
+static inline void
+yuv2yuvX_altivec_real(const int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+                      const int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+                      uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+{
+    const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
+    register int i, j;
+    {
+        DECLARE_ALIGNED(16, int, val)[dstW];
+
+        for (i = 0; i < (dstW -7); i+=4) {
+            vec_st(vini, i << 2, val);
+        }
+        for (; i < dstW; i++) {
+            val[i] = (1 << 18);
+        }
+
+        for (j = 0; j < lumFilterSize; j++) {
+            vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
+            vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
+            vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
+            vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter
+
+            perm = vec_lvsl(0, lumSrc[j]);
+            l1 = vec_ld(0, lumSrc[j]);
+
+            for (i = 0; i < (dstW - 7); i+=8) {
+                int offset = i << 2;
+                vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);
+
+                vector signed int v1 = vec_ld(offset, val);
+                vector signed int v2 = vec_ld(offset + 16, val);
+
+                vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]
+
+                vector signed int i1 = vec_mule(vLumFilter, ls);
+                vector signed int i2 = vec_mulo(vLumFilter, ls);
+
+                vector signed int vf1 = vec_mergeh(i1, i2);
+                vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]
+
+                vector signed int vo1 = vec_add(v1, vf1);
+                vector signed int vo2 = vec_add(v2, vf2);
+
+                vec_st(vo1, offset, val);
+                vec_st(vo2, offset + 16, val);
+
+                l1 = l2;
+            }
+            for ( ; i < dstW; i++) {
+                val[i] += lumSrc[j][i] * lumFilter[j];
+            }
+        }
+        altivec_packIntArrayToCharArray(val, dest, dstW);
+    }
+    if (uDest != 0) {
+        DECLARE_ALIGNED(16, int, u)[chrDstW];
+        DECLARE_ALIGNED(16, int, v)[chrDstW];
+
+        for (i = 0; i < (chrDstW -7); i+=4) {
+            vec_st(vini, i << 2, u);
+            vec_st(vini, i << 2, v);
+        }
+        for (; i < chrDstW; i++) {
+            u[i] = (1 << 18);
+            v[i] = (1 << 18);
+        }
+
+        for (j = 0; j < chrFilterSize; j++) {
+            vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
+            vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
+            vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
+            vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter
+
+            perm = vec_lvsl(0, chrSrc[j]);
+            l1 = vec_ld(0, chrSrc[j]);
+            l1_V = vec_ld(2048 << 1, chrSrc[j]);
+
+            for (i = 0; i < (chrDstW - 7); i+=8) {
+                int offset = i << 2;
+                vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]);
+                vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]);
+
+                vector signed int v1 = vec_ld(offset, u);
+                vector signed int v2 = vec_ld(offset + 16, u);
+                vector signed int v1_V = vec_ld(offset, v);
+                vector signed int v2_V = vec_ld(offset + 16, v);
+
+                vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7]
+                vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055]
+
+                vector signed int i1 = vec_mule(vChrFilter, ls);
+                vector signed int i2 = vec_mulo(vChrFilter, ls);
+                vector signed int i1_V = vec_mule(vChrFilter, ls_V);
+                vector signed int i2_V = vec_mulo(vChrFilter, ls_V);
+
+                vector signed int vf1 = vec_mergeh(i1, i2);
+                vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
+                vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
+                vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i+2048] * chrFilter[j] ... chrSrc[j][i+2055] * chrFilter[j]
+
+                vector signed int vo1 = vec_add(v1, vf1);
+                vector signed int vo2 = vec_add(v2, vf2);
+                vector signed int vo1_V = vec_add(v1_V, vf1_V);
+                vector signed int vo2_V = vec_add(v2_V, vf2_V);
+
+                vec_st(vo1, offset, u);
+                vec_st(vo2, offset + 16, u);
+                vec_st(vo1_V, offset, v);
+                vec_st(vo2_V, offset + 16, v);
+
+                l1 = l2;
+                l1_V = l2_V;
+            }
+            for ( ; i < chrDstW; i++) {
+                u[i] += chrSrc[j][i] * chrFilter[j];
+                v[i] += chrSrc[j][i + 2048] * chrFilter[j];
+            }
+        }
+        altivec_packIntArrayToCharArray(u, uDest, chrDstW);
+        altivec_packIntArrayToCharArray(v, vDest, chrDstW);
+    }
+}
+
+static inline void hScale_altivec_real(int16_t *dst, int dstW,
+                                       const uint8_t *src, int srcW,
+                                       int xInc, const int16_t *filter,
+                                       const int16_t *filterPos, int filterSize)
+{
+    register int i;
+    DECLARE_ALIGNED(16, int, tempo)[4];
+
+    if (filterSize % 4) {
+        for (i=0; i<dstW; i++) {
+            register int j;
+            register int srcPos = filterPos[i];
+            register int val = 0;
+            for (j=0; j<filterSize; j++) {
+                val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+            }
+            dst[i] = FFMIN(val>>7, (1<<15)-1);
+        }
+    }
+    else
+    switch (filterSize) {
+    case 4:
+    {
+    for (i=0; i<dstW; i++) {
+        register int srcPos = filterPos[i];
+
+        vector unsigned char src_v0 = vec_ld(srcPos, src);
+        vector unsigned char src_v1, src_vF;
+        vector signed short src_v, filter_v;
+        vector signed int val_vEven, val_s;
+        if ((((int)src + srcPos)% 16) > 12) {
+            src_v1 = vec_ld(srcPos + 16, src);
+        }
+        src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+        src_v = // vec_unpackh sign-extends...
+            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+        // now put our elements in the even slots
+        src_v = vec_mergeh(src_v, (vector signed short)vzero);
+
+        filter_v = vec_ld(i << 3, filter);
+        // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2).
+
+        // The neat trick: We only care for half the elements,
+        // high or low depending on (i<<3)%16 (it's 0 or 8 here),
+        // and we're going to use vec_mule, so we choose
+        // carefully how to "unpack" the elements into the even slots.
+        if ((i << 3) % 16)
+            filter_v = vec_mergel(filter_v, (vector signed short)vzero);
+        else
+            filter_v = vec_mergeh(filter_v, (vector signed short)vzero);
+
+        val_vEven = vec_mule(src_v, filter_v);
+        val_s = vec_sums(val_vEven, vzero);
+        vec_st(val_s, 0, tempo);
+        dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
+    }
+    }
+    break;
+
+    case 8:
+    {
+    for (i=0; i<dstW; i++) {
+        register int srcPos = filterPos[i];
+
+        vector unsigned char src_v0 = vec_ld(srcPos, src);
+        vector unsigned char src_v1, src_vF;
+        vector signed short src_v, filter_v;
+        vector signed int val_v, val_s;
+        if ((((int)src + srcPos)% 16) > 8) {
+            src_v1 = vec_ld(srcPos + 16, src);
+        }
+        src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+        src_v = // vec_unpackh sign-extends...
+            (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+        filter_v = vec_ld(i << 4, filter);
+        // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
+
+        val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
+        val_s = vec_sums(val_v, vzero);
+        vec_st(val_s, 0, tempo);
+        dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
+    }
+    }
+    break;
+
+    case 16:
+    {
+        for (i=0; i<dstW; i++) {
+            register int srcPos = filterPos[i];
+
+            vector unsigned char src_v0 = vec_ld(srcPos, src);
+            vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
+            vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+            vector signed short src_vA = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+            vector signed short src_vB = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+            vector signed short filter_v0 = vec_ld(i << 5, filter);
+            vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
+            // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)
+
+            vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
+            vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+            vector signed int val_s = vec_sums(val_v, vzero);
+
+            vec_st(val_s, 0, tempo);
+            dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
+        }
+    }
+    break;
+
+    default:
+    {
+    for (i=0; i<dstW; i++) {
+        register int j;
+        register int srcPos = filterPos[i];
+
+        vector signed int val_s, val_v = (vector signed int)vzero;
+        vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
+        vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);
+
+        vector unsigned char src_v0 = vec_ld(srcPos, src);
+        vector unsigned char permS = vec_lvsl(srcPos, src);
+
+        for (j = 0 ; j < filterSize - 15; j += 16) {
+            vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
+            vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+
+            vector signed short src_vA = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+            vector signed short src_vB = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+            vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+            vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
+            vector signed short filter_v0  = vec_perm(filter_v0R, filter_v1R, permF);
+            vector signed short filter_v1  = vec_perm(filter_v1R, filter_v2R, permF);
+
+            vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
+            val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+            filter_v0R = filter_v2R;
+            src_v0 = src_v1;
+        }
+
+        if (j < filterSize-7) {
+            // loading src_v0 is useless, it's already done above
+            //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
+            vector unsigned char src_v1, src_vF;
+            vector signed short src_v, filter_v1R, filter_v;
+            if ((((int)src + srcPos)% 16) > 8) {
+                src_v1 = vec_ld(srcPos + j + 16, src);
+            }
+            src_vF = vec_perm(src_v0, src_v1, permS);
+
+            src_v = // vec_unpackh sign-extends...
+                (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+            // loading filter_v0R is useless, it's already done above
+            //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
+            filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+            filter_v = vec_perm(filter_v0R, filter_v1R, permF);
+
+            val_v = vec_msums(src_v, filter_v, val_v);
+        }
+
+        val_s = vec_sums(val_v, vzero);
+
+        vec_st(val_s, 0, tempo);
+        dst[i] = FFMIN(tempo[3]>>7, (1<<15)-1);
+    }
+
+    }
+    }
+}
+
+static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                                              int srcSliceH, uint8_t* dstParam[], int dstStride_a[])
+{
+    uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
+    // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
+    uint8_t *ysrc = src[0];
+    uint8_t *usrc = src[1];
+    uint8_t *vsrc = src[2];
+    const int width = c->srcW;
+    const int height = srcSliceH;
+    const int lumStride = srcStride[0];
+    const int chromStride = srcStride[1];
+    const int dstStride = dstStride_a[0];
+    const vector unsigned char yperm = vec_lvsl(0, ysrc);
+    const int vertLumPerChroma = 2;
+    register unsigned int y;
+
+    if (width&15) {
+        yv12toyuy2(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
+        return srcSliceH;
+    }
+
+    /* This code assumes:
+
+    1) dst is 16-byte aligned
+    2) dstStride is a multiple of 16
+    3) width is a multiple of 16
+    4) lum & chrom strides are multiples of 8
+    */
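+
+    /* For orientation, a scalar sketch of the packing done vectorially below
+       (illustrative only, not part of the real code; x indexes luma pixels
+       within a row):
+
+           dst[2*x]     = ysrc[x];
+           dst[2*x + 1] = (x & 1) ? vsrc[x >> 1] : usrc[x >> 1];
+
+       i.e. each group of 4 output bytes is Y0 U0 Y1 V0 (YUY2). */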
+
+    for (y=0; y<height; y++) {
+        int i;
+        for (i = 0; i < width - 31; i+= 32) {
+            const unsigned int j = i >> 1;
+            vector unsigned char v_yA = vec_ld(i, ysrc);
+            vector unsigned char v_yB = vec_ld(i + 16, ysrc);
+            vector unsigned char v_yC = vec_ld(i + 32, ysrc);
+            vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
+            vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
+            vector unsigned char v_uA = vec_ld(j, usrc);
+            vector unsigned char v_uB = vec_ld(j + 16, usrc);
+            vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
+            vector unsigned char v_vA = vec_ld(j, vsrc);
+            vector unsigned char v_vB = vec_ld(j + 16, vsrc);
+            vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
+            vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+            vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
+            vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
+            vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
+            vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
+            vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
+            vec_st(v_yuy2_0, (i << 1), dst);
+            vec_st(v_yuy2_1, (i << 1) + 16, dst);
+            vec_st(v_yuy2_2, (i << 1) + 32, dst);
+            vec_st(v_yuy2_3, (i << 1) + 48, dst);
+        }
+        if (i < width) {
+            const unsigned int j = i >> 1;
+            vector unsigned char v_y1 = vec_ld(i, ysrc);
+            vector unsigned char v_u = vec_ld(j, usrc);
+            vector unsigned char v_v = vec_ld(j, vsrc);
+            vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+            vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
+            vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
+            vec_st(v_yuy2_0, (i << 1), dst);
+            vec_st(v_yuy2_1, (i << 1) + 16, dst);
+        }
+        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
+            usrc += chromStride;
+            vsrc += chromStride;
+        }
+        ysrc += lumStride;
+        dst += dstStride;
+    }
+
+    return srcSliceH;
+}
+
+static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                                              int srcSliceH, uint8_t* dstParam[], int dstStride_a[])
+{
+    uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
+    // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
+    uint8_t *ysrc = src[0];
+    uint8_t *usrc = src[1];
+    uint8_t *vsrc = src[2];
+    const int width = c->srcW;
+    const int height = srcSliceH;
+    const int lumStride = srcStride[0];
+    const int chromStride = srcStride[1];
+    const int dstStride = dstStride_a[0];
+    const int vertLumPerChroma = 2;
+    const vector unsigned char yperm = vec_lvsl(0, ysrc);
+    register unsigned int y;
+
+    if (width&15) {
+        yv12touyvy(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
+        return srcSliceH;
+    }
+
+    /* This code assumes:
+
+    1) dst is 16-byte aligned
+    2) dstStride is a multiple of 16
+    3) width is a multiple of 16
+    4) lum & chrom strides are multiples of 8
+    */
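+
+    /* Same idea as the YUY2 case above, but with the chroma byte first, so
+       each group of 4 output bytes is U0 Y0 V0 Y1 (UYVY); a scalar sketch
+       (illustrative only) would be
+
+           dst[2*x]     = (x & 1) ? vsrc[x >> 1] : usrc[x >> 1];
+           dst[2*x + 1] = ysrc[x]; */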
+
+    for (y=0; y<height; y++) {
+        int i;
+        for (i = 0; i < width - 31; i+= 32) {
+            const unsigned int j = i >> 1;
+            vector unsigned char v_yA = vec_ld(i, ysrc);
+            vector unsigned char v_yB = vec_ld(i + 16, ysrc);
+            vector unsigned char v_yC = vec_ld(i + 32, ysrc);
+            vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
+            vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
+            vector unsigned char v_uA = vec_ld(j, usrc);
+            vector unsigned char v_uB = vec_ld(j + 16, usrc);
+            vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
+            vector unsigned char v_vA = vec_ld(j, vsrc);
+            vector unsigned char v_vB = vec_ld(j + 16, vsrc);
+            vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
+            vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+            vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
+            vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
+            vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
+            vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
+            vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
+            vec_st(v_uyvy_0, (i << 1), dst);
+            vec_st(v_uyvy_1, (i << 1) + 16, dst);
+            vec_st(v_uyvy_2, (i << 1) + 32, dst);
+            vec_st(v_uyvy_3, (i << 1) + 48, dst);
+        }
+        if (i < width) {
+            const unsigned int j = i >> 1;
+            vector unsigned char v_y1 = vec_ld(i, ysrc);
+            vector unsigned char v_u = vec_ld(j, usrc);
+            vector unsigned char v_v = vec_ld(j, vsrc);
+            vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+            vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
+            vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
+            vec_st(v_uyvy_0, (i << 1), dst);
+            vec_st(v_uyvy_1, (i << 1) + 16, dst);
+        }
+        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
+            usrc += chromStride;
+            vsrc += chromStride;
+        }
+        ysrc += lumStride;
+        dst += dstStride;
+    }
+    return srcSliceH;
+}

Added: branches/0.6/libswscale/ppc/yuv2rgb_altivec.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/ppc/yuv2rgb_altivec.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,953 @@
+/*
+ * AltiVec acceleration for colorspace conversion
+ *
+ * copyright (C) 2004 Marc Hoffman <marc.hoffman at analog.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+Convert I420/YV12 to RGB in various formats.
+  It rejects images that are not in a 420 format,
+  images whose width is not a multiple of 16,
+  and images whose height is not a multiple of 2.
+A rejected image falls back to the C simulation code.
+
+Lots of optimizations to be done here.
+
+1. Need to fix saturation code. I just couldn't get it to fly with packs
+   and adds, so we currently use max/min to clip.
+
+2. The inefficient use of chroma loading needs a bit of brushing up.
+
+3. Analysis of pipeline stalls needs to be done. Use shark to identify
+   pipeline stalls.
+
+
+MODIFIED to calculate coeffs from currently selected color space.
+MODIFIED core to be a macro where you specify the output format.
+ADDED UYVY conversion which is never called due to something in swscale.
+CORRECTED algorithm selection to be strict on input formats.
+ADDED runtime detection of AltiVec.
+
+ADDED altivec_yuv2packedX vertical scl + RGB converter
+
+March 27,2004
+PERFORMANCE ANALYSIS
+
+The C version uses 25% of the processor, or ~250 MIPS, for the D1 rawvideo
+used as a test.
+The AltiVec version uses 10% of the processor, or ~100 MIPS, for the same
+D1 video sequence.
+
+720 * 480 * 30 = ~10 Mpixels/s,
+
+so we have roughly 10 clocks per pixel. This is too high; something has
+to be wrong.
+
+OPTIMIZED clip codes to utilize vec_max and vec_packs removing the
+need for vec_min.
+
+OPTIMIZED DST OUTPUT cache/DMA controls. We are pretty much guaranteed to have
+the input video frame in cache: it was just decompressed, so it probably still
+resides in the L1 cache. However, we are creating the output video stream, and
+that needs to use the DSTST instruction to optimize for the cache. We couple
+this with the fact that we are not going to visit the input buffer again, so we
+mark it Least Recently Used. This shaves 25% of the processor cycles off.
+
+Now memcpy is the largest MIPS consumer in the system, probably due
+to the inefficient X11 stuff.
+
+GL libraries seem to be very slow on this machine (a 1.33GHz PB running
+Jaguar); this is not the case for my 1GHz PB. I thought it might be
+a versioning issue; however, I have libGL.1.2.dylib on both
+machines. (We need to figure this out now.)
+
+GL2 libraries work now with patch for RGB32.
+
+NOTE: quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor.
+
+Integrated luma prescaling adjustment for saturation/contrast/brightness
+adjustment.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+#include "config.h"
+#include "libswscale/rgb2rgb.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+
+#undef PROFILE_THE_BEAST
+#undef INC_SCALING
+
+typedef unsigned char ubyte;
+typedef signed char   sbyte;
+
+
+/* RGB interleaver: 16 planar pels with 8-bit samples per channel, held in
+   the homogeneous vector registers x0,x1,x2, are interleaved with the
+   following technique:
+
+      o0 = vec_mergeh (x0,x1);
+      o1 = vec_perm (o0, x2, perm_rgb_0);
+      o2 = vec_perm (o0, x2, perm_rgb_1);
+      o3 = vec_mergel (x0,x1);
+      o4 = vec_perm (o3,o2,perm_rgb_2);
+      o5 = vec_perm (o3,o2,perm_rgb_3);
+
+  perm_rgb_0:   o0(RG).h v1(B) --> o1*
+              0   1  2   3   4
+             rgbr|gbrg|brgb|rgbr
+             0010 0100 1001 0010
+             0102 3145 2673 894A
+
+  perm_rgb_1:   o0(RG).h v1(B) --> o2
+              0   1  2   3   4
+             gbrg|brgb|bbbb|bbbb
+             0100 1001 1111 1111
+             B5CD 6EF7 89AB CDEF
+
+  perm_rgb_2:   o3(RG).l o2(rgbB.l) --> o4*
+              0   1  2   3   4
+             gbrg|brgb|rgbr|gbrg
+             1111 1111 0010 0100
+             89AB CDEF 0182 3945
+
+  perm_rgb_3:   o3(RG).l o2(rgbB.l) --> o5*
+              0   1  2   3   4
+             brgb|rgbr|gbrg|brgb
+             1001 0010 0100 1001
+             a67b 89cA BdCD eEFf
+
+*/
+static
+const vector unsigned char
+  perm_rgb_0 = {0x00,0x01,0x10,0x02,0x03,0x11,0x04,0x05,
+                0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a},
+  perm_rgb_1 = {0x0b,0x15,0x0c,0x0d,0x16,0x0e,0x0f,0x17,
+                0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f},
+  perm_rgb_2 = {0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
+                0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05},
+  perm_rgb_3 = {0x1a,0x06,0x07,0x1b,0x08,0x09,0x1c,0x0a,
+                0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f};
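+
+/* In scalar terms the interleave above amounts to weaving three 16-byte
+   planes into 48 bytes of packed RGB; a sketch (illustrative only, the real
+   work is done by the merge/perm macros below):
+
+       for (k = 0; k < 16; k++) {
+           out[3*k + 0] = r[k];
+           out[3*k + 1] = g[k];
+           out[3*k + 2] = b[k];
+       }
+*/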
+
+#define vec_merge3(x2,x1,x0,y0,y1,y2)       \
+do {                                        \
+    __typeof__(x0) o0,o2,o3;                \
+        o0 = vec_mergeh (x0,x1);            \
+        y0 = vec_perm (o0, x2, perm_rgb_0); \
+        o2 = vec_perm (o0, x2, perm_rgb_1); \
+        o3 = vec_mergel (x0,x1);            \
+        y1 = vec_perm (o3,o2,perm_rgb_2);   \
+        y2 = vec_perm (o3,o2,perm_rgb_3);   \
+} while(0)
+
+#define vec_mstbgr24(x0,x1,x2,ptr)      \
+do {                                    \
+    __typeof__(x0) _0,_1,_2;            \
+    vec_merge3 (x0,x1,x2,_0,_1,_2);     \
+    vec_st (_0, 0, ptr++);              \
+    vec_st (_1, 0, ptr++);              \
+    vec_st (_2, 0, ptr++);              \
+}  while (0)
+
+#define vec_mstrgb24(x0,x1,x2,ptr)      \
+do {                                    \
+    __typeof__(x0) _0,_1,_2;            \
+    vec_merge3 (x2,x1,x0,_0,_1,_2);     \
+    vec_st (_0, 0, ptr++);              \
+    vec_st (_1, 0, ptr++);              \
+    vec_st (_2, 0, ptr++);              \
+}  while (0)
+
+/* pack the pixels in rgb0 format
+   msb R
+   lsb 0
+*/
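+/* Read as a big-endian 32-bit word, each output pixel is thus (illustrative
+   sketch only)
+       pixel = (R << 24) | (G << 16) | (B << 8);
+   with the low byte left as zero padding. */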
+#define vec_mstrgb32(T,x0,x1,x2,x3,ptr)                                       \
+do {                                                                          \
+    T _0,_1,_2,_3;                                                            \
+    _0 = vec_mergeh (x0,x1);                                                  \
+    _1 = vec_mergeh (x2,x3);                                                  \
+    _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
+    _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
+    vec_st (_2, 0*16, (T *)ptr);                                              \
+    vec_st (_3, 1*16, (T *)ptr);                                              \
+    _0 = vec_mergel (x0,x1);                                                  \
+    _1 = vec_mergel (x2,x3);                                                  \
+    _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
+    _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
+    vec_st (_2, 2*16, (T *)ptr);                                              \
+    vec_st (_3, 3*16, (T *)ptr);                                              \
+    ptr += 4;                                                                 \
+}  while (0)
+
+/*
+
+  | 1     0       1.4021   | | Y |
+  | 1    -0.3441 -0.7142   |x| Cb|
+  | 1     1.7718  0        | | Cr|
+
+
+  Y:      [-128 127]
+  Cb/Cr : [-128 127]
+
+  A typical YUV conversion works on Y in the 0-255 range; this version has been optimized for JPEG decoding.
+
+*/
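+
+/* A scalar reading of the matrix above (illustrative only; the vector code
+   below does the same arithmetic in fixed point via vec_mradds), with Y, Cb
+   and Cr already centered as noted:
+
+       R = Y + 1.4021 * Cr;
+       G = Y - 0.3441 * Cb - 0.7142 * Cr;
+       B = Y + 1.7718 * Cb;
+*/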
+
+
+
+
+#define vec_unh(x) \
+    (vector signed short) \
+        vec_perm(x,(__typeof__(x)){0}, \
+                 ((vector unsigned char){0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\
+                                         0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07}))
+#define vec_unl(x) \
+    (vector signed short) \
+        vec_perm(x,(__typeof__(x)){0}, \
+                 ((vector unsigned char){0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\
+                                         0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F}))
+
+#define vec_clip_s16(x) \
+    vec_max (vec_min (x, ((vector signed short){235,235,235,235,235,235,235,235})), \
+                         ((vector signed short){ 16, 16, 16, 16, 16, 16, 16, 16}))
+
+#define vec_packclp(x,y) \
+    (vector unsigned char)vec_packs \
+        ((vector unsigned short)vec_max (x,((vector signed short) {0})), \
+         (vector unsigned short)vec_max (y,((vector signed short) {0})))
+
+//#define out_pixels(a,b,c,ptr) vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),a,a,a,ptr)
+
+
+static inline void cvtyuvtoRGB (SwsContext *c,
+                                vector signed short Y, vector signed short U, vector signed short V,
+                                vector signed short *R, vector signed short *G, vector signed short *B)
+{
+    vector signed   short vx,ux,uvx;
+
+    Y = vec_mradds (Y, c->CY, c->OY);
+    U  = vec_sub (U,(vector signed short)
+                    vec_splat((vector signed short){128},0));
+    V  = vec_sub (V,(vector signed short)
+                    vec_splat((vector signed short){128},0));
+
+    //   ux  = (CBU*(u<<c->CSHIFT)+0x4000)>>15;
+    ux = vec_sl (U, c->CSHIFT);
+    *B = vec_mradds (ux, c->CBU, Y);
+
+    // vx  = (CRV*(v<<c->CSHIFT)+0x4000)>>15;
+    vx = vec_sl (V, c->CSHIFT);
+    *R = vec_mradds (vx, c->CRV, Y);
+
+    // uvx = ((CGU*u) + (CGV*v))>>15;
+    uvx = vec_mradds (U, c->CGU, Y);
+    *G  = vec_mradds (V, c->CGV, uvx);
+}
+
+
+/*
+  ------------------------------------------------------------------------------
+  CS converters
+  ------------------------------------------------------------------------------
+*/
+
+
+#define DEFCSP420_CVT(name,out_pixels)                                  \
+static int altivec_##name (SwsContext *c,                               \
+                           unsigned char **in, int *instrides,          \
+                           int srcSliceY,        int srcSliceH,         \
+                           unsigned char **oplanes, int *outstrides)    \
+{                                                                       \
+    int w = c->srcW;                                                    \
+    int h = srcSliceH;                                                  \
+    int i,j;                                                            \
+    int instrides_scl[3];                                               \
+    vector unsigned char y0,y1;                                         \
+                                                                        \
+    vector signed char  u,v;                                            \
+                                                                        \
+    vector signed short Y0,Y1,Y2,Y3;                                    \
+    vector signed short U,V;                                            \
+    vector signed short vx,ux,uvx;                                      \
+    vector signed short vx0,ux0,uvx0;                                   \
+    vector signed short vx1,ux1,uvx1;                                   \
+    vector signed short R0,G0,B0;                                       \
+    vector signed short R1,G1,B1;                                       \
+    vector unsigned char R,G,B;                                         \
+                                                                        \
+    vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP;                  \
+    vector unsigned char align_perm;                                    \
+                                                                        \
+    vector signed short                                                 \
+        lCY  = c->CY,                                                   \
+        lOY  = c->OY,                                                   \
+        lCRV = c->CRV,                                                  \
+        lCBU = c->CBU,                                                  \
+        lCGU = c->CGU,                                                  \
+        lCGV = c->CGV;                                                  \
+                                                                        \
+    vector unsigned short lCSHIFT = c->CSHIFT;                          \
+                                                                        \
+    ubyte *y1i   = in[0];                                               \
+    ubyte *y2i   = in[0]+instrides[0];                                  \
+    ubyte *ui    = in[1];                                               \
+    ubyte *vi    = in[2];                                               \
+                                                                        \
+    vector unsigned char *oute                                          \
+        = (vector unsigned char *)                                      \
+            (oplanes[0]+srcSliceY*outstrides[0]);                       \
+    vector unsigned char *outo                                          \
+        = (vector unsigned char *)                                      \
+            (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);         \
+                                                                        \
+                                                                        \
+    instrides_scl[0] = instrides[0]*2-w;  /* the loop moves y{1,2}i by w */ \
+    instrides_scl[1] = instrides[1]-w/2;  /* the loop moves ui by w/2 */    \
+    instrides_scl[2] = instrides[2]-w/2;  /* the loop moves vi by w/2 */    \
+                                                                        \
+                                                                        \
+    for (i=0;i<h/2;i++) {                                               \
+        vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);          \
+        vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);          \
+                                                                        \
+        for (j=0;j<w/16;j++) {                                          \
+                                                                        \
+            y1ivP = (vector unsigned char *)y1i;                        \
+            y2ivP = (vector unsigned char *)y2i;                        \
+            uivP  = (vector unsigned char *)ui;                         \
+            vivP  = (vector unsigned char *)vi;                         \
+                                                                        \
+            align_perm = vec_lvsl (0, y1i);                             \
+            y0 = (vector unsigned char)                                 \
+                 vec_perm (y1ivP[0], y1ivP[1], align_perm);             \
+                                                                        \
+            align_perm = vec_lvsl (0, y2i);                             \
+            y1 = (vector unsigned char)                                 \
+                 vec_perm (y2ivP[0], y2ivP[1], align_perm);             \
+                                                                        \
+            align_perm = vec_lvsl (0, ui);                              \
+            u = (vector signed char)                                    \
+                vec_perm (uivP[0], uivP[1], align_perm);                \
+                                                                        \
+            align_perm = vec_lvsl (0, vi);                              \
+            v = (vector signed char)                                    \
+                vec_perm (vivP[0], vivP[1], align_perm);                \
+                                                                        \
+            u  = (vector signed char)                                   \
+                 vec_sub (u,(vector signed char)                        \
+                          vec_splat((vector signed char){128},0));      \
+            v  = (vector signed char)                                   \
+                 vec_sub (v,(vector signed char)                        \
+                          vec_splat((vector signed char){128},0));      \
+                                                                        \
+            U  = vec_unpackh (u);                                       \
+            V  = vec_unpackh (v);                                       \
+                                                                        \
+                                                                        \
+            Y0 = vec_unh (y0);                                          \
+            Y1 = vec_unl (y0);                                          \
+            Y2 = vec_unh (y1);                                          \
+            Y3 = vec_unl (y1);                                          \
+                                                                        \
+            Y0 = vec_mradds (Y0, lCY, lOY);                             \
+            Y1 = vec_mradds (Y1, lCY, lOY);                             \
+            Y2 = vec_mradds (Y2, lCY, lOY);                             \
+            Y3 = vec_mradds (Y3, lCY, lOY);                             \
+                                                                        \
+            /*   ux  = (CBU*(u<<CSHIFT)+0x4000)>>15 */                  \
+            ux = vec_sl (U, lCSHIFT);                                   \
+            ux = vec_mradds (ux, lCBU, (vector signed short){0});       \
+            ux0  = vec_mergeh (ux,ux);                                  \
+            ux1  = vec_mergel (ux,ux);                                  \
+                                                                        \
+            /* vx  = (CRV*(v<<CSHIFT)+0x4000)>>15;        */            \
+            vx = vec_sl (V, lCSHIFT);                                   \
+            vx = vec_mradds (vx, lCRV, (vector signed short){0});       \
+            vx0  = vec_mergeh (vx,vx);                                  \
+            vx1  = vec_mergel (vx,vx);                                  \
+                                                                        \
+            /* uvx = ((CGU*u) + (CGV*v))>>15 */                         \
+            uvx = vec_mradds (U, lCGU, (vector signed short){0});       \
+            uvx = vec_mradds (V, lCGV, uvx);                            \
+            uvx0 = vec_mergeh (uvx,uvx);                                \
+            uvx1 = vec_mergel (uvx,uvx);                                \
+                                                                        \
+            R0 = vec_add (Y0,vx0);                                      \
+            G0 = vec_add (Y0,uvx0);                                     \
+            B0 = vec_add (Y0,ux0);                                      \
+            R1 = vec_add (Y1,vx1);                                      \
+            G1 = vec_add (Y1,uvx1);                                     \
+            B1 = vec_add (Y1,ux1);                                      \
+                                                                        \
+            R  = vec_packclp (R0,R1);                                   \
+            G  = vec_packclp (G0,G1);                                   \
+            B  = vec_packclp (B0,B1);                                   \
+                                                                        \
+            out_pixels(R,G,B,oute);                                     \
+                                                                        \
+            R0 = vec_add (Y2,vx0);                                      \
+            G0 = vec_add (Y2,uvx0);                                     \
+            B0 = vec_add (Y2,ux0);                                      \
+            R1 = vec_add (Y3,vx1);                                      \
+            G1 = vec_add (Y3,uvx1);                                     \
+            B1 = vec_add (Y3,ux1);                                      \
+            R  = vec_packclp (R0,R1);                                   \
+            G  = vec_packclp (G0,G1);                                   \
+            B  = vec_packclp (B0,B1);                                   \
+                                                                        \
+                                                                        \
+            out_pixels(R,G,B,outo);                                     \
+                                                                        \
+            y1i  += 16;                                                 \
+            y2i  += 16;                                                 \
+            ui   += 8;                                                  \
+            vi   += 8;                                                  \
+                                                                        \
+        }                                                               \
+                                                                        \
+        outo  += (outstrides[0])>>4;                                    \
+        oute  += (outstrides[0])>>4;                                    \
+                                                                        \
+        ui    += instrides_scl[1];                                      \
+        vi    += instrides_scl[2];                                      \
+        y1i   += instrides_scl[0];                                      \
+        y2i   += instrides_scl[0];                                      \
+    }                                                                   \
+    return srcSliceH;                                                   \
+}
+
+
+#define out_abgr(a,b,c,ptr)  vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),c,b,a,ptr)
+#define out_bgra(a,b,c,ptr)  vec_mstrgb32(__typeof__(a),c,b,a,((__typeof__ (a)){255}),ptr)
+#define out_rgba(a,b,c,ptr)  vec_mstrgb32(__typeof__(a),a,b,c,((__typeof__ (a)){255}),ptr)
+#define out_argb(a,b,c,ptr)  vec_mstrgb32(__typeof__(a),((__typeof__ (a)){255}),a,b,c,ptr)
+#define out_rgb24(a,b,c,ptr) vec_mstrgb24(a,b,c,ptr)
+#define out_bgr24(a,b,c,ptr) vec_mstbgr24(a,b,c,ptr)
+
+DEFCSP420_CVT (yuv2_abgr, out_abgr)
+#if 1
+DEFCSP420_CVT (yuv2_bgra, out_bgra)
+#else
+static int altivec_yuv2_bgra32 (SwsContext *c,
+                                unsigned char **in, int *instrides,
+                                int srcSliceY,        int srcSliceH,
+                                unsigned char **oplanes, int *outstrides)
+{
+    int w = c->srcW;
+    int h = srcSliceH;
+    int i,j;
+    int instrides_scl[3];
+    vector unsigned char y0,y1;
+
+    vector signed char  u,v;
+
+    vector signed short Y0,Y1,Y2,Y3;
+    vector signed short U,V;
+    vector signed short vx,ux,uvx;
+    vector signed short vx0,ux0,uvx0;
+    vector signed short vx1,ux1,uvx1;
+    vector signed short R0,G0,B0;
+    vector signed short R1,G1,B1;
+    vector unsigned char R,G,B;
+
+    vector unsigned char *uivP, *vivP;
+    vector unsigned char align_perm;
+
+    vector signed short
+        lCY  = c->CY,
+        lOY  = c->OY,
+        lCRV = c->CRV,
+        lCBU = c->CBU,
+        lCGU = c->CGU,
+        lCGV = c->CGV;
+
+    vector unsigned short lCSHIFT = c->CSHIFT;
+
+    ubyte *y1i   = in[0];
+    ubyte *y2i   = in[0]+w;
+    ubyte *ui    = in[1];
+    ubyte *vi    = in[2];
+
+    vector unsigned char *oute
+        = (vector unsigned char *)
+          (oplanes[0]+srcSliceY*outstrides[0]);
+    vector unsigned char *outo
+        = (vector unsigned char *)
+          (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);
+
+
+    instrides_scl[0] = instrides[0];
+    instrides_scl[1] = instrides[1]-w/2;  /* the loop moves ui by w/2 */
+    instrides_scl[2] = instrides[2]-w/2;  /* the loop moves vi by w/2 */
+
+
+    for (i=0;i<h/2;i++) {
+        vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);
+        vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);
+
+        for (j=0;j<w/16;j++) {
+
+            y0 = vec_ldl (0,y1i);
+            y1 = vec_ldl (0,y2i);
+            uivP = (vector unsigned char *)ui;
+            vivP = (vector unsigned char *)vi;
+
+            align_perm = vec_lvsl (0, ui);
+            u  = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);
+
+            align_perm = vec_lvsl (0, vi);
+            v  = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);
+            u  = (vector signed char)
+                 vec_sub (u,(vector signed char)
+                          vec_splat((vector signed char){128},0));
+
+            v  = (vector signed char)
+                 vec_sub (v, (vector signed char)
+                          vec_splat((vector signed char){128},0));
+
+            U  = vec_unpackh (u);
+            V  = vec_unpackh (v);
+
+
+            Y0 = vec_unh (y0);
+            Y1 = vec_unl (y0);
+            Y2 = vec_unh (y1);
+            Y3 = vec_unl (y1);
+
+            Y0 = vec_mradds (Y0, lCY, lOY);
+            Y1 = vec_mradds (Y1, lCY, lOY);
+            Y2 = vec_mradds (Y2, lCY, lOY);
+            Y3 = vec_mradds (Y3, lCY, lOY);
+
+            /*   ux  = (CBU*(u<<CSHIFT)+0x4000)>>15 */
+            ux = vec_sl (U, lCSHIFT);
+            ux = vec_mradds (ux, lCBU, (vector signed short){0});
+            ux0  = vec_mergeh (ux,ux);
+            ux1  = vec_mergel (ux,ux);
+
+            /* vx  = (CRV*(v<<CSHIFT)+0x4000)>>15;        */
+            vx = vec_sl (V, lCSHIFT);
+            vx = vec_mradds (vx, lCRV, (vector signed short){0});
+            vx0  = vec_mergeh (vx,vx);
+            vx1  = vec_mergel (vx,vx);
+            /* uvx = ((CGU*u) + (CGV*v))>>15 */
+            uvx = vec_mradds (U, lCGU, (vector signed short){0});
+            uvx = vec_mradds (V, lCGV, uvx);
+            uvx0 = vec_mergeh (uvx,uvx);
+            uvx1 = vec_mergel (uvx,uvx);
+            R0 = vec_add (Y0,vx0);
+            G0 = vec_add (Y0,uvx0);
+            B0 = vec_add (Y0,ux0);
+            R1 = vec_add (Y1,vx1);
+            G1 = vec_add (Y1,uvx1);
+            B1 = vec_add (Y1,ux1);
+            R  = vec_packclp (R0,R1);
+            G  = vec_packclp (G0,G1);
+            B  = vec_packclp (B0,B1);
+
+            out_argb(R,G,B,oute);
+            R0 = vec_add (Y2,vx0);
+            G0 = vec_add (Y2,uvx0);
+            B0 = vec_add (Y2,ux0);
+            R1 = vec_add (Y3,vx1);
+            G1 = vec_add (Y3,uvx1);
+            B1 = vec_add (Y3,ux1);
+            R  = vec_packclp (R0,R1);
+            G  = vec_packclp (G0,G1);
+            B  = vec_packclp (B0,B1);
+
+            out_argb(R,G,B,outo);
+            y1i  += 16;
+            y2i  += 16;
+            ui   += 8;
+            vi   += 8;
+
+        }
+
+        outo  += (outstrides[0])>>4;
+        oute  += (outstrides[0])>>4;
+
+        ui    += instrides_scl[1];
+        vi    += instrides_scl[2];
+        y1i   += instrides_scl[0];
+        y2i   += instrides_scl[0];
+    }
+    return srcSliceH;
+}
+
+#endif
+
+
+DEFCSP420_CVT (yuv2_rgba, out_rgba)
+DEFCSP420_CVT (yuv2_argb, out_argb)
+DEFCSP420_CVT (yuv2_rgb24,  out_rgb24)
+DEFCSP420_CVT (yuv2_bgr24,  out_bgr24)
+
+
+// uyvy|uyvy|uyvy|uyvy
+// 0123 4567 89ab cdef
+static
+const vector unsigned char
+    demux_u = {0x10,0x00,0x10,0x00,
+               0x10,0x04,0x10,0x04,
+               0x10,0x08,0x10,0x08,
+               0x10,0x0c,0x10,0x0c},
+    demux_v = {0x10,0x02,0x10,0x02,
+               0x10,0x06,0x10,0x06,
+               0x10,0x0A,0x10,0x0A,
+               0x10,0x0E,0x10,0x0E},
+    demux_y = {0x10,0x01,0x10,0x03,
+               0x10,0x05,0x10,0x07,
+               0x10,0x09,0x10,0x0B,
+               0x10,0x0D,0x10,0x0F};
+
+/*
+  this is so I can play live CCIR raw video
+*/
+static int altivec_uyvy_rgb32 (SwsContext *c,
+                               unsigned char **in, int *instrides,
+                               int srcSliceY,        int srcSliceH,
+                               unsigned char **oplanes, int *outstrides)
+{
+    int w = c->srcW;
+    int h = srcSliceH;
+    int i,j;
+    vector unsigned char uyvy;
+    vector signed   short Y,U,V;
+    vector signed   short R0,G0,B0,R1,G1,B1;
+    vector unsigned char  R,G,B;
+    vector unsigned char *out;
+    ubyte *img;
+
+    img = in[0];
+    out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);
+
+    for (i=0;i<h;i++) {
+        for (j=0;j<w/16;j++) {
+            uyvy = vec_ld (0, img);
+            U = (vector signed short)
+                vec_perm (uyvy, (vector unsigned char){0}, demux_u);
+
+            V = (vector signed short)
+                vec_perm (uyvy, (vector unsigned char){0}, demux_v);
+
+            Y = (vector signed short)
+                vec_perm (uyvy, (vector unsigned char){0}, demux_y);
+
+            cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);
+
+            uyvy = vec_ld (16, img);
+            U = (vector signed short)
+                vec_perm (uyvy, (vector unsigned char){0}, demux_u);
+
+            V = (vector signed short)
+                vec_perm (uyvy, (vector unsigned char){0}, demux_v);
+
+            Y = (vector signed short)
+                vec_perm (uyvy, (vector unsigned char){0}, demux_y);
+
+            cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);
+
+            R  = vec_packclp (R0,R1);
+            G  = vec_packclp (G0,G1);
+            B  = vec_packclp (B0,B1);
+
+            //      vec_mstbgr24 (R,G,B, out);
+            out_rgba (R,G,B,out);
+
+            img += 32;
+        }
+    }
+    return srcSliceH;
+}
+
+
+
+/* OK, currently the acceleration routine only supports
+   inputs whose width is a multiple of 16
+   and whose height is a multiple of 2.
+
+   For everything else we just fall back to the C code.
+*/
+SwsFunc ff_yuv2rgb_init_altivec(SwsContext *c)
+{
+    if (!(c->flags & SWS_CPU_CAPS_ALTIVEC))
+        return NULL;
+
+    /*
+      This does not seem to matter too much in practice: I tried a bunch of
+      videos with abnormal widths and MPlayer crashed elsewhere anyway, e.g.
+      mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
+      goes boom with an X11 BadMatch.
+    */
+    if ((c->srcW & 0xf) != 0)    return NULL;
+
+    switch (c->srcFormat) {
+    case PIX_FMT_YUV410P:
+    case PIX_FMT_YUV420P:
+    /*case IMGFMT_CLPL:        ??? */
+    case PIX_FMT_GRAY8:
+    case PIX_FMT_NV12:
+    case PIX_FMT_NV21:
+        if ((c->srcH & 0x1) != 0)
+            return NULL;
+
+        switch(c->dstFormat) {
+        case PIX_FMT_RGB24:
+            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n");
+            return altivec_yuv2_rgb24;
+        case PIX_FMT_BGR24:
+            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n");
+            return altivec_yuv2_bgr24;
+        case PIX_FMT_ARGB:
+            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n");
+            return altivec_yuv2_argb;
+        case PIX_FMT_ABGR:
+            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n");
+            return altivec_yuv2_abgr;
+        case PIX_FMT_RGBA:
+            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n");
+            return altivec_yuv2_rgba;
+        case PIX_FMT_BGRA:
+            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n");
+            return altivec_yuv2_bgra;
+        default: return NULL;
+        }
+        break;
+
+    case PIX_FMT_UYVY422:
+        switch(c->dstFormat) {
+        case PIX_FMT_BGR32:
+            av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n");
+            return altivec_uyvy_rgb32;
+        default: return NULL;
+        }
+        break;
+
+    }
+    return NULL;
+}
+
+void ff_yuv2rgb_init_tables_altivec(SwsContext *c, const int inv_table[4], int brightness, int contrast, int saturation)
+{
+    union {
+        DECLARE_ALIGNED(16, signed short, tmp)[8];
+        vector signed short vec;
+    } buf;
+
+    buf.tmp[0] =  ((0xffffLL) * contrast>>8)>>9;                        //cy
+    buf.tmp[1] =  -256*brightness;                                      //oy
+    buf.tmp[2] =  (inv_table[0]>>3) *(contrast>>16)*(saturation>>16);   //crv
+    buf.tmp[3] =  (inv_table[1]>>3) *(contrast>>16)*(saturation>>16);   //cbu
+    buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16));  //cgu
+    buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16));  //cgv
+
+
+    c->CSHIFT = (vector unsigned short)vec_splat_u16(2);
+    c->CY   = vec_splat ((vector signed short)buf.vec, 0);
+    c->OY   = vec_splat ((vector signed short)buf.vec, 1);
+    c->CRV  = vec_splat ((vector signed short)buf.vec, 2);
+    c->CBU  = vec_splat ((vector signed short)buf.vec, 3);
+    c->CGU  = vec_splat ((vector signed short)buf.vec, 4);
+    c->CGV  = vec_splat ((vector signed short)buf.vec, 5);
+    return;
+}
+
+
+void
+ff_yuv2packedX_altivec(SwsContext *c,
+                       const int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+                       const int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+                     uint8_t *dest, int dstW, int dstY)
+{
+    int i,j;
+    vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
+    vector signed short R0,G0,B0,R1,G1,B1;
+
+    vector unsigned char R,G,B;
+    vector unsigned char *out,*nout;
+
+    vector signed short   RND = vec_splat_s16(1<<3);
+    vector unsigned short SCL = vec_splat_u16(4);
+    DECLARE_ALIGNED(16, unsigned long, scratch)[16];
+
+    vector signed short *YCoeffs, *CCoeffs;
+
+    YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
+    CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;
+
+    out = (vector unsigned char *)dest;
+
+    for (i=0; i<dstW; i+=16) {
+        Y0 = RND;
+        Y1 = RND;
+        /* extract 16 coeffs from lumSrc */
+        for (j=0; j<lumFilterSize; j++) {
+            X0 = vec_ld (0,  &lumSrc[j][i]);
+            X1 = vec_ld (16, &lumSrc[j][i]);
+            Y0 = vec_mradds (X0, YCoeffs[j], Y0);
+            Y1 = vec_mradds (X1, YCoeffs[j], Y1);
+        }
+
+        U = RND;
+        V = RND;
+        /* extract 8 coeffs from U,V */
+        for (j=0; j<chrFilterSize; j++) {
+            X  = vec_ld (0, &chrSrc[j][i/2]);
+            U  = vec_mradds (X, CCoeffs[j], U);
+            X  = vec_ld (0, &chrSrc[j][i/2+2048]);
+            V  = vec_mradds (X, CCoeffs[j], V);
+        }
+
+        /* scale and clip signals */
+        Y0 = vec_sra (Y0, SCL);
+        Y1 = vec_sra (Y1, SCL);
+        U  = vec_sra (U,  SCL);
+        V  = vec_sra (V,  SCL);
+
+        Y0 = vec_clip_s16 (Y0);
+        Y1 = vec_clip_s16 (Y1);
+        U  = vec_clip_s16 (U);
+        V  = vec_clip_s16 (V);
+
+        /* now we have
+          Y0= y0 y1 y2 y3 y4 y5 y6 y7     Y1= y8 y9 y10 y11 y12 y13 y14 y15
+          U= u0 u1 u2 u3 u4 u5 u6 u7      V= v0 v1 v2 v3 v4 v5 v6 v7
+
+          Y0= y0 y1 y2 y3 y4 y5 y6 y7    Y1= y8 y9 y10 y11 y12 y13 y14 y15
+          U0= u0 u0 u1 u1 u2 u2 u3 u3    U1= u4 u4 u5 u5 u6 u6 u7 u7
+          V0= v0 v0 v1 v1 v2 v2 v3 v3    V1= v4 v4 v5 v5 v6 v6 v7 v7
+        */
+
+        U0 = vec_mergeh (U,U);
+        V0 = vec_mergeh (V,V);
+
+        U1 = vec_mergel (U,U);
+        V1 = vec_mergel (V,V);
+
+        cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
+        cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
+
+        R  = vec_packclp (R0,R1);
+        G  = vec_packclp (G0,G1);
+        B  = vec_packclp (B0,B1);
+
+        switch(c->dstFormat) {
+        case PIX_FMT_ABGR:  out_abgr  (R,G,B,out); break;
+        case PIX_FMT_BGRA:  out_bgra  (R,G,B,out); break;
+        case PIX_FMT_RGBA:  out_rgba  (R,G,B,out); break;
+        case PIX_FMT_ARGB:  out_argb  (R,G,B,out); break;
+        case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
+        case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
+        default:
+            {
+                /* If this is reached, the caller should have called yuv2packedXinC
+                   instead. */
+                static int printed_error_message;
+                if (!printed_error_message) {
+                    av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
+                           sws_format_name(c->dstFormat));
+                    printed_error_message=1;
+                }
+                return;
+            }
+        }
+    }
+
+    if (i < dstW) {
+        i -= 16;
+
+        Y0 = RND;
+        Y1 = RND;
+        /* extract 16 coeffs from lumSrc */
+        for (j=0; j<lumFilterSize; j++) {
+            X0 = vec_ld (0,  &lumSrc[j][i]);
+            X1 = vec_ld (16, &lumSrc[j][i]);
+            Y0 = vec_mradds (X0, YCoeffs[j], Y0);
+            Y1 = vec_mradds (X1, YCoeffs[j], Y1);
+        }
+
+        U = RND;
+        V = RND;
+        /* extract 8 coeffs from U,V */
+        for (j=0; j<chrFilterSize; j++) {
+            X  = vec_ld (0, &chrSrc[j][i/2]);
+            U  = vec_mradds (X, CCoeffs[j], U);
+            X  = vec_ld (0, &chrSrc[j][i/2+2048]);
+            V  = vec_mradds (X, CCoeffs[j], V);
+        }
+
+        /* scale and clip signals */
+        Y0 = vec_sra (Y0, SCL);
+        Y1 = vec_sra (Y1, SCL);
+        U  = vec_sra (U,  SCL);
+        V  = vec_sra (V,  SCL);
+
+        Y0 = vec_clip_s16 (Y0);
+        Y1 = vec_clip_s16 (Y1);
+        U  = vec_clip_s16 (U);
+        V  = vec_clip_s16 (V);
+
+        /* now we have
+           Y0= y0 y1 y2 y3 y4 y5 y6 y7     Y1= y8 y9 y10 y11 y12 y13 y14 y15
+           U = u0 u1 u2 u3 u4 u5 u6 u7     V = v0 v1 v2 v3 v4 v5 v6 v7
+
+           Y0= y0 y1 y2 y3 y4 y5 y6 y7    Y1= y8 y9 y10 y11 y12 y13 y14 y15
+           U0= u0 u0 u1 u1 u2 u2 u3 u3    U1= u4 u4 u5 u5 u6 u6 u7 u7
+           V0= v0 v0 v1 v1 v2 v2 v3 v3    V1= v4 v4 v5 v5 v6 v6 v7 v7
+        */
+
+        U0 = vec_mergeh (U,U);
+        V0 = vec_mergeh (V,V);
+
+        U1 = vec_mergel (U,U);
+        V1 = vec_mergel (V,V);
+
+        cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
+        cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
+
+        R  = vec_packclp (R0,R1);
+        G  = vec_packclp (G0,G1);
+        B  = vec_packclp (B0,B1);
+
+        nout = (vector unsigned char *)scratch;
+        switch(c->dstFormat) {
+        case PIX_FMT_ABGR:  out_abgr  (R,G,B,nout); break;
+        case PIX_FMT_BGRA:  out_bgra  (R,G,B,nout); break;
+        case PIX_FMT_RGBA:  out_rgba  (R,G,B,nout); break;
+        case PIX_FMT_ARGB:  out_argb  (R,G,B,nout); break;
+        case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
+        case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
+        default:
+            /* Unreachable, I think. */
+            av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
+                   sws_format_name(c->dstFormat));
+            return;
+        }
+
+        memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
+    }
+
+}

Added: branches/0.6/libswscale/rgb2rgb.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/rgb2rgb.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,461 @@
+/*
+ * software RGB to RGB converter
+ * extended by a software PAL8 to RGB converter,
+ *             a software YUV to YUV converter,
+ *             and a software YUV to RGB converter
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni at gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <inttypes.h>
+#include "config.h"
+#include "libavutil/x86_cpu.h"
+#include "libavutil/bswap.h"
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+
+#define FAST_BGR2YV12 // use 7-bit instead of 15-bit coefficients
+
+void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
+
+void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                   long width, long height,
+                   long lumStride, long chromStride, long dstStride);
+void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                   long width, long height,
+                   long lumStride, long chromStride, long dstStride);
+void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                      long width, long height,
+                      long lumStride, long chromStride, long dstStride);
+void (*yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                      long width, long height,
+                      long lumStride, long chromStride, long dstStride);
+void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                   long width, long height,
+                   long lumStride, long chromStride, long srcStride);
+void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                    long width, long height,
+                    long lumStride, long chromStride, long srcStride);
+void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height,
+                 long srcStride, long dstStride);
+void (*interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dst,
+                        long width, long height, long src1Stride,
+                        long src2Stride, long dstStride);
+void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+                    uint8_t *dst1, uint8_t *dst2,
+                    long width, long height,
+                    long srcStride1, long srcStride2,
+                    long dstStride1, long dstStride2);
+void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+                     uint8_t *dst,
+                     long width, long height,
+                     long srcStride1, long srcStride2,
+                     long srcStride3, long dstStride);
+void (*uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                     long width, long height,
+                     long lumStride, long chromStride, long srcStride);
+void (*uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                     long width, long height,
+                     long lumStride, long chromStride, long srcStride);
+void (*yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                     long width, long height,
+                     long lumStride, long chromStride, long srcStride);
+void (*yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                     long width, long height,
+                     long lumStride, long chromStride, long srcStride);
+
+
+#if ARCH_X86
+DECLARE_ASM_CONST(8, uint64_t, mmx_null)     = 0x0000000000000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_one)      = 0xFFFFFFFFFFFFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32b)      = 0x000000FF000000FFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32g)      = 0x0000FF000000FF00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32r)      = 0x00FF000000FF0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32a)      = 0xFF000000FF000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32)       = 0x00FFFFFF00FFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3216br)   = 0x00F800F800F800F8ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3216g)    = 0x0000FC000000FC00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3215g)    = 0x0000F8000000F800ULL;
+DECLARE_ASM_CONST(8, uint64_t, mul3216)      = 0x2000000420000004ULL;
+DECLARE_ASM_CONST(8, uint64_t, mul3215)      = 0x2000000820000008ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24b)      = 0x00FF0000FF0000FFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24g)      = 0xFF0000FF0000FF00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24r)      = 0x0000FF0000FF0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24l)      = 0x0000000000FFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24h)      = 0x0000FFFFFF000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hh)     = 0xffff000000000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hhh)    = 0xffffffff00000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hhhh)   = 0xffffffffffff0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15b)      = 0x001F001F001F001FULL; /* 00000000 00011111  xxB */
+DECLARE_ASM_CONST(8, uint64_t, mask15rg)     = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000  RGx */
+DECLARE_ASM_CONST(8, uint64_t, mask15s)      = 0xFFE0FFE0FFE0FFE0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15g)      = 0x03E003E003E003E0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15r)      = 0x7C007C007C007C00ULL;
+#define mask16b mask15b
+DECLARE_ASM_CONST(8, uint64_t, mask16g)      = 0x07E007E007E007E0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask16r)      = 0xF800F800F800F800ULL;
+DECLARE_ASM_CONST(8, uint64_t, red_16mask)   = 0x0000f8000000f800ULL;
+DECLARE_ASM_CONST(8, uint64_t, green_16mask) = 0x000007e0000007e0ULL;
+DECLARE_ASM_CONST(8, uint64_t, blue_16mask)  = 0x0000001f0000001fULL;
+DECLARE_ASM_CONST(8, uint64_t, red_15mask)   = 0x00007c0000007c00ULL;
+DECLARE_ASM_CONST(8, uint64_t, green_15mask) = 0x000003e0000003e0ULL;
+DECLARE_ASM_CONST(8, uint64_t, blue_15mask)  = 0x0000001f0000001fULL;
+#endif /* ARCH_X86 */
+
+#define RGB2YUV_SHIFT 8
+#define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
+#define BV ((int)(-0.071*(1<<RGB2YUV_SHIFT)+0.5))
+#define BU ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define GY ((int)( 0.504*(1<<RGB2YUV_SHIFT)+0.5))
+#define GV ((int)(-0.368*(1<<RGB2YUV_SHIFT)+0.5))
+#define GU ((int)(-0.291*(1<<RGB2YUV_SHIFT)+0.5))
+#define RY ((int)( 0.257*(1<<RGB2YUV_SHIFT)+0.5))
+#define RV ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
+#define RU ((int)(-0.148*(1<<RGB2YUV_SHIFT)+0.5))
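With RGB2YUV_SHIFT at 8, the macros above are the BT.601 studio-swing coefficients rounded to fixed point. A minimal sketch of how such coefficients combine into a luma sample (illustration only; the helper name and the exact rounding/bias are assumptions, not part of the commit):

    /* sketch: fixed-point BT.601 luma from the coefficients above;
     * the +16 bias is the studio-swing black level                  */
    static inline int rgb_to_y601(int r, int g, int b)
    {
        return ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16;
    }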
+
+//Note: We have C, MMX, MMX2 and 3DNOW versions; there is no combined 3DNOW + MMX2 one.
+//plain C versions
+#undef HAVE_MMX
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#undef HAVE_SSE2
+#define HAVE_MMX 0
+#define HAVE_MMX2 0
+#define HAVE_AMD3DNOW 0
+#define HAVE_SSE2 0
+#define RENAME(a) a ## _C
+#include "rgb2rgb_template.c"
+
+#if ARCH_X86
+
+//MMX versions
+#undef RENAME
+#undef HAVE_MMX
+#define HAVE_MMX 1
+#define RENAME(a) a ## _MMX
+#include "rgb2rgb_template.c"
+
+//MMX2 versions
+#undef RENAME
+#undef HAVE_MMX2
+#define HAVE_MMX2 1
+#define RENAME(a) a ## _MMX2
+#include "rgb2rgb_template.c"
+
+//3DNOW versions
+#undef RENAME
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#define HAVE_MMX2 0
+#define HAVE_AMD3DNOW 1
+#define RENAME(a) a ## _3DNOW
+#include "rgb2rgb_template.c"
+
+#endif /* ARCH_X86 */
+
+/*
+ RGB15->RGB16 original by Strepto/Astral
+ ported to gcc & bugfixed: A'rpi
+ MMX2, 3DNOW optimization by Nick Kurshev
+ 32-bit C version, and the and&add trick by Michael Niedermayer
+*/
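The "and&add trick" credited above is what the template's rgb15to16() relies on: in RGB15 (0RRRRRGGGGGBBBBB) the R and G fields must move up one bit to become RGB16 (RRRRRGGGGGGBBBBB) while B stays put, and adding the masked value to itself doubles exactly those fields. A worked 16-bit example (illustration only, not part of the commit):

    uint16_t x = 0x7FFF;                       /* white in RGB15            */
    uint16_t y = (x & 0x7FFF) + (x & 0x7FE0);  /* 0x7FFF + 0x7FE0 = 0xFFDF  */
    /* 0xFFDF decodes as R=31, G=62, B=31: the new green LSB stays zero.    */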
+
+void sws_rgb2rgb_init(int flags)
+{
+#if HAVE_MMX2 || HAVE_AMD3DNOW || HAVE_MMX
+    if (flags & SWS_CPU_CAPS_MMX2)
+        rgb2rgb_init_MMX2();
+    else if (flags & SWS_CPU_CAPS_3DNOW)
+        rgb2rgb_init_3DNOW();
+    else if (flags & SWS_CPU_CAPS_MMX)
+        rgb2rgb_init_MMX();
+    else
+#endif /* HAVE_MMX2 || HAVE_AMD3DNOW || HAVE_MMX */
+        rgb2rgb_init_C();
+}
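A minimal usage sketch, assuming the SWS_CPU_CAPS_* flags from swscale.h (within the library this initialization is normally performed for you when a scaler context is set up):

    uint8_t src[16 * 3], dst[16 * 4];
    sws_rgb2rgb_init(SWS_CPU_CAPS_MMX2);   /* install the _MMX2 function pointers */
    rgb24tobgr32(src, dst, sizeof(src));   /* the indirect call now uses them     */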
+
+/**
+ * Convert the palette to the same packed 32-bit format as the palette
+ */
+void palette8topacked32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+    long i;
+
+    for (i=0; i<num_pixels; i++)
+        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i]];
+}
+
+/**
+ * Palette format: ABCD -> dst format: ABC
+ */
+void palette8topacked24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+    long i;
+
+    for (i=0; i<num_pixels; i++) {
+        //FIXME slow?
+        dst[0]= palette[src[i]*4+0];
+        dst[1]= palette[src[i]*4+1];
+        dst[2]= palette[src[i]*4+2];
+        dst+= 3;
+    }
+}
+
+/**
+ * Palette is assumed to contain BGR16, see rgb32to16 to convert the palette.
+ */
+void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+    long i;
+    for (i=0; i<num_pixels; i++)
+        ((uint16_t *)dst)[i] = ((const uint16_t *)palette)[src[i]];
+}
+void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+    long i;
+    for (i=0; i<num_pixels; i++)
+        ((uint16_t *)dst)[i] = bswap_16(((const uint16_t *)palette)[src[i]]);
+}
+
+/**
+ * Palette is assumed to contain BGR15, see rgb32to15 to convert the palette.
+ */
+void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+    long i;
+    for (i=0; i<num_pixels; i++)
+        ((uint16_t *)dst)[i] = ((const uint16_t *)palette)[src[i]];
+}
+void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
+{
+    long i;
+    for (i=0; i<num_pixels; i++)
+        ((uint16_t *)dst)[i] = bswap_16(((const uint16_t *)palette)[src[i]]);
+}
+
+void rgb32to24(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    long i;
+    long num_pixels = src_size >> 2;
+    for (i=0; i<num_pixels; i++) {
+#if HAVE_BIGENDIAN
+        /* RGB32 (= A,B,G,R) -> BGR24 (= B,G,R) */
+        dst[3*i + 0] = src[4*i + 1];
+        dst[3*i + 1] = src[4*i + 2];
+        dst[3*i + 2] = src[4*i + 3];
+#else
+        dst[3*i + 0] = src[4*i + 2];
+        dst[3*i + 1] = src[4*i + 1];
+        dst[3*i + 2] = src[4*i + 0];
+#endif
+    }
+}
+
+void rgb24to32(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    long i;
+    for (i=0; 3*i<src_size; i++) {
+#if HAVE_BIGENDIAN
+        /* RGB24 (= R,G,B) -> BGR32 (= A,R,G,B) */
+        dst[4*i + 0] = 255;
+        dst[4*i + 1] = src[3*i + 0];
+        dst[4*i + 2] = src[3*i + 1];
+        dst[4*i + 3] = src[3*i + 2];
+#else
+        dst[4*i + 0] = src[3*i + 2];
+        dst[4*i + 1] = src[3*i + 1];
+        dst[4*i + 2] = src[3*i + 0];
+        dst[4*i + 3] = 255;
+#endif
+    }
+}
+
+void rgb16tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint16_t *end;
+    uint8_t *d = dst;
+    const uint16_t *s = (const uint16_t *)src;
+    end = s + src_size/2;
+    while (s < end) {
+        register uint16_t bgr;
+        bgr = *s++;
+#if HAVE_BIGENDIAN
+        *d++ = 255;
+        *d++ = (bgr&0x1F)<<3;
+        *d++ = (bgr&0x7E0)>>3;
+        *d++ = (bgr&0xF800)>>8;
+#else
+        *d++ = (bgr&0xF800)>>8;
+        *d++ = (bgr&0x7E0)>>3;
+        *d++ = (bgr&0x1F)<<3;
+        *d++ = 255;
+#endif
+    }
+}
+
+void rgb16to24(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint16_t *end;
+    uint8_t *d = dst;
+    const uint16_t *s = (const uint16_t *)src;
+    end = s + src_size/2;
+    while (s < end) {
+        register uint16_t bgr;
+        bgr = *s++;
+        *d++ = (bgr&0xF800)>>8;
+        *d++ = (bgr&0x7E0)>>3;
+        *d++ = (bgr&0x1F)<<3;
+    }
+}
+
+void rgb16tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    long i;
+    long num_pixels = src_size >> 1;
+
+    for (i=0; i<num_pixels; i++) {
+        unsigned rgb = ((const uint16_t*)src)[i];
+        ((uint16_t*)dst)[i] = (rgb>>11) | (rgb&0x7E0) | (rgb<<11);
+    }
+}
+
+void rgb16tobgr15(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    long i;
+    long num_pixels = src_size >> 1;
+
+    for (i=0; i<num_pixels; i++) {
+        unsigned rgb = ((const uint16_t*)src)[i];
+        ((uint16_t*)dst)[i] = (rgb>>11) | ((rgb&0x7C0)>>1) | ((rgb&0x1F)<<10);
+    }
+}
+
+void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint16_t *end;
+    uint8_t *d = dst;
+    const uint16_t *s = (const uint16_t *)src;
+    end = s + src_size/2;
+    while (s < end) {
+        register uint16_t bgr;
+        bgr = *s++;
+#if HAVE_BIGENDIAN
+        *d++ = 255;
+        *d++ = (bgr&0x1F)<<3;
+        *d++ = (bgr&0x3E0)>>2;
+        *d++ = (bgr&0x7C00)>>7;
+#else
+        *d++ = (bgr&0x7C00)>>7;
+        *d++ = (bgr&0x3E0)>>2;
+        *d++ = (bgr&0x1F)<<3;
+        *d++ = 255;
+#endif
+    }
+}
+
+void rgb15to24(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint16_t *end;
+    uint8_t *d = dst;
+    const uint16_t *s = (const uint16_t *)src;
+    end = s + src_size/2;
+    while (s < end) {
+        register uint16_t bgr;
+        bgr = *s++;
+        *d++ = (bgr&0x7C00)>>7;
+        *d++ = (bgr&0x3E0)>>2;
+        *d++ = (bgr&0x1F)<<3;
+    }
+}
+
+void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    long i;
+    long num_pixels = src_size >> 1;
+
+    for (i=0; i<num_pixels; i++) {
+        unsigned rgb = ((const uint16_t*)src)[i];
+        ((uint16_t*)dst)[i] = ((rgb&0x7C00)>>10) | ((rgb&0x3E0)<<1) | (rgb<<11);
+    }
+}
+
+void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    long i;
+    long num_pixels = src_size >> 1;
+
+    for (i=0; i<num_pixels; i++) {
+        unsigned br;
+        unsigned rgb = ((const uint16_t*)src)[i];
+        br = rgb&0x7c1F;
+        ((uint16_t*)dst)[i] = (br>>10) | (rgb&0x3E0) | (br<<10);
+    }
+}
+
+void bgr8torgb8(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    long i;
+    long num_pixels = src_size;
+    for (i=0; i<num_pixels; i++) {
+        unsigned b,g,r;
+        register uint8_t rgb;
+        rgb = src[i];
+        r = (rgb&0x07);
+        g = (rgb&0x38)>>3;
+        b = (rgb&0xC0)>>6;
+        dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6);
+    }
+}
+
+#define DEFINE_SHUFFLE_BYTES(a, b, c, d)                                \
+void shuffle_bytes_##a##b##c##d(const uint8_t *src, uint8_t *dst, long src_size) \
+{                                                                       \
+    long i;                                                             \
+                                                                        \
+    for (i = 0; i < src_size; i+=4) {                                   \
+        dst[i + 0] = src[i + a];                                        \
+        dst[i + 1] = src[i + b];                                        \
+        dst[i + 2] = src[i + c];                                        \
+        dst[i + 3] = src[i + d];                                        \
+    }                                                                   \
+}
+
+DEFINE_SHUFFLE_BYTES(0, 3, 2, 1);
+DEFINE_SHUFFLE_BYTES(1, 2, 3, 0);
+DEFINE_SHUFFLE_BYTES(2, 1, 0, 3);
+DEFINE_SHUFFLE_BYTES(3, 0, 1, 2);
+DEFINE_SHUFFLE_BYTES(3, 2, 1, 0);
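As a concrete reading of one expansion (sample values are illustrative only): shuffle_bytes_2103() swaps bytes 0 and 2 of every 4-byte group, i.e. the usual RGBA/BGRA swap:

    uint8_t in[4] = { 0x11, 0x22, 0x33, 0x44 };
    uint8_t out[4];
    shuffle_bytes_2103(in, out, 4);        /* out == { 0x33, 0x22, 0x11, 0x44 } */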
+

Added: branches/0.6/libswscale/rgb2rgb.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/rgb2rgb.h	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,166 @@
+/*
+ *  software RGB to RGB converter
+ *  augmented by Software PAL8 to RGB converter
+ *               Software YUV to YUV converter
+ *               Software YUV to RGB converter
+ *  Written by Nick Kurshev.
+ *  palette & YUV & runtime CPU stuff by Michael (michaelni at gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWSCALE_RGB2RGB_H
+#define SWSCALE_RGB2RGB_H
+
+#include <inttypes.h>
+
+/* A full collection of RGB to RGB(BGR) converters */
+extern void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32to16)   (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32to15)   (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb15to16)   (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb15to32)   (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb16to15)   (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb16to32)   (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24to16)   (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24to15)   (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
+
+void rgb24to32   (const uint8_t *src, uint8_t *dst, long src_size);
+void rgb32to24   (const uint8_t *src, uint8_t *dst, long src_size);
+void rgb16tobgr32(const uint8_t *src, uint8_t *dst, long src_size);
+void rgb16to24   (const uint8_t *src, uint8_t *dst, long src_size);
+void rgb16tobgr16(const uint8_t *src, uint8_t *dst, long src_size);
+void rgb16tobgr15(const uint8_t *src, uint8_t *dst, long src_size);
+void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size);
+void rgb15to24   (const uint8_t *src, uint8_t *dst, long src_size);
+void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size);
+void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size);
+void bgr8torgb8  (const uint8_t *src, uint8_t *dst, long src_size);
+
+void shuffle_bytes_0321(const uint8_t *src, uint8_t *dst, long src_size);
+void shuffle_bytes_1230(const uint8_t *src, uint8_t *dst, long src_size);
+void shuffle_bytes_2103(const uint8_t *src, uint8_t *dst, long src_size);
+void shuffle_bytes_3012(const uint8_t *src, uint8_t *dst, long src_size);
+void shuffle_bytes_3210(const uint8_t *src, uint8_t *dst, long src_size);
+
+void palette8topacked32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+void palette8topacked24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line, others are ignored.
+ * FIXME: Write high quality version.
+ */
+//void uyvytoyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+extern void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                          long width, long height,
+                          long lumStride, long chromStride, long dstStride);
+
+/**
+ * Width should be a multiple of 16.
+ */
+extern void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                             long width, long height,
+                             long lumStride, long chromStride, long dstStride);
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+extern void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                          long width, long height,
+                          long lumStride, long chromStride, long srcStride);
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+extern void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                          long width, long height,
+                          long lumStride, long chromStride, long dstStride);
+
+/**
+ * Width should be a multiple of 16.
+ */
+extern void (*yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                             long width, long height,
+                             long lumStride, long chromStride, long dstStride);
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 2.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line, others are ignored.
+ * FIXME: Write high quality version.
+ */
+extern void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                           long width, long height,
+                           long lumStride, long chromStride, long srcStride);
+extern void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height,
+                        long srcStride, long dstStride);
+
+extern void (*interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dst,
+                               long width, long height, long src1Stride,
+                               long src2Stride, long dstStride);
+
+extern void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+                           uint8_t *dst1, uint8_t *dst2,
+                           long width, long height,
+                           long srcStride1, long srcStride2,
+                           long dstStride1, long dstStride2);
+
+extern void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+                            uint8_t *dst,
+                            long width, long height,
+                            long srcStride1, long srcStride2,
+                            long srcStride3, long dstStride);
+
+
+extern void (*uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                            long width, long height,
+                            long lumStride, long chromStride, long srcStride);
+extern void (*uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                            long width, long height,
+                            long lumStride, long chromStride, long srcStride);
+extern void (*yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                            long width, long height,
+                            long lumStride, long chromStride, long srcStride);
+extern void (*yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                            long width, long height,
+                            long lumStride, long chromStride, long srcStride);
+
+void sws_rgb2rgb_init(int flags);
+
+#endif /* SWSCALE_RGB2RGB_H */

Added: branches/0.6/libswscale/rgb2rgb_template.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/rgb2rgb_template.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,2944 @@
+/*
+ * software RGB to RGB converter
+ * augmented by software PAL8 to RGB converter
+ *              software YUV to YUV converter
+ *              software YUV to RGB converter
+ * Written by Nick Kurshev.
+ * palette & YUV & runtime CPU stuff by Michael (michaelni at gmx.at)
+ * lot of big-endian byte order fixes by Alex Beregszaszi
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stddef.h>
+
+#undef PREFETCH
+#undef MOVNTQ
+#undef EMMS
+#undef SFENCE
+#undef MMREG_SIZE
+#undef PAVGB
+
+#if HAVE_SSE2
+#define MMREG_SIZE 16
+#else
+#define MMREG_SIZE 8
+#endif
+
+#if HAVE_AMD3DNOW
+#define PREFETCH  "prefetch"
+#define PAVGB     "pavgusb"
+#elif HAVE_MMX2
+#define PREFETCH "prefetchnta"
+#define PAVGB     "pavgb"
+#else
+#define PREFETCH  " # nop"
+#endif
+
+#if HAVE_AMD3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS     "femms"
+#else
+#define EMMS     "emms"
+#endif
+
+#if HAVE_MMX2
+#define MOVNTQ "movntq"
+#define SFENCE "sfence"
+#else
+#define MOVNTQ "movq"
+#define SFENCE " # nop"
+#endif
+
+static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    uint8_t *dest = dst;
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
+    mm_end = end - 23;
+    __asm__ volatile("movq        %0, %%mm7"::"m"(mask32a):"memory");
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movd          %1, %%mm0    \n\t"
+            "punpckldq    3%1, %%mm0    \n\t"
+            "movd         6%1, %%mm1    \n\t"
+            "punpckldq    9%1, %%mm1    \n\t"
+            "movd        12%1, %%mm2    \n\t"
+            "punpckldq   15%1, %%mm2    \n\t"
+            "movd        18%1, %%mm3    \n\t"
+            "punpckldq   21%1, %%mm3    \n\t"
+            "por        %%mm7, %%mm0    \n\t"
+            "por        %%mm7, %%mm1    \n\t"
+            "por        %%mm7, %%mm2    \n\t"
+            "por        %%mm7, %%mm3    \n\t"
+            MOVNTQ"     %%mm0,   %0     \n\t"
+            MOVNTQ"     %%mm1,  8%0     \n\t"
+            MOVNTQ"     %%mm2, 16%0     \n\t"
+            MOVNTQ"     %%mm3, 24%0"
+            :"=m"(*dest)
+            :"m"(*s)
+            :"memory");
+        dest += 32;
+        s += 24;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+#if HAVE_BIGENDIAN
+        /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */
+        *dest++ = 255;
+        *dest++ = s[2];
+        *dest++ = s[1];
+        *dest++ = s[0];
+        s+=3;
+#else
+        *dest++ = *s++;
+        *dest++ = *s++;
+        *dest++ = *s++;
+        *dest++ = 255;
+#endif
+    }
+}
+
+#define STORE_BGR24_MMX \
+            "psrlq         $8, %%mm2    \n\t" \
+            "psrlq         $8, %%mm3    \n\t" \
+            "psrlq         $8, %%mm6    \n\t" \
+            "psrlq         $8, %%mm7    \n\t" \
+            "pand "MANGLE(mask24l)", %%mm0\n\t" \
+            "pand "MANGLE(mask24l)", %%mm1\n\t" \
+            "pand "MANGLE(mask24l)", %%mm4\n\t" \
+            "pand "MANGLE(mask24l)", %%mm5\n\t" \
+            "pand "MANGLE(mask24h)", %%mm2\n\t" \
+            "pand "MANGLE(mask24h)", %%mm3\n\t" \
+            "pand "MANGLE(mask24h)", %%mm6\n\t" \
+            "pand "MANGLE(mask24h)", %%mm7\n\t" \
+            "por        %%mm2, %%mm0    \n\t" \
+            "por        %%mm3, %%mm1    \n\t" \
+            "por        %%mm6, %%mm4    \n\t" \
+            "por        %%mm7, %%mm5    \n\t" \
+ \
+            "movq       %%mm1, %%mm2    \n\t" \
+            "movq       %%mm4, %%mm3    \n\t" \
+            "psllq        $48, %%mm2    \n\t" \
+            "psllq        $32, %%mm3    \n\t" \
+            "pand "MANGLE(mask24hh)", %%mm2\n\t" \
+            "pand "MANGLE(mask24hhh)", %%mm3\n\t" \
+            "por        %%mm2, %%mm0    \n\t" \
+            "psrlq        $16, %%mm1    \n\t" \
+            "psrlq        $32, %%mm4    \n\t" \
+            "psllq        $16, %%mm5    \n\t" \
+            "por        %%mm3, %%mm1    \n\t" \
+            "pand  "MANGLE(mask24hhhh)", %%mm5\n\t" \
+            "por        %%mm5, %%mm4    \n\t" \
+ \
+            MOVNTQ"     %%mm0,   %0     \n\t" \
+            MOVNTQ"     %%mm1,  8%0     \n\t" \
+            MOVNTQ"     %%mm4, 16%0"
+
+
+static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    uint8_t *dest = dst;
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
+    mm_end = end - 31;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movq          %1, %%mm0    \n\t"
+            "movq         8%1, %%mm1    \n\t"
+            "movq        16%1, %%mm4    \n\t"
+            "movq        24%1, %%mm5    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm1, %%mm3    \n\t"
+            "movq       %%mm4, %%mm6    \n\t"
+            "movq       %%mm5, %%mm7    \n\t"
+            STORE_BGR24_MMX
+            :"=m"(*dest)
+            :"m"(*s)
+            :"memory");
+        dest += 24;
+        s += 32;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+#if HAVE_BIGENDIAN
+        /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */
+        s++;
+        dest[2] = *s++;
+        dest[1] = *s++;
+        dest[0] = *s++;
+        dest += 3;
+#else
+        *dest++ = *s++;
+        *dest++ = *s++;
+        *dest++ = *s++;
+        s++;
+#endif
+    }
+}
+
+/*
+ original by Strepto/Astral
+ ported to gcc & bugfixed: A'rpi
+ MMX2, 3DNOW optimization by Nick Kurshev
+ 32-bit C version, and the and&add trick by Michael Niedermayer
+*/
+static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    register const uint8_t* s=src;
+    register uint8_t* d=dst;
+    register const uint8_t *end;
+    const uint8_t *mm_end;
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*s));
+    __asm__ volatile("movq        %0, %%mm4"::"m"(mask15s));
+    mm_end = end - 15;
+    while (s<mm_end) {
+        __asm__ volatile(
+            PREFETCH"  32%1         \n\t"
+            "movq        %1, %%mm0  \n\t"
+            "movq       8%1, %%mm2  \n\t"
+            "movq     %%mm0, %%mm1  \n\t"
+            "movq     %%mm2, %%mm3  \n\t"
+            "pand     %%mm4, %%mm0  \n\t"
+            "pand     %%mm4, %%mm2  \n\t"
+            "paddw    %%mm1, %%mm0  \n\t"
+            "paddw    %%mm3, %%mm2  \n\t"
+            MOVNTQ"   %%mm0,  %0    \n\t"
+            MOVNTQ"   %%mm2, 8%0"
+            :"=m"(*d)
+            :"m"(*s)
+        );
+        d+=16;
+        s+=16;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    mm_end = end - 3;
+    while (s < mm_end) {
+        register unsigned x= *((const uint32_t *)s);
+        *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
+        d+=4;
+        s+=4;
+    }
+    if (s < end) {
+        register unsigned short x= *((const uint16_t *)s);
+        *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
+    }
+}
+
+static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    register const uint8_t* s=src;
+    register uint8_t* d=dst;
+    register const uint8_t *end;
+    const uint8_t *mm_end;
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*s));
+    __asm__ volatile("movq        %0, %%mm7"::"m"(mask15rg));
+    __asm__ volatile("movq        %0, %%mm6"::"m"(mask15b));
+    mm_end = end - 15;
+    while (s<mm_end) {
+        __asm__ volatile(
+            PREFETCH"  32%1         \n\t"
+            "movq        %1, %%mm0  \n\t"
+            "movq       8%1, %%mm2  \n\t"
+            "movq     %%mm0, %%mm1  \n\t"
+            "movq     %%mm2, %%mm3  \n\t"
+            "psrlq       $1, %%mm0  \n\t"
+            "psrlq       $1, %%mm2  \n\t"
+            "pand     %%mm7, %%mm0  \n\t"
+            "pand     %%mm7, %%mm2  \n\t"
+            "pand     %%mm6, %%mm1  \n\t"
+            "pand     %%mm6, %%mm3  \n\t"
+            "por      %%mm1, %%mm0  \n\t"
+            "por      %%mm3, %%mm2  \n\t"
+            MOVNTQ"   %%mm0,  %0    \n\t"
+            MOVNTQ"   %%mm2, 8%0"
+            :"=m"(*d)
+            :"m"(*s)
+        );
+        d+=16;
+        s+=16;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    mm_end = end - 3;
+    while (s < mm_end) {
+        register uint32_t x= *((const uint32_t*)s);
+        *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
+        s+=4;
+        d+=4;
+    }
+    if (s < end) {
+        register uint16_t x= *((const uint16_t*)s);
+        *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
+    }
+}
+
+static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    uint16_t *d = (uint16_t *)dst;
+    end = s + src_size;
+#if HAVE_MMX
+    mm_end = end - 15;
+#if 1 // faster only if multiplies are reasonably fast (FIXME: figure out on which CPUs this is faster; on Athlon it is slightly faster)
+    __asm__ volatile(
+        "movq           %3, %%mm5   \n\t"
+        "movq           %4, %%mm6   \n\t"
+        "movq           %5, %%mm7   \n\t"
+        "jmp 2f                     \n\t"
+        ASMALIGN(4)
+        "1:                         \n\t"
+        PREFETCH"   32(%1)          \n\t"
+        "movd         (%1), %%mm0   \n\t"
+        "movd        4(%1), %%mm3   \n\t"
+        "punpckldq   8(%1), %%mm0   \n\t"
+        "punpckldq  12(%1), %%mm3   \n\t"
+        "movq        %%mm0, %%mm1   \n\t"
+        "movq        %%mm3, %%mm4   \n\t"
+        "pand        %%mm6, %%mm0   \n\t"
+        "pand        %%mm6, %%mm3   \n\t"
+        "pmaddwd     %%mm7, %%mm0   \n\t"
+        "pmaddwd     %%mm7, %%mm3   \n\t"
+        "pand        %%mm5, %%mm1   \n\t"
+        "pand        %%mm5, %%mm4   \n\t"
+        "por         %%mm1, %%mm0   \n\t"
+        "por         %%mm4, %%mm3   \n\t"
+        "psrld          $5, %%mm0   \n\t"
+        "pslld         $11, %%mm3   \n\t"
+        "por         %%mm3, %%mm0   \n\t"
+        MOVNTQ"      %%mm0, (%0)    \n\t"
+        "add           $16,  %1     \n\t"
+        "add            $8,  %0     \n\t"
+        "2:                         \n\t"
+        "cmp            %2,  %1     \n\t"
+        " jb            1b          \n\t"
+        : "+r" (d), "+r"(s)
+        : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
+    );
+#else
+    __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
+    __asm__ volatile(
+        "movq    %0, %%mm7    \n\t"
+        "movq    %1, %%mm6    \n\t"
+        ::"m"(red_16mask),"m"(green_16mask));
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movd          %1, %%mm0    \n\t"
+            "movd         4%1, %%mm3    \n\t"
+            "punpckldq    8%1, %%mm0    \n\t"
+            "punpckldq   12%1, %%mm3    \n\t"
+            "movq       %%mm0, %%mm1    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm3, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "psrlq         $3, %%mm0    \n\t"
+            "psrlq         $3, %%mm3    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %2, %%mm3    \n\t"
+            "psrlq         $5, %%mm1    \n\t"
+            "psrlq         $5, %%mm4    \n\t"
+            "pand       %%mm6, %%mm1    \n\t"
+            "pand       %%mm6, %%mm4    \n\t"
+            "psrlq         $8, %%mm2    \n\t"
+            "psrlq         $8, %%mm5    \n\t"
+            "pand       %%mm7, %%mm2    \n\t"
+            "pand       %%mm7, %%mm5    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+            "psllq        $16, %%mm3    \n\t"
+            "por        %%mm3, %%mm0    \n\t"
+            MOVNTQ"     %%mm0, %0       \n\t"
+            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+        d += 4;
+        s += 16;
+    }
+#endif
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        register int rgb = *(const uint32_t*)s; s += 4;
+        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
+    }
+}
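One way to read the magic constants in the pmaddwd path above (an assumed decomposition, for illustration only; it agrees with the scalar loop at the end of the function):

    /* mask3216br keeps the B and R bytes, mul3216 holds the word multipliers
     * (0x0004 for B, 0x2000 for R), and mask3216g keeps (G & 0xFC) << 8, so
     *   (((B & 0xF8)*4 + (R & 0xF8)*0x2000) | ((G & 0xFC) << 8)) >> 5
     * = (B >> 3) | ((G & 0xFC) << 3) | ((R & 0xF8) << 8),   i.e. RGB565.    */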
+
+static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    uint16_t *d = (uint16_t *)dst;
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
+    __asm__ volatile(
+        "movq          %0, %%mm7    \n\t"
+        "movq          %1, %%mm6    \n\t"
+        ::"m"(red_16mask),"m"(green_16mask));
+    mm_end = end - 15;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movd          %1, %%mm0    \n\t"
+            "movd         4%1, %%mm3    \n\t"
+            "punpckldq    8%1, %%mm0    \n\t"
+            "punpckldq   12%1, %%mm3    \n\t"
+            "movq       %%mm0, %%mm1    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm3, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "psllq         $8, %%mm0    \n\t"
+            "psllq         $8, %%mm3    \n\t"
+            "pand       %%mm7, %%mm0    \n\t"
+            "pand       %%mm7, %%mm3    \n\t"
+            "psrlq         $5, %%mm1    \n\t"
+            "psrlq         $5, %%mm4    \n\t"
+            "pand       %%mm6, %%mm1    \n\t"
+            "pand       %%mm6, %%mm4    \n\t"
+            "psrlq        $19, %%mm2    \n\t"
+            "psrlq        $19, %%mm5    \n\t"
+            "pand          %2, %%mm2    \n\t"
+            "pand          %2, %%mm5    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+            "psllq        $16, %%mm3    \n\t"
+            "por        %%mm3, %%mm0    \n\t"
+            MOVNTQ"     %%mm0, %0       \n\t"
+            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+        d += 4;
+        s += 16;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        register int rgb = *(const uint32_t*)s; s += 4;
+        *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
+    }
+}
+
+static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    uint16_t *d = (uint16_t *)dst;
+    end = s + src_size;
+#if HAVE_MMX
+    mm_end = end - 15;
+#if 1 // faster only if multiplies are reasonably fast (FIXME: figure out on which CPUs this is faster; on Athlon it is slightly faster)
+    __asm__ volatile(
+        "movq           %3, %%mm5   \n\t"
+        "movq           %4, %%mm6   \n\t"
+        "movq           %5, %%mm7   \n\t"
+        "jmp            2f          \n\t"
+        ASMALIGN(4)
+        "1:                         \n\t"
+        PREFETCH"   32(%1)          \n\t"
+        "movd         (%1), %%mm0   \n\t"
+        "movd        4(%1), %%mm3   \n\t"
+        "punpckldq   8(%1), %%mm0   \n\t"
+        "punpckldq  12(%1), %%mm3   \n\t"
+        "movq        %%mm0, %%mm1   \n\t"
+        "movq        %%mm3, %%mm4   \n\t"
+        "pand        %%mm6, %%mm0   \n\t"
+        "pand        %%mm6, %%mm3   \n\t"
+        "pmaddwd     %%mm7, %%mm0   \n\t"
+        "pmaddwd     %%mm7, %%mm3   \n\t"
+        "pand        %%mm5, %%mm1   \n\t"
+        "pand        %%mm5, %%mm4   \n\t"
+        "por         %%mm1, %%mm0   \n\t"
+        "por         %%mm4, %%mm3   \n\t"
+        "psrld          $6, %%mm0   \n\t"
+        "pslld         $10, %%mm3   \n\t"
+        "por         %%mm3, %%mm0   \n\t"
+        MOVNTQ"      %%mm0, (%0)    \n\t"
+        "add           $16,  %1     \n\t"
+        "add            $8,  %0     \n\t"
+        "2:                         \n\t"
+        "cmp            %2,  %1     \n\t"
+        " jb            1b          \n\t"
+        : "+r" (d), "+r"(s)
+        : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
+    );
+#else
+    __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
+    __asm__ volatile(
+        "movq          %0, %%mm7    \n\t"
+        "movq          %1, %%mm6    \n\t"
+        ::"m"(red_15mask),"m"(green_15mask));
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movd          %1, %%mm0    \n\t"
+            "movd         4%1, %%mm3    \n\t"
+            "punpckldq    8%1, %%mm0    \n\t"
+            "punpckldq   12%1, %%mm3    \n\t"
+            "movq       %%mm0, %%mm1    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm3, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "psrlq         $3, %%mm0    \n\t"
+            "psrlq         $3, %%mm3    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %2, %%mm3    \n\t"
+            "psrlq         $6, %%mm1    \n\t"
+            "psrlq         $6, %%mm4    \n\t"
+            "pand       %%mm6, %%mm1    \n\t"
+            "pand       %%mm6, %%mm4    \n\t"
+            "psrlq         $9, %%mm2    \n\t"
+            "psrlq         $9, %%mm5    \n\t"
+            "pand       %%mm7, %%mm2    \n\t"
+            "pand       %%mm7, %%mm5    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+            "psllq        $16, %%mm3    \n\t"
+            "por        %%mm3, %%mm0    \n\t"
+            MOVNTQ"     %%mm0, %0       \n\t"
+            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+        d += 4;
+        s += 16;
+    }
+#endif
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        register int rgb = *(const uint32_t*)s; s += 4;
+        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
+    }
+}
+
+static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    uint16_t *d = (uint16_t *)dst;
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
+    __asm__ volatile(
+        "movq          %0, %%mm7    \n\t"
+        "movq          %1, %%mm6    \n\t"
+        ::"m"(red_15mask),"m"(green_15mask));
+    mm_end = end - 15;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movd          %1, %%mm0    \n\t"
+            "movd         4%1, %%mm3    \n\t"
+            "punpckldq    8%1, %%mm0    \n\t"
+            "punpckldq   12%1, %%mm3    \n\t"
+            "movq       %%mm0, %%mm1    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm3, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "psllq         $7, %%mm0    \n\t"
+            "psllq         $7, %%mm3    \n\t"
+            "pand       %%mm7, %%mm0    \n\t"
+            "pand       %%mm7, %%mm3    \n\t"
+            "psrlq         $6, %%mm1    \n\t"
+            "psrlq         $6, %%mm4    \n\t"
+            "pand       %%mm6, %%mm1    \n\t"
+            "pand       %%mm6, %%mm4    \n\t"
+            "psrlq        $19, %%mm2    \n\t"
+            "psrlq        $19, %%mm5    \n\t"
+            "pand          %2, %%mm2    \n\t"
+            "pand          %2, %%mm5    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+            "psllq        $16, %%mm3    \n\t"
+            "por        %%mm3, %%mm0    \n\t"
+            MOVNTQ"     %%mm0, %0       \n\t"
+            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+        d += 4;
+        s += 16;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        register int rgb = *(const uint32_t*)s; s += 4;
+        *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
+    }
+}
+
+static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    uint16_t *d = (uint16_t *)dst;
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
+    __asm__ volatile(
+        "movq         %0, %%mm7     \n\t"
+        "movq         %1, %%mm6     \n\t"
+        ::"m"(red_16mask),"m"(green_16mask));
+    mm_end = end - 11;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movd          %1, %%mm0    \n\t"
+            "movd         3%1, %%mm3    \n\t"
+            "punpckldq    6%1, %%mm0    \n\t"
+            "punpckldq    9%1, %%mm3    \n\t"
+            "movq       %%mm0, %%mm1    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm3, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "psrlq         $3, %%mm0    \n\t"
+            "psrlq         $3, %%mm3    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %2, %%mm3    \n\t"
+            "psrlq         $5, %%mm1    \n\t"
+            "psrlq         $5, %%mm4    \n\t"
+            "pand       %%mm6, %%mm1    \n\t"
+            "pand       %%mm6, %%mm4    \n\t"
+            "psrlq         $8, %%mm2    \n\t"
+            "psrlq         $8, %%mm5    \n\t"
+            "pand       %%mm7, %%mm2    \n\t"
+            "pand       %%mm7, %%mm5    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+            "psllq        $16, %%mm3    \n\t"
+            "por        %%mm3, %%mm0    \n\t"
+            MOVNTQ"     %%mm0, %0       \n\t"
+            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+        d += 4;
+        s += 12;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        const int b = *s++;
+        const int g = *s++;
+        const int r = *s++;
+        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
+    }
+}
+
+static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    uint16_t *d = (uint16_t *)dst;
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
+    __asm__ volatile(
+        "movq         %0, %%mm7     \n\t"
+        "movq         %1, %%mm6     \n\t"
+        ::"m"(red_16mask),"m"(green_16mask));
+    mm_end = end - 15;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movd          %1, %%mm0    \n\t"
+            "movd         3%1, %%mm3    \n\t"
+            "punpckldq    6%1, %%mm0    \n\t"
+            "punpckldq    9%1, %%mm3    \n\t"
+            "movq       %%mm0, %%mm1    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm3, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "psllq         $8, %%mm0    \n\t"
+            "psllq         $8, %%mm3    \n\t"
+            "pand       %%mm7, %%mm0    \n\t"
+            "pand       %%mm7, %%mm3    \n\t"
+            "psrlq         $5, %%mm1    \n\t"
+            "psrlq         $5, %%mm4    \n\t"
+            "pand       %%mm6, %%mm1    \n\t"
+            "pand       %%mm6, %%mm4    \n\t"
+            "psrlq        $19, %%mm2    \n\t"
+            "psrlq        $19, %%mm5    \n\t"
+            "pand          %2, %%mm2    \n\t"
+            "pand          %2, %%mm5    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+            "psllq        $16, %%mm3    \n\t"
+            "por        %%mm3, %%mm0    \n\t"
+            MOVNTQ"     %%mm0, %0       \n\t"
+            :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+        d += 4;
+        s += 12;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        const int r = *s++;
+        const int g = *s++;
+        const int b = *s++;
+        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
+    }
+}
+
+static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    uint16_t *d = (uint16_t *)dst;
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
+    __asm__ volatile(
+        "movq          %0, %%mm7    \n\t"
+        "movq          %1, %%mm6    \n\t"
+        ::"m"(red_15mask),"m"(green_15mask));
+    mm_end = end - 11;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movd          %1, %%mm0    \n\t"
+            "movd         3%1, %%mm3    \n\t"
+            "punpckldq    6%1, %%mm0    \n\t"
+            "punpckldq    9%1, %%mm3    \n\t"
+            "movq       %%mm0, %%mm1    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm3, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "psrlq         $3, %%mm0    \n\t"
+            "psrlq         $3, %%mm3    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %2, %%mm3    \n\t"
+            "psrlq         $6, %%mm1    \n\t"
+            "psrlq         $6, %%mm4    \n\t"
+            "pand       %%mm6, %%mm1    \n\t"
+            "pand       %%mm6, %%mm4    \n\t"
+            "psrlq         $9, %%mm2    \n\t"
+            "psrlq         $9, %%mm5    \n\t"
+            "pand       %%mm7, %%mm2    \n\t"
+            "pand       %%mm7, %%mm5    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+            "psllq        $16, %%mm3    \n\t"
+            "por        %%mm3, %%mm0    \n\t"
+            MOVNTQ"     %%mm0, %0       \n\t"
+            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+        d += 4;
+        s += 12;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        const int b = *s++;
+        const int g = *s++;
+        const int r = *s++;
+        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
+    }
+}
+
+static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint8_t *s = src;
+    const uint8_t *end;
+#if HAVE_MMX
+    const uint8_t *mm_end;
+#endif
+    uint16_t *d = (uint16_t *)dst;
+    end = s + src_size;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*src):"memory");
+    __asm__ volatile(
+        "movq         %0, %%mm7     \n\t"
+        "movq         %1, %%mm6     \n\t"
+        ::"m"(red_15mask),"m"(green_15mask));
+    mm_end = end - 15;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"   32%1            \n\t"
+            "movd         %1, %%mm0     \n\t"
+            "movd        3%1, %%mm3     \n\t"
+            "punpckldq   6%1, %%mm0     \n\t"
+            "punpckldq   9%1, %%mm3     \n\t"
+            "movq      %%mm0, %%mm1     \n\t"
+            "movq      %%mm0, %%mm2     \n\t"
+            "movq      %%mm3, %%mm4     \n\t"
+            "movq      %%mm3, %%mm5     \n\t"
+            "psllq        $7, %%mm0     \n\t"
+            "psllq        $7, %%mm3     \n\t"
+            "pand      %%mm7, %%mm0     \n\t"
+            "pand      %%mm7, %%mm3     \n\t"
+            "psrlq        $6, %%mm1     \n\t"
+            "psrlq        $6, %%mm4     \n\t"
+            "pand      %%mm6, %%mm1     \n\t"
+            "pand      %%mm6, %%mm4     \n\t"
+            "psrlq       $19, %%mm2     \n\t"
+            "psrlq       $19, %%mm5     \n\t"
+            "pand         %2, %%mm2     \n\t"
+            "pand         %2, %%mm5     \n\t"
+            "por       %%mm1, %%mm0     \n\t"
+            "por       %%mm4, %%mm3     \n\t"
+            "por       %%mm2, %%mm0     \n\t"
+            "por       %%mm5, %%mm3     \n\t"
+            "psllq       $16, %%mm3     \n\t"
+            "por       %%mm3, %%mm0     \n\t"
+            MOVNTQ"    %%mm0, %0        \n\t"
+            :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+        d += 4;
+        s += 12;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        const int r = *s++;
+        const int g = *s++;
+        const int b = *s++;
+        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
+    }
+}
+
+/*
+  I use a less accurate approximation here by simply left-shifting the input
+  value and filling the low order bits with zeroes. This method improves PNG
+  compression but this scheme cannot reproduce white exactly, since it does
+  not generate an all-ones maximum value; the net effect is to darken the
+  image slightly.
+
+  The better method should be "left bit replication":
+
+   4 3 2 1 0
+   ---------
+   1 1 0 1 1
+
+   7 6 5 4 3  2 1 0
+   ----------------
+   1 1 0 1 1  1 1 0
+   |=======|  |===|
+       |      leftmost bits repeated to fill open bits
+       |
+   original bits
+*/
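A small numeric illustration of the two expansions described above, for one 5-bit channel value (plain C, not part of the commit):

    unsigned v          = 0x1B;                    /* 11011b                      */
    unsigned shifted    =  v << 3;                 /* 11011000b = 0xD8, max 0xF8  */
    unsigned replicated = (v << 3) | (v >> 2);     /* 11011110b = 0xDE, max 0xFF  */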
+static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint16_t *end;
+#if HAVE_MMX
+    const uint16_t *mm_end;
+#endif
+    uint8_t *d = dst;
+    const uint16_t *s = (const uint16_t*)src;
+    end = s + src_size/2;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
+    mm_end = end - 7;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movq          %1, %%mm0    \n\t"
+            "movq          %1, %%mm1    \n\t"
+            "movq          %1, %%mm2    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %3, %%mm1    \n\t"
+            "pand          %4, %%mm2    \n\t"
+            "psllq         $3, %%mm0    \n\t"
+            "psrlq         $2, %%mm1    \n\t"
+            "psrlq         $7, %%mm2    \n\t"
+            "movq       %%mm0, %%mm3    \n\t"
+            "movq       %%mm1, %%mm4    \n\t"
+            "movq       %%mm2, %%mm5    \n\t"
+            "punpcklwd     %5, %%mm0    \n\t"
+            "punpcklwd     %5, %%mm1    \n\t"
+            "punpcklwd     %5, %%mm2    \n\t"
+            "punpckhwd     %5, %%mm3    \n\t"
+            "punpckhwd     %5, %%mm4    \n\t"
+            "punpckhwd     %5, %%mm5    \n\t"
+            "psllq         $8, %%mm1    \n\t"
+            "psllq        $16, %%mm2    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "psllq         $8, %%mm4    \n\t"
+            "psllq        $16, %%mm5    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+
+            "movq       %%mm0, %%mm6    \n\t"
+            "movq       %%mm3, %%mm7    \n\t"
+
+            "movq         8%1, %%mm0    \n\t"
+            "movq         8%1, %%mm1    \n\t"
+            "movq         8%1, %%mm2    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %3, %%mm1    \n\t"
+            "pand          %4, %%mm2    \n\t"
+            "psllq         $3, %%mm0    \n\t"
+            "psrlq         $2, %%mm1    \n\t"
+            "psrlq         $7, %%mm2    \n\t"
+            "movq       %%mm0, %%mm3    \n\t"
+            "movq       %%mm1, %%mm4    \n\t"
+            "movq       %%mm2, %%mm5    \n\t"
+            "punpcklwd     %5, %%mm0    \n\t"
+            "punpcklwd     %5, %%mm1    \n\t"
+            "punpcklwd     %5, %%mm2    \n\t"
+            "punpckhwd     %5, %%mm3    \n\t"
+            "punpckhwd     %5, %%mm4    \n\t"
+            "punpckhwd     %5, %%mm5    \n\t"
+            "psllq         $8, %%mm1    \n\t"
+            "psllq        $16, %%mm2    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "psllq         $8, %%mm4    \n\t"
+            "psllq        $16, %%mm5    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+
+            :"=m"(*d)
+            :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
+            :"memory");
+        /* borrowed 32 to 24 */
+        __asm__ volatile(
+            "movq       %%mm0, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "movq       %%mm6, %%mm0    \n\t"
+            "movq       %%mm7, %%mm1    \n\t"
+
+            "movq       %%mm4, %%mm6    \n\t"
+            "movq       %%mm5, %%mm7    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm1, %%mm3    \n\t"
+
+            STORE_BGR24_MMX
+
+            :"=m"(*d)
+            :"m"(*s)
+            :"memory");
+        d += 24;
+        s += 8;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        register uint16_t bgr;
+        bgr = *s++;
+        *d++ = (bgr&0x1F)<<3;
+        *d++ = (bgr&0x3E0)>>2;
+        *d++ = (bgr&0x7C00)>>7;
+    }
+}
+
+static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint16_t *end;
+#if HAVE_MMX
+    const uint16_t *mm_end;
+#endif
+    uint8_t *d = (uint8_t *)dst;
+    const uint16_t *s = (const uint16_t *)src;
+    end = s + src_size/2;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
+    mm_end = end - 7;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movq          %1, %%mm0    \n\t"
+            "movq          %1, %%mm1    \n\t"
+            "movq          %1, %%mm2    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %3, %%mm1    \n\t"
+            "pand          %4, %%mm2    \n\t"
+            "psllq         $3, %%mm0    \n\t"
+            "psrlq         $3, %%mm1    \n\t"
+            "psrlq         $8, %%mm2    \n\t"
+            "movq       %%mm0, %%mm3    \n\t"
+            "movq       %%mm1, %%mm4    \n\t"
+            "movq       %%mm2, %%mm5    \n\t"
+            "punpcklwd     %5, %%mm0    \n\t"
+            "punpcklwd     %5, %%mm1    \n\t"
+            "punpcklwd     %5, %%mm2    \n\t"
+            "punpckhwd     %5, %%mm3    \n\t"
+            "punpckhwd     %5, %%mm4    \n\t"
+            "punpckhwd     %5, %%mm5    \n\t"
+            "psllq         $8, %%mm1    \n\t"
+            "psllq        $16, %%mm2    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "psllq         $8, %%mm4    \n\t"
+            "psllq        $16, %%mm5    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+
+            "movq       %%mm0, %%mm6    \n\t"
+            "movq       %%mm3, %%mm7    \n\t"
+
+            "movq         8%1, %%mm0    \n\t"
+            "movq         8%1, %%mm1    \n\t"
+            "movq         8%1, %%mm2    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %3, %%mm1    \n\t"
+            "pand          %4, %%mm2    \n\t"
+            "psllq         $3, %%mm0    \n\t"
+            "psrlq         $3, %%mm1    \n\t"
+            "psrlq         $8, %%mm2    \n\t"
+            "movq       %%mm0, %%mm3    \n\t"
+            "movq       %%mm1, %%mm4    \n\t"
+            "movq       %%mm2, %%mm5    \n\t"
+            "punpcklwd     %5, %%mm0    \n\t"
+            "punpcklwd     %5, %%mm1    \n\t"
+            "punpcklwd     %5, %%mm2    \n\t"
+            "punpckhwd     %5, %%mm3    \n\t"
+            "punpckhwd     %5, %%mm4    \n\t"
+            "punpckhwd     %5, %%mm5    \n\t"
+            "psllq         $8, %%mm1    \n\t"
+            "psllq        $16, %%mm2    \n\t"
+            "por        %%mm1, %%mm0    \n\t"
+            "por        %%mm2, %%mm0    \n\t"
+            "psllq         $8, %%mm4    \n\t"
+            "psllq        $16, %%mm5    \n\t"
+            "por        %%mm4, %%mm3    \n\t"
+            "por        %%mm5, %%mm3    \n\t"
+            :"=m"(*d)
+            :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
+            :"memory");
+        /* borrowed 32 to 24 */
+        __asm__ volatile(
+            "movq       %%mm0, %%mm4    \n\t"
+            "movq       %%mm3, %%mm5    \n\t"
+            "movq       %%mm6, %%mm0    \n\t"
+            "movq       %%mm7, %%mm1    \n\t"
+
+            "movq       %%mm4, %%mm6    \n\t"
+            "movq       %%mm5, %%mm7    \n\t"
+            "movq       %%mm0, %%mm2    \n\t"
+            "movq       %%mm1, %%mm3    \n\t"
+
+            STORE_BGR24_MMX
+
+            :"=m"(*d)
+            :"m"(*s)
+            :"memory");
+        d += 24;
+        s += 8;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        register uint16_t bgr;
+        bgr = *s++;
+        *d++ = (bgr&0x1F)<<3;
+        *d++ = (bgr&0x7E0)>>3;
+        *d++ = (bgr&0xF800)>>8;
+    }
+}
+
+/*
+ * mm0 = 00 B3 00 B2 00 B1 00 B0
+ * mm1 = 00 G3 00 G2 00 G1 00 G0
+ * mm2 = 00 R3 00 R2 00 R1 00 R0
+ * mm6 = FF FF FF FF FF FF FF FF
+ * mm7 = 00 00 00 00 00 00 00 00
+ */
+#define PACK_RGB32 \
+    "packuswb   %%mm7, %%mm0    \n\t" /* 00 00 00 00 B3 B2 B1 B0 */ \
+    "packuswb   %%mm7, %%mm1    \n\t" /* 00 00 00 00 G3 G2 G1 G0 */ \
+    "packuswb   %%mm7, %%mm2    \n\t" /* 00 00 00 00 R3 R2 R1 R0 */ \
+    "punpcklbw  %%mm1, %%mm0    \n\t" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
+    "punpcklbw  %%mm6, %%mm2    \n\t" /* FF R3 FF R2 FF R1 FF R0 */ \
+    "movq       %%mm0, %%mm3    \n\t"                               \
+    "punpcklwd  %%mm2, %%mm0    \n\t" /* FF R1 G1 B1 FF R0 G0 B0 */ \
+    "punpckhwd  %%mm2, %%mm3    \n\t" /* FF R3 G3 B3 FF R2 G2 B2 */ \
+    MOVNTQ"     %%mm0,  %0      \n\t"                               \
+    MOVNTQ"     %%mm3, 8%0      \n\t"                               \
+
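+/*
+  Rough scalar equivalent of one pixel of a PACK_RGB32 store (illustrative
+  sketch only, assuming r, g and b already hold the expanded 8-bit channel
+  values): bytes land in memory as B, G, R, alpha with the alpha byte forced
+  to 0xFF, matching the little-endian fallback loops below.
+*/
+static inline void RENAME(pack_rgb32_sketch)(uint8_t *d, int r, int g, int b)
+{
+    d[0] = b;
+    d[1] = g;
+    d[2] = r;
+    d[3] = 255;
+}
+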
+static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint16_t *end;
+#if HAVE_MMX
+    const uint16_t *mm_end;
+#endif
+    uint8_t *d = dst;
+    const uint16_t *s = (const uint16_t *)src;
+    end = s + src_size/2;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
+    __asm__ volatile("pxor    %%mm7,%%mm7    \n\t":::"memory");
+    __asm__ volatile("pcmpeqd %%mm6,%%mm6    \n\t":::"memory");
+    mm_end = end - 3;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movq          %1, %%mm0    \n\t"
+            "movq          %1, %%mm1    \n\t"
+            "movq          %1, %%mm2    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %3, %%mm1    \n\t"
+            "pand          %4, %%mm2    \n\t"
+            "psllq         $3, %%mm0    \n\t"
+            "psrlq         $2, %%mm1    \n\t"
+            "psrlq         $7, %%mm2    \n\t"
+            PACK_RGB32
+            :"=m"(*d)
+            :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
+            :"memory");
+        d += 16;
+        s += 4;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        register uint16_t bgr;
+        bgr = *s++;
+#if HAVE_BIGENDIAN
+        *d++ = 255;
+        *d++ = (bgr&0x7C00)>>7;
+        *d++ = (bgr&0x3E0)>>2;
+        *d++ = (bgr&0x1F)<<3;
+#else
+        *d++ = (bgr&0x1F)<<3;
+        *d++ = (bgr&0x3E0)>>2;
+        *d++ = (bgr&0x7C00)>>7;
+        *d++ = 255;
+#endif
+    }
+}
+
+static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    const uint16_t *end;
+#if HAVE_MMX
+    const uint16_t *mm_end;
+#endif
+    uint8_t *d = dst;
+    const uint16_t *s = (const uint16_t*)src;
+    end = s + src_size/2;
+#if HAVE_MMX
+    __asm__ volatile(PREFETCH"    %0"::"m"(*s):"memory");
+    __asm__ volatile("pxor    %%mm7,%%mm7    \n\t":::"memory");
+    __asm__ volatile("pcmpeqd %%mm6,%%mm6    \n\t":::"memory");
+    mm_end = end - 3;
+    while (s < mm_end) {
+        __asm__ volatile(
+            PREFETCH"    32%1           \n\t"
+            "movq          %1, %%mm0    \n\t"
+            "movq          %1, %%mm1    \n\t"
+            "movq          %1, %%mm2    \n\t"
+            "pand          %2, %%mm0    \n\t"
+            "pand          %3, %%mm1    \n\t"
+            "pand          %4, %%mm2    \n\t"
+            "psllq         $3, %%mm0    \n\t"
+            "psrlq         $3, %%mm1    \n\t"
+            "psrlq         $8, %%mm2    \n\t"
+            PACK_RGB32
+            :"=m"(*d)
+            :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
+            :"memory");
+        d += 16;
+        s += 4;
+    }
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+#endif
+    while (s < end) {
+        register uint16_t bgr;
+        bgr = *s++;
+#if HAVE_BIGENDIAN
+        *d++ = 255;
+        *d++ = (bgr&0xF800)>>8;
+        *d++ = (bgr&0x7E0)>>3;
+        *d++ = (bgr&0x1F)<<3;
+#else
+        *d++ = (bgr&0x1F)<<3;
+        *d++ = (bgr&0x7E0)>>3;
+        *d++ = (bgr&0xF800)>>8;
+        *d++ = 255;
+#endif
+    }
+}
+
+static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    x86_reg idx = 15 - src_size;
+    const uint8_t *s = src-idx;
+    uint8_t *d = dst-idx;
+#if HAVE_MMX
+    __asm__ volatile(
+        "test          %0, %0           \n\t"
+        "jns           2f               \n\t"
+        PREFETCH"       (%1, %0)        \n\t"
+        "movq          %3, %%mm7        \n\t"
+        "pxor          %4, %%mm7        \n\t"
+        "movq       %%mm7, %%mm6        \n\t"
+        "pxor          %5, %%mm7        \n\t"
+        ASMALIGN(4)
+        "1:                             \n\t"
+        PREFETCH"     32(%1, %0)        \n\t"
+        "movq           (%1, %0), %%mm0 \n\t"
+        "movq          8(%1, %0), %%mm1 \n\t"
+# if HAVE_MMX2
+        "pshufw      $177, %%mm0, %%mm3 \n\t"
+        "pshufw      $177, %%mm1, %%mm5 \n\t"
+        "pand       %%mm7, %%mm0        \n\t"
+        "pand       %%mm6, %%mm3        \n\t"
+        "pand       %%mm7, %%mm1        \n\t"
+        "pand       %%mm6, %%mm5        \n\t"
+        "por        %%mm3, %%mm0        \n\t"
+        "por        %%mm5, %%mm1        \n\t"
+# else
+        "movq       %%mm0, %%mm2        \n\t"
+        "movq       %%mm1, %%mm4        \n\t"
+        "pand       %%mm7, %%mm0        \n\t"
+        "pand       %%mm6, %%mm2        \n\t"
+        "pand       %%mm7, %%mm1        \n\t"
+        "pand       %%mm6, %%mm4        \n\t"
+        "movq       %%mm2, %%mm3        \n\t"
+        "movq       %%mm4, %%mm5        \n\t"
+        "pslld        $16, %%mm2        \n\t"
+        "psrld        $16, %%mm3        \n\t"
+        "pslld        $16, %%mm4        \n\t"
+        "psrld        $16, %%mm5        \n\t"
+        "por        %%mm2, %%mm0        \n\t"
+        "por        %%mm4, %%mm1        \n\t"
+        "por        %%mm3, %%mm0        \n\t"
+        "por        %%mm5, %%mm1        \n\t"
+# endif
+        MOVNTQ"     %%mm0,  (%2, %0)    \n\t"
+        MOVNTQ"     %%mm1, 8(%2, %0)    \n\t"
+        "add          $16, %0           \n\t"
+        "js            1b               \n\t"
+        SFENCE"                         \n\t"
+        EMMS"                           \n\t"
+        "2:                             \n\t"
+        : "+&r"(idx)
+        : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one)
+        : "memory");
+#endif
+    for (; idx<15; idx+=4) {
+        register int v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
+        v &= 0xff00ff;
+        *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16);
+    }
+}
+
+static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
+{
+    unsigned i;
+#if HAVE_MMX
+    x86_reg mmx_size= 23 - src_size;
+    __asm__ volatile (
+        "test             %%"REG_a", %%"REG_a"          \n\t"
+        "jns                     2f                     \n\t"
+        "movq     "MANGLE(mask24r)", %%mm5              \n\t"
+        "movq     "MANGLE(mask24g)", %%mm6              \n\t"
+        "movq     "MANGLE(mask24b)", %%mm7              \n\t"
+        ASMALIGN(4)
+        "1:                                             \n\t"
+        PREFETCH" 32(%1, %%"REG_a")                     \n\t"
+        "movq       (%1, %%"REG_a"), %%mm0              \n\t" // BGR BGR BG
+        "movq       (%1, %%"REG_a"), %%mm1              \n\t" // BGR BGR BG
+        "movq      2(%1, %%"REG_a"), %%mm2              \n\t" // R BGR BGR B
+        "psllq                  $16, %%mm0              \n\t" // 00 BGR BGR
+        "pand                 %%mm5, %%mm0              \n\t"
+        "pand                 %%mm6, %%mm1              \n\t"
+        "pand                 %%mm7, %%mm2              \n\t"
+        "por                  %%mm0, %%mm1              \n\t"
+        "por                  %%mm2, %%mm1              \n\t"
+        "movq      6(%1, %%"REG_a"), %%mm0              \n\t" // BGR BGR BG
+        MOVNTQ"               %%mm1,   (%2, %%"REG_a")  \n\t" // RGB RGB RG
+        "movq      8(%1, %%"REG_a"), %%mm1              \n\t" // R BGR BGR B
+        "movq     10(%1, %%"REG_a"), %%mm2              \n\t" // GR BGR BGR
+        "pand                 %%mm7, %%mm0              \n\t"
+        "pand                 %%mm5, %%mm1              \n\t"
+        "pand                 %%mm6, %%mm2              \n\t"
+        "por                  %%mm0, %%mm1              \n\t"
+        "por                  %%mm2, %%mm1              \n\t"
+        "movq     14(%1, %%"REG_a"), %%mm0              \n\t" // R BGR BGR B
+        MOVNTQ"               %%mm1,  8(%2, %%"REG_a")  \n\t" // B RGB RGB R
+        "movq     16(%1, %%"REG_a"), %%mm1              \n\t" // GR BGR BGR
+        "movq     18(%1, %%"REG_a"), %%mm2              \n\t" // BGR BGR BG
+        "pand                 %%mm6, %%mm0              \n\t"
+        "pand                 %%mm7, %%mm1              \n\t"
+        "pand                 %%mm5, %%mm2              \n\t"
+        "por                  %%mm0, %%mm1              \n\t"
+        "por                  %%mm2, %%mm1              \n\t"
+        MOVNTQ"               %%mm1, 16(%2, %%"REG_a")  \n\t"
+        "add                    $24, %%"REG_a"          \n\t"
+        " js                     1b                     \n\t"
+        "2:                                             \n\t"
+        : "+a" (mmx_size)
+        : "r" (src-mmx_size), "r"(dst-mmx_size)
+    );
+
+    __asm__ volatile(SFENCE:::"memory");
+    __asm__ volatile(EMMS:::"memory");
+
+    if (mmx_size==23) return; // finished, src_size was a multiple of 8 pixels
+
+    src+= src_size;
+    dst+= src_size;
+    src_size= 23-mmx_size;
+    src-= src_size;
+    dst-= src_size;
+#endif
+    for (i=0; i<src_size; i+=3) {
+        register uint8_t x;
+        x          = src[i + 2];
+        dst[i + 1] = src[i + 1];
+        dst[i + 2] = src[i + 0];
+        dst[i + 0] = x;
+    }
+}
+
+static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                                           long width, long height,
+                                           long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
+{
+    long y;
+    const x86_reg chromWidth= width>>1;
+    for (y=0; y<height; y++) {
+#if HAVE_MMX
+        //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
+        __asm__ volatile(
+            "xor                 %%"REG_a", %%"REG_a"   \n\t"
+            ASMALIGN(4)
+            "1:                                         \n\t"
+            PREFETCH"    32(%1, %%"REG_a", 2)           \n\t"
+            PREFETCH"    32(%2, %%"REG_a")              \n\t"
+            PREFETCH"    32(%3, %%"REG_a")              \n\t"
+            "movq          (%2, %%"REG_a"), %%mm0       \n\t" // U(0)
+            "movq                    %%mm0, %%mm2       \n\t" // U(0)
+            "movq          (%3, %%"REG_a"), %%mm1       \n\t" // V(0)
+            "punpcklbw               %%mm1, %%mm0       \n\t" // UVUV UVUV(0)
+            "punpckhbw               %%mm1, %%mm2       \n\t" // UVUV UVUV(8)
+
+            "movq        (%1, %%"REG_a",2), %%mm3       \n\t" // Y(0)
+            "movq       8(%1, %%"REG_a",2), %%mm5       \n\t" // Y(8)
+            "movq                    %%mm3, %%mm4       \n\t" // Y(0)
+            "movq                    %%mm5, %%mm6       \n\t" // Y(8)
+            "punpcklbw               %%mm0, %%mm3       \n\t" // YUYV YUYV(0)
+            "punpckhbw               %%mm0, %%mm4       \n\t" // YUYV YUYV(4)
+            "punpcklbw               %%mm2, %%mm5       \n\t" // YUYV YUYV(8)
+            "punpckhbw               %%mm2, %%mm6       \n\t" // YUYV YUYV(12)
+
+            MOVNTQ"                  %%mm3,   (%0, %%"REG_a", 4)    \n\t"
+            MOVNTQ"                  %%mm4,  8(%0, %%"REG_a", 4)    \n\t"
+            MOVNTQ"                  %%mm5, 16(%0, %%"REG_a", 4)    \n\t"
+            MOVNTQ"                  %%mm6, 24(%0, %%"REG_a", 4)    \n\t"
+
+            "add                        $8, %%"REG_a"   \n\t"
+            "cmp                        %4, %%"REG_a"   \n\t"
+            " jb                        1b              \n\t"
+            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
+            : "%"REG_a
+        );
+#else
+
+#if ARCH_ALPHA && HAVE_MVI
+#define pl2yuy2(n)                  \
+    y1 = yc[n];                     \
+    y2 = yc2[n];                    \
+    u = uc[n];                      \
+    v = vc[n];                      \
+    __asm__("unpkbw %1, %0" : "=r"(y1) : "r"(y1));  \
+    __asm__("unpkbw %1, %0" : "=r"(y2) : "r"(y2));  \
+    __asm__("unpkbl %1, %0" : "=r"(u) : "r"(u));    \
+    __asm__("unpkbl %1, %0" : "=r"(v) : "r"(v));    \
+    yuv1 = (u << 8) + (v << 24);                \
+    yuv2 = yuv1 + y2;               \
+    yuv1 += y1;                     \
+    qdst[n]  = yuv1;                \
+    qdst2[n] = yuv2;
+
+        int i;
+        uint64_t *qdst = (uint64_t *) dst;
+        uint64_t *qdst2 = (uint64_t *) (dst + dstStride);
+        const uint32_t *yc = (uint32_t *) ysrc;
+        const uint32_t *yc2 = (uint32_t *) (ysrc + lumStride);
+        const uint16_t *uc = (uint16_t*) usrc, *vc = (uint16_t*) vsrc;
+        for (i = 0; i < chromWidth; i += 8) {
+            uint64_t y1, y2, yuv1, yuv2;
+            uint64_t u, v;
+            /* Prefetch */
+            __asm__("ldq $31,64(%0)" :: "r"(yc));
+            __asm__("ldq $31,64(%0)" :: "r"(yc2));
+            __asm__("ldq $31,64(%0)" :: "r"(uc));
+            __asm__("ldq $31,64(%0)" :: "r"(vc));
+
+            pl2yuy2(0);
+            pl2yuy2(1);
+            pl2yuy2(2);
+            pl2yuy2(3);
+
+            yc    += 4;
+            yc2   += 4;
+            uc    += 4;
+            vc    += 4;
+            qdst  += 4;
+            qdst2 += 4;
+        }
+        y++;
+        ysrc += lumStride;
+        dst += dstStride;
+
+#elif HAVE_FAST_64BIT
+        int i;
+        uint64_t *ldst = (uint64_t *) dst;
+        const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+        for (i = 0; i < chromWidth; i += 2) {
+            uint64_t k, l;
+            k = yc[0] + (uc[0] << 8) +
+                (yc[1] << 16) + (vc[0] << 24);
+            l = yc[2] + (uc[1] << 8) +
+                (yc[3] << 16) + (vc[1] << 24);
+            *ldst++ = k + (l << 32);
+            yc += 4;
+            uc += 2;
+            vc += 2;
+        }
+
+#else
+        int i, *idst = (int32_t *) dst;
+        const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+        for (i = 0; i < chromWidth; i++) {
+#if HAVE_BIGENDIAN
+            *idst++ = (yc[0] << 24)+ (uc[0] << 16) +
+                (yc[1] << 8) + (vc[0] << 0);
+#else
+            *idst++ = yc[0] + (uc[0] << 8) +
+                (yc[1] << 16) + (vc[0] << 24);
+#endif
+            yc += 2;
+            uc++;
+            vc++;
+        }
+#endif
+#endif
+        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
+            usrc += chromStride;
+            vsrc += chromStride;
+        }
+        ysrc += lumStride;
+        dst  += dstStride;
+    }
+#if HAVE_MMX
+    __asm__(EMMS"       \n\t"
+            SFENCE"     \n\t"
+            :::"memory");
+#endif
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                                      long width, long height,
+                                      long lumStride, long chromStride, long dstStride)
+{
+    //FIXME interpolate chroma
+    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+}
+
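+/*
+  Illustrative call sketch only (nothing below uses it), assuming tightly
+  packed planes of a YV12 frame with width a multiple of 16 and height even:
+  the luma stride equals the width, the chroma stride is half of it, and the
+  packed YUY2 destination uses two bytes per pixel.
+*/
+static inline void RENAME(yv12toyuy2_packed_sketch)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc,
+                                                    uint8_t *dst, long width, long height)
+{
+    RENAME(yv12toyuy2)(ysrc, usrc, vsrc, dst, width, height, width, width>>1, width<<1);
+}
+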
+static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                                           long width, long height,
+                                           long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
+{
+    long y;
+    const x86_reg chromWidth= width>>1;
+    for (y=0; y<height; y++) {
+#if HAVE_MMX
+        //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
+        __asm__ volatile(
+            "xor                %%"REG_a", %%"REG_a"    \n\t"
+            ASMALIGN(4)
+            "1:                                         \n\t"
+            PREFETCH"   32(%1, %%"REG_a", 2)            \n\t"
+            PREFETCH"   32(%2, %%"REG_a")               \n\t"
+            PREFETCH"   32(%3, %%"REG_a")               \n\t"
+            "movq         (%2, %%"REG_a"), %%mm0        \n\t" // U(0)
+            "movq                   %%mm0, %%mm2        \n\t" // U(0)
+            "movq         (%3, %%"REG_a"), %%mm1        \n\t" // V(0)
+            "punpcklbw              %%mm1, %%mm0        \n\t" // UVUV UVUV(0)
+            "punpckhbw              %%mm1, %%mm2        \n\t" // UVUV UVUV(8)
+
+            "movq       (%1, %%"REG_a",2), %%mm3        \n\t" // Y(0)
+            "movq      8(%1, %%"REG_a",2), %%mm5        \n\t" // Y(8)
+            "movq                   %%mm0, %%mm4        \n\t" // Y(0)
+            "movq                   %%mm2, %%mm6        \n\t" // Y(8)
+            "punpcklbw              %%mm3, %%mm0        \n\t" // YUYV YUYV(0)
+            "punpckhbw              %%mm3, %%mm4        \n\t" // YUYV YUYV(4)
+            "punpcklbw              %%mm5, %%mm2        \n\t" // YUYV YUYV(8)
+            "punpckhbw              %%mm5, %%mm6        \n\t" // YUYV YUYV(12)
+
+            MOVNTQ"                 %%mm0,   (%0, %%"REG_a", 4)     \n\t"
+            MOVNTQ"                 %%mm4,  8(%0, %%"REG_a", 4)     \n\t"
+            MOVNTQ"                 %%mm2, 16(%0, %%"REG_a", 4)     \n\t"
+            MOVNTQ"                 %%mm6, 24(%0, %%"REG_a", 4)     \n\t"
+
+            "add                       $8, %%"REG_a"    \n\t"
+            "cmp                       %4, %%"REG_a"    \n\t"
+            " jb                       1b               \n\t"
+            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
+            : "%"REG_a
+        );
+#else
+//FIXME adapt the Alpha ASM code from yv12->yuy2
+
+#if HAVE_FAST_64BIT
+        int i;
+        uint64_t *ldst = (uint64_t *) dst;
+        const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+        for (i = 0; i < chromWidth; i += 2) {
+            uint64_t k, l;
+            k = uc[0] + (yc[0] << 8) +
+                (vc[0] << 16) + (yc[1] << 24);
+            l = uc[1] + (yc[2] << 8) +
+                (vc[1] << 16) + (yc[3] << 24);
+            *ldst++ = k + (l << 32);
+            yc += 4;
+            uc += 2;
+            vc += 2;
+        }
+
+#else
+        int i, *idst = (int32_t *) dst;
+        const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+        for (i = 0; i < chromWidth; i++) {
+#if HAVE_BIGENDIAN
+            *idst++ = (uc[0] << 24)+ (yc[0] << 16) +
+                (vc[0] << 8) + (yc[1] << 0);
+#else
+            *idst++ = uc[0] + (yc[0] << 8) +
+               (vc[0] << 16) + (yc[1] << 24);
+#endif
+            yc += 2;
+            uc++;
+            vc++;
+        }
+#endif
+#endif
+        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
+            usrc += chromStride;
+            vsrc += chromStride;
+        }
+        ysrc += lumStride;
+        dst += dstStride;
+    }
+#if HAVE_MMX
+    __asm__(EMMS"       \n\t"
+            SFENCE"     \n\t"
+            :::"memory");
+#endif
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                                      long width, long height,
+                                      long lumStride, long chromStride, long dstStride)
+{
+    //FIXME interpolate chroma
+    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+}
+
+/**
+ * Width should be a multiple of 16.
+ */
+static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                                         long width, long height,
+                                         long lumStride, long chromStride, long dstStride)
+{
+    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
+}
+
+/**
+ * Width should be a multiple of 16.
+ */
+static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
+                                         long width, long height,
+                                         long lumStride, long chromStride, long dstStride)
+{
+    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ */
+static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                                      long width, long height,
+                                      long lumStride, long chromStride, long srcStride)
+{
+    long y;
+    const x86_reg chromWidth= width>>1;
+    for (y=0; y<height; y+=2) {
+#if HAVE_MMX
+        __asm__ volatile(
+            "xor                 %%"REG_a", %%"REG_a"   \n\t"
+            "pcmpeqw                 %%mm7, %%mm7       \n\t"
+            "psrlw                      $8, %%mm7       \n\t" // FF,00,FF,00...
+            ASMALIGN(4)
+            "1:                \n\t"
+            PREFETCH" 64(%0, %%"REG_a", 4)              \n\t"
+            "movq       (%0, %%"REG_a", 4), %%mm0       \n\t" // YUYV YUYV(0)
+            "movq      8(%0, %%"REG_a", 4), %%mm1       \n\t" // YUYV YUYV(4)
+            "movq                    %%mm0, %%mm2       \n\t" // YUYV YUYV(0)
+            "movq                    %%mm1, %%mm3       \n\t" // YUYV YUYV(4)
+            "psrlw                      $8, %%mm0       \n\t" // U0V0 U0V0(0)
+            "psrlw                      $8, %%mm1       \n\t" // U0V0 U0V0(4)
+            "pand                    %%mm7, %%mm2       \n\t" // Y0Y0 Y0Y0(0)
+            "pand                    %%mm7, %%mm3       \n\t" // Y0Y0 Y0Y0(4)
+            "packuswb                %%mm1, %%mm0       \n\t" // UVUV UVUV(0)
+            "packuswb                %%mm3, %%mm2       \n\t" // YYYY YYYY(0)
+
+            MOVNTQ"                  %%mm2, (%1, %%"REG_a", 2)  \n\t"
+
+            "movq     16(%0, %%"REG_a", 4), %%mm1       \n\t" // YUYV YUYV(8)
+            "movq     24(%0, %%"REG_a", 4), %%mm2       \n\t" // YUYV YUYV(12)
+            "movq                    %%mm1, %%mm3       \n\t" // YUYV YUYV(8)
+            "movq                    %%mm2, %%mm4       \n\t" // YUYV YUYV(12)
+            "psrlw                      $8, %%mm1       \n\t" // U0V0 U0V0(8)
+            "psrlw                      $8, %%mm2       \n\t" // U0V0 U0V0(12)
+            "pand                    %%mm7, %%mm3       \n\t" // Y0Y0 Y0Y0(8)
+            "pand                    %%mm7, %%mm4       \n\t" // Y0Y0 Y0Y0(12)
+            "packuswb                %%mm2, %%mm1       \n\t" // UVUV UVUV(8)
+            "packuswb                %%mm4, %%mm3       \n\t" // YYYY YYYY(8)
+
+            MOVNTQ"                  %%mm3, 8(%1, %%"REG_a", 2) \n\t"
+
+            "movq                    %%mm0, %%mm2       \n\t" // UVUV UVUV(0)
+            "movq                    %%mm1, %%mm3       \n\t" // UVUV UVUV(8)
+            "psrlw                      $8, %%mm0       \n\t" // V0V0 V0V0(0)
+            "psrlw                      $8, %%mm1       \n\t" // V0V0 V0V0(8)
+            "pand                    %%mm7, %%mm2       \n\t" // U0U0 U0U0(0)
+            "pand                    %%mm7, %%mm3       \n\t" // U0U0 U0U0(8)
+            "packuswb                %%mm1, %%mm0       \n\t" // VVVV VVVV(0)
+            "packuswb                %%mm3, %%mm2       \n\t" // UUUU UUUU(0)
+
+            MOVNTQ"                  %%mm0, (%3, %%"REG_a")     \n\t"
+            MOVNTQ"                  %%mm2, (%2, %%"REG_a")     \n\t"
+
+            "add                        $8, %%"REG_a"   \n\t"
+            "cmp                        %4, %%"REG_a"   \n\t"
+            " jb                        1b              \n\t"
+            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+            : "memory", "%"REG_a
+        );
+
+        ydst += lumStride;
+        src  += srcStride;
+
+        __asm__ volatile(
+            "xor                 %%"REG_a", %%"REG_a"   \n\t"
+            ASMALIGN(4)
+            "1:                                         \n\t"
+            PREFETCH" 64(%0, %%"REG_a", 4)              \n\t"
+            "movq       (%0, %%"REG_a", 4), %%mm0       \n\t" // YUYV YUYV(0)
+            "movq      8(%0, %%"REG_a", 4), %%mm1       \n\t" // YUYV YUYV(4)
+            "movq     16(%0, %%"REG_a", 4), %%mm2       \n\t" // YUYV YUYV(8)
+            "movq     24(%0, %%"REG_a", 4), %%mm3       \n\t" // YUYV YUYV(12)
+            "pand                    %%mm7, %%mm0       \n\t" // Y0Y0 Y0Y0(0)
+            "pand                    %%mm7, %%mm1       \n\t" // Y0Y0 Y0Y0(4)
+            "pand                    %%mm7, %%mm2       \n\t" // Y0Y0 Y0Y0(8)
+            "pand                    %%mm7, %%mm3       \n\t" // Y0Y0 Y0Y0(12)
+            "packuswb                %%mm1, %%mm0       \n\t" // YYYY YYYY(0)
+            "packuswb                %%mm3, %%mm2       \n\t" // YYYY YYYY(8)
+
+            MOVNTQ"                  %%mm0,  (%1, %%"REG_a", 2) \n\t"
+            MOVNTQ"                  %%mm2, 8(%1, %%"REG_a", 2) \n\t"
+
+            "add                        $8, %%"REG_a"   \n\t"
+            "cmp                        %4, %%"REG_a"   \n\t"
+            " jb                        1b              \n\t"
+
+            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+            : "memory", "%"REG_a
+        );
+#else
+        long i;
+        for (i=0; i<chromWidth; i++) {
+            ydst[2*i+0] = src[4*i+0];
+            udst[i]     = src[4*i+1];
+            ydst[2*i+1] = src[4*i+2];
+            vdst[i]     = src[4*i+3];
+        }
+        ydst += lumStride;
+        src  += srcStride;
+
+        for (i=0; i<chromWidth; i++) {
+            ydst[2*i+0]     = src[4*i+0];
+            ydst[2*i+1]     = src[4*i+2];
+        }
+#endif
+        udst += chromStride;
+        vdst += chromStride;
+        ydst += lumStride;
+        src  += srcStride;
+    }
+#if HAVE_MMX
+    __asm__ volatile(EMMS"       \n\t"
+                     SFENCE"     \n\t"
+                     :::"memory");
+#endif
+}
+
+static inline void RENAME(yvu9toyv12)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc,
+                                      uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                                      long width, long height, long lumStride, long chromStride)
+{
+    /* Y Plane */
+    memcpy(ydst, ysrc, width*height);
+
+    /* XXX: implement upscaling for U,V */
+}
+
+static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWidth, long srcHeight, long srcStride, long dstStride)
+{
+    long x,y;
+
+    dst[0]= src[0];
+
+    // first line
+    for (x=0; x<srcWidth-1; x++) {
+        dst[2*x+1]= (3*src[x] +   src[x+1])>>2;
+        dst[2*x+2]= (  src[x] + 3*src[x+1])>>2;
+    }
+    dst[2*srcWidth-1]= src[srcWidth-1];
+
+    dst+= dstStride;
+
+    for (y=1; y<srcHeight; y++) {
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+        const x86_reg mmxSize= srcWidth&~15;
+        __asm__ volatile(
+            "mov           %4, %%"REG_a"            \n\t"
+            "1:                                     \n\t"
+            "movq         (%0, %%"REG_a"), %%mm0    \n\t"
+            "movq         (%1, %%"REG_a"), %%mm1    \n\t"
+            "movq        1(%0, %%"REG_a"), %%mm2    \n\t"
+            "movq        1(%1, %%"REG_a"), %%mm3    \n\t"
+            "movq       -1(%0, %%"REG_a"), %%mm4    \n\t"
+            "movq       -1(%1, %%"REG_a"), %%mm5    \n\t"
+            PAVGB"                  %%mm0, %%mm5    \n\t"
+            PAVGB"                  %%mm0, %%mm3    \n\t"
+            PAVGB"                  %%mm0, %%mm5    \n\t"
+            PAVGB"                  %%mm0, %%mm3    \n\t"
+            PAVGB"                  %%mm1, %%mm4    \n\t"
+            PAVGB"                  %%mm1, %%mm2    \n\t"
+            PAVGB"                  %%mm1, %%mm4    \n\t"
+            PAVGB"                  %%mm1, %%mm2    \n\t"
+            "movq                   %%mm5, %%mm7    \n\t"
+            "movq                   %%mm4, %%mm6    \n\t"
+            "punpcklbw              %%mm3, %%mm5    \n\t"
+            "punpckhbw              %%mm3, %%mm7    \n\t"
+            "punpcklbw              %%mm2, %%mm4    \n\t"
+            "punpckhbw              %%mm2, %%mm6    \n\t"
+#if 1
+            MOVNTQ"                 %%mm5,  (%2, %%"REG_a", 2)  \n\t"
+            MOVNTQ"                 %%mm7, 8(%2, %%"REG_a", 2)  \n\t"
+            MOVNTQ"                 %%mm4,  (%3, %%"REG_a", 2)  \n\t"
+            MOVNTQ"                 %%mm6, 8(%3, %%"REG_a", 2)  \n\t"
+#else
+            "movq                   %%mm5,  (%2, %%"REG_a", 2)  \n\t"
+            "movq                   %%mm7, 8(%2, %%"REG_a", 2)  \n\t"
+            "movq                   %%mm4,  (%3, %%"REG_a", 2)  \n\t"
+            "movq                   %%mm6, 8(%3, %%"REG_a", 2)  \n\t"
+#endif
+            "add                       $8, %%"REG_a"            \n\t"
+            " js                       1b                       \n\t"
+            :: "r" (src + mmxSize  ), "r" (src + srcStride + mmxSize  ),
+            "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
+            "g" (-mmxSize)
+            : "%"REG_a
+
+        );
+#else
+        const x86_reg mmxSize=1;
+#endif
+        dst[0        ]= (3*src[0] +   src[srcStride])>>2;
+        dst[dstStride]= (  src[0] + 3*src[srcStride])>>2;
+
+        for (x=mmxSize-1; x<srcWidth-1; x++) {
+            dst[2*x          +1]= (3*src[x+0] +   src[x+srcStride+1])>>2;
+            dst[2*x+dstStride+2]= (  src[x+0] + 3*src[x+srcStride+1])>>2;
+            dst[2*x+dstStride+1]= (  src[x+1] + 3*src[x+srcStride  ])>>2;
+            dst[2*x          +2]= (3*src[x+1] +   src[x+srcStride  ])>>2;
+        }
+        dst[srcWidth*2 -1            ]= (3*src[srcWidth-1] +   src[srcWidth-1 + srcStride])>>2;
+        dst[srcWidth*2 -1 + dstStride]= (  src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;
+
+        dst+=dstStride*2;
+        src+=srcStride;
+    }
+
+    // last line
+#if 1
+    dst[0]= src[0];
+
+    for (x=0; x<srcWidth-1; x++) {
+        dst[2*x+1]= (3*src[x] +   src[x+1])>>2;
+        dst[2*x+2]= (  src[x] + 3*src[x+1])>>2;
+    }
+    dst[2*srcWidth-1]= src[srcWidth-1];
+#else
+    for (x=0; x<srcWidth; x++) {
+        dst[2*x+0]=
+        dst[2*x+1]= src[x];
+    }
+#endif
+
+#if HAVE_MMX
+    __asm__ volatile(EMMS"       \n\t"
+                     SFENCE"     \n\t"
+                     :::"memory");
+#endif
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 16.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line; the others are ignored.
+ * FIXME: Write HQ version.
+ */
+static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                                      long width, long height,
+                                      long lumStride, long chromStride, long srcStride)
+{
+    long y;
+    const x86_reg chromWidth= width>>1;
+    for (y=0; y<height; y+=2) {
+#if HAVE_MMX
+        __asm__ volatile(
+            "xor                 %%"REG_a", %%"REG_a"   \n\t"
+            "pcmpeqw             %%mm7, %%mm7   \n\t"
+            "psrlw                  $8, %%mm7   \n\t" // FF,00,FF,00...
+            ASMALIGN(4)
+            "1:                                 \n\t"
+            PREFETCH" 64(%0, %%"REG_a", 4)          \n\t"
+            "movq       (%0, %%"REG_a", 4), %%mm0   \n\t" // UYVY UYVY(0)
+            "movq      8(%0, %%"REG_a", 4), %%mm1   \n\t" // UYVY UYVY(4)
+            "movq                %%mm0, %%mm2   \n\t" // UYVY UYVY(0)
+            "movq                %%mm1, %%mm3   \n\t" // UYVY UYVY(4)
+            "pand                %%mm7, %%mm0   \n\t" // U0V0 U0V0(0)
+            "pand                %%mm7, %%mm1   \n\t" // U0V0 U0V0(4)
+            "psrlw                  $8, %%mm2   \n\t" // Y0Y0 Y0Y0(0)
+            "psrlw                  $8, %%mm3   \n\t" // Y0Y0 Y0Y0(4)
+            "packuswb            %%mm1, %%mm0   \n\t" // UVUV UVUV(0)
+            "packuswb            %%mm3, %%mm2   \n\t" // YYYY YYYY(0)
+
+            MOVNTQ"              %%mm2,  (%1, %%"REG_a", 2) \n\t"
+
+            "movq     16(%0, %%"REG_a", 4), %%mm1   \n\t" // UYVY UYVY(8)
+            "movq     24(%0, %%"REG_a", 4), %%mm2   \n\t" // UYVY UYVY(12)
+            "movq                %%mm1, %%mm3   \n\t" // UYVY UYVY(8)
+            "movq                %%mm2, %%mm4   \n\t" // UYVY UYVY(12)
+            "pand                %%mm7, %%mm1   \n\t" // U0V0 U0V0(8)
+            "pand                %%mm7, %%mm2   \n\t" // U0V0 U0V0(12)
+            "psrlw                  $8, %%mm3   \n\t" // Y0Y0 Y0Y0(8)
+            "psrlw                  $8, %%mm4   \n\t" // Y0Y0 Y0Y0(12)
+            "packuswb            %%mm2, %%mm1   \n\t" // UVUV UVUV(8)
+            "packuswb            %%mm4, %%mm3   \n\t" // YYYY YYYY(8)
+
+            MOVNTQ"              %%mm3, 8(%1, %%"REG_a", 2) \n\t"
+
+            "movq                %%mm0, %%mm2   \n\t" // UVUV UVUV(0)
+            "movq                %%mm1, %%mm3   \n\t" // UVUV UVUV(8)
+            "psrlw                  $8, %%mm0   \n\t" // V0V0 V0V0(0)
+            "psrlw                  $8, %%mm1   \n\t" // V0V0 V0V0(8)
+            "pand                %%mm7, %%mm2   \n\t" // U0U0 U0U0(0)
+            "pand                %%mm7, %%mm3   \n\t" // U0U0 U0U0(8)
+            "packuswb            %%mm1, %%mm0   \n\t" // VVVV VVVV(0)
+            "packuswb            %%mm3, %%mm2   \n\t" // UUUU UUUU(0)
+
+            MOVNTQ"              %%mm0, (%3, %%"REG_a") \n\t"
+            MOVNTQ"              %%mm2, (%2, %%"REG_a") \n\t"
+
+            "add                    $8, %%"REG_a"   \n\t"
+            "cmp                    %4, %%"REG_a"   \n\t"
+            " jb                    1b          \n\t"
+            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+            : "memory", "%"REG_a
+        );
+
+        ydst += lumStride;
+        src  += srcStride;
+
+        __asm__ volatile(
+            "xor                 %%"REG_a", %%"REG_a"   \n\t"
+            ASMALIGN(4)
+            "1:                                 \n\t"
+            PREFETCH" 64(%0, %%"REG_a", 4)          \n\t"
+            "movq       (%0, %%"REG_a", 4), %%mm0   \n\t" // YUYV YUYV(0)
+            "movq      8(%0, %%"REG_a", 4), %%mm1   \n\t" // YUYV YUYV(4)
+            "movq     16(%0, %%"REG_a", 4), %%mm2   \n\t" // YUYV YUYV(8)
+            "movq     24(%0, %%"REG_a", 4), %%mm3   \n\t" // YUYV YUYV(12)
+            "psrlw                  $8, %%mm0   \n\t" // Y0Y0 Y0Y0(0)
+            "psrlw                  $8, %%mm1   \n\t" // Y0Y0 Y0Y0(4)
+            "psrlw                  $8, %%mm2   \n\t" // Y0Y0 Y0Y0(8)
+            "psrlw                  $8, %%mm3   \n\t" // Y0Y0 Y0Y0(12)
+            "packuswb            %%mm1, %%mm0   \n\t" // YYYY YYYY(0)
+            "packuswb            %%mm3, %%mm2   \n\t" // YYYY YYYY(8)
+
+            MOVNTQ"              %%mm0,  (%1, %%"REG_a", 2) \n\t"
+            MOVNTQ"              %%mm2, 8(%1, %%"REG_a", 2) \n\t"
+
+            "add                    $8, %%"REG_a"   \n\t"
+            "cmp                    %4, %%"REG_a"   \n\t"
+            " jb                    1b          \n\t"
+
+            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+            : "memory", "%"REG_a
+        );
+#else
+        long i;
+        for (i=0; i<chromWidth; i++) {
+            udst[i]     = src[4*i+0];
+            ydst[2*i+0] = src[4*i+1];
+            vdst[i]     = src[4*i+2];
+            ydst[2*i+1] = src[4*i+3];
+        }
+        ydst += lumStride;
+        src  += srcStride;
+
+        for (i=0; i<chromWidth; i++) {
+            ydst[2*i+0] = src[4*i+1];
+            ydst[2*i+1] = src[4*i+3];
+        }
+#endif
+        udst += chromStride;
+        vdst += chromStride;
+        ydst += lumStride;
+        src  += srcStride;
+    }
+#if HAVE_MMX
+    __asm__ volatile(EMMS"       \n\t"
+                     SFENCE"     \n\t"
+                     :::"memory");
+#endif
+}
+
+/**
+ * Height should be a multiple of 2 and width should be a multiple of 2.
+ * (If this is a problem for anyone then tell me, and I will fix it.)
+ * Chrominance data is only taken from every second line;
+ * the others are ignored in the C version.
+ * FIXME: Write HQ version.
+ */
+static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+                                       long width, long height,
+                                       long lumStride, long chromStride, long srcStride)
+{
+    long y;
+    const x86_reg chromWidth= width>>1;
+#if HAVE_MMX
+    for (y=0; y<height-2; y+=2) {
+        long i;
+        for (i=0; i<2; i++) {
+            __asm__ volatile(
+                "mov                        %2, %%"REG_a"   \n\t"
+                "movq  "MANGLE(ff_bgr2YCoeff)", %%mm6       \n\t"
+                "movq       "MANGLE(ff_w1111)", %%mm5       \n\t"
+                "pxor                    %%mm7, %%mm7       \n\t"
+                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"   \n\t"
+                ASMALIGN(4)
+                "1:                                         \n\t"
+                PREFETCH"    64(%0, %%"REG_d")              \n\t"
+                "movd          (%0, %%"REG_d"), %%mm0       \n\t"
+                "movd         3(%0, %%"REG_d"), %%mm1       \n\t"
+                "punpcklbw               %%mm7, %%mm0       \n\t"
+                "punpcklbw               %%mm7, %%mm1       \n\t"
+                "movd         6(%0, %%"REG_d"), %%mm2       \n\t"
+                "movd         9(%0, %%"REG_d"), %%mm3       \n\t"
+                "punpcklbw               %%mm7, %%mm2       \n\t"
+                "punpcklbw               %%mm7, %%mm3       \n\t"
+                "pmaddwd                 %%mm6, %%mm0       \n\t"
+                "pmaddwd                 %%mm6, %%mm1       \n\t"
+                "pmaddwd                 %%mm6, %%mm2       \n\t"
+                "pmaddwd                 %%mm6, %%mm3       \n\t"
+#ifndef FAST_BGR2YV12
+                "psrad                      $8, %%mm0       \n\t"
+                "psrad                      $8, %%mm1       \n\t"
+                "psrad                      $8, %%mm2       \n\t"
+                "psrad                      $8, %%mm3       \n\t"
+#endif
+                "packssdw                %%mm1, %%mm0       \n\t"
+                "packssdw                %%mm3, %%mm2       \n\t"
+                "pmaddwd                 %%mm5, %%mm0       \n\t"
+                "pmaddwd                 %%mm5, %%mm2       \n\t"
+                "packssdw                %%mm2, %%mm0       \n\t"
+                "psraw                      $7, %%mm0       \n\t"
+
+                "movd        12(%0, %%"REG_d"), %%mm4       \n\t"
+                "movd        15(%0, %%"REG_d"), %%mm1       \n\t"
+                "punpcklbw               %%mm7, %%mm4       \n\t"
+                "punpcklbw               %%mm7, %%mm1       \n\t"
+                "movd        18(%0, %%"REG_d"), %%mm2       \n\t"
+                "movd        21(%0, %%"REG_d"), %%mm3       \n\t"
+                "punpcklbw               %%mm7, %%mm2       \n\t"
+                "punpcklbw               %%mm7, %%mm3       \n\t"
+                "pmaddwd                 %%mm6, %%mm4       \n\t"
+                "pmaddwd                 %%mm6, %%mm1       \n\t"
+                "pmaddwd                 %%mm6, %%mm2       \n\t"
+                "pmaddwd                 %%mm6, %%mm3       \n\t"
+#ifndef FAST_BGR2YV12
+                "psrad                      $8, %%mm4       \n\t"
+                "psrad                      $8, %%mm1       \n\t"
+                "psrad                      $8, %%mm2       \n\t"
+                "psrad                      $8, %%mm3       \n\t"
+#endif
+                "packssdw                %%mm1, %%mm4       \n\t"
+                "packssdw                %%mm3, %%mm2       \n\t"
+                "pmaddwd                 %%mm5, %%mm4       \n\t"
+                "pmaddwd                 %%mm5, %%mm2       \n\t"
+                "add                       $24, %%"REG_d"   \n\t"
+                "packssdw                %%mm2, %%mm4       \n\t"
+                "psraw                      $7, %%mm4       \n\t"
+
+                "packuswb                %%mm4, %%mm0       \n\t"
+                "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0    \n\t"
+
+                MOVNTQ"                  %%mm0, (%1, %%"REG_a") \n\t"
+                "add                        $8,      %%"REG_a"  \n\t"
+                " js                        1b                  \n\t"
+                : : "r" (src+width*3), "r" (ydst+width), "g" ((x86_reg)-width)
+                : "%"REG_a, "%"REG_d
+            );
+            ydst += lumStride;
+            src  += srcStride;
+        }
+        src -= srcStride*2;
+        __asm__ volatile(
+            "mov                        %4, %%"REG_a"   \n\t"
+            "movq       "MANGLE(ff_w1111)", %%mm5       \n\t"
+            "movq  "MANGLE(ff_bgr2UCoeff)", %%mm6       \n\t"
+            "pxor                    %%mm7, %%mm7       \n\t"
+            "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"   \n\t"
+            "add                 %%"REG_d", %%"REG_d"   \n\t"
+            ASMALIGN(4)
+            "1:                                         \n\t"
+            PREFETCH"    64(%0, %%"REG_d")              \n\t"
+            PREFETCH"    64(%1, %%"REG_d")              \n\t"
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+            "movq          (%0, %%"REG_d"), %%mm0       \n\t"
+            "movq          (%1, %%"REG_d"), %%mm1       \n\t"
+            "movq         6(%0, %%"REG_d"), %%mm2       \n\t"
+            "movq         6(%1, %%"REG_d"), %%mm3       \n\t"
+            PAVGB"                   %%mm1, %%mm0       \n\t"
+            PAVGB"                   %%mm3, %%mm2       \n\t"
+            "movq                    %%mm0, %%mm1       \n\t"
+            "movq                    %%mm2, %%mm3       \n\t"
+            "psrlq                     $24, %%mm0       \n\t"
+            "psrlq                     $24, %%mm2       \n\t"
+            PAVGB"                   %%mm1, %%mm0       \n\t"
+            PAVGB"                   %%mm3, %%mm2       \n\t"
+            "punpcklbw               %%mm7, %%mm0       \n\t"
+            "punpcklbw               %%mm7, %%mm2       \n\t"
+#else
+            "movd          (%0, %%"REG_d"), %%mm0       \n\t"
+            "movd          (%1, %%"REG_d"), %%mm1       \n\t"
+            "movd         3(%0, %%"REG_d"), %%mm2       \n\t"
+            "movd         3(%1, %%"REG_d"), %%mm3       \n\t"
+            "punpcklbw               %%mm7, %%mm0       \n\t"
+            "punpcklbw               %%mm7, %%mm1       \n\t"
+            "punpcklbw               %%mm7, %%mm2       \n\t"
+            "punpcklbw               %%mm7, %%mm3       \n\t"
+            "paddw                   %%mm1, %%mm0       \n\t"
+            "paddw                   %%mm3, %%mm2       \n\t"
+            "paddw                   %%mm2, %%mm0       \n\t"
+            "movd         6(%0, %%"REG_d"), %%mm4       \n\t"
+            "movd         6(%1, %%"REG_d"), %%mm1       \n\t"
+            "movd         9(%0, %%"REG_d"), %%mm2       \n\t"
+            "movd         9(%1, %%"REG_d"), %%mm3       \n\t"
+            "punpcklbw               %%mm7, %%mm4       \n\t"
+            "punpcklbw               %%mm7, %%mm1       \n\t"
+            "punpcklbw               %%mm7, %%mm2       \n\t"
+            "punpcklbw               %%mm7, %%mm3       \n\t"
+            "paddw                   %%mm1, %%mm4       \n\t"
+            "paddw                   %%mm3, %%mm2       \n\t"
+            "paddw                   %%mm4, %%mm2       \n\t"
+            "psrlw                      $2, %%mm0       \n\t"
+            "psrlw                      $2, %%mm2       \n\t"
+#endif
+            "movq  "MANGLE(ff_bgr2VCoeff)", %%mm1       \n\t"
+            "movq  "MANGLE(ff_bgr2VCoeff)", %%mm3       \n\t"
+
+            "pmaddwd                 %%mm0, %%mm1       \n\t"
+            "pmaddwd                 %%mm2, %%mm3       \n\t"
+            "pmaddwd                 %%mm6, %%mm0       \n\t"
+            "pmaddwd                 %%mm6, %%mm2       \n\t"
+#ifndef FAST_BGR2YV12
+            "psrad                      $8, %%mm0       \n\t"
+            "psrad                      $8, %%mm1       \n\t"
+            "psrad                      $8, %%mm2       \n\t"
+            "psrad                      $8, %%mm3       \n\t"
+#endif
+            "packssdw                %%mm2, %%mm0       \n\t"
+            "packssdw                %%mm3, %%mm1       \n\t"
+            "pmaddwd                 %%mm5, %%mm0       \n\t"
+            "pmaddwd                 %%mm5, %%mm1       \n\t"
+            "packssdw                %%mm1, %%mm0       \n\t" // V1 V0 U1 U0
+            "psraw                      $7, %%mm0       \n\t"
+
+#if HAVE_MMX2 || HAVE_AMD3DNOW
+            "movq        12(%0, %%"REG_d"), %%mm4       \n\t"
+            "movq        12(%1, %%"REG_d"), %%mm1       \n\t"
+            "movq        18(%0, %%"REG_d"), %%mm2       \n\t"
+            "movq        18(%1, %%"REG_d"), %%mm3       \n\t"
+            PAVGB"                   %%mm1, %%mm4       \n\t"
+            PAVGB"                   %%mm3, %%mm2       \n\t"
+            "movq                    %%mm4, %%mm1       \n\t"
+            "movq                    %%mm2, %%mm3       \n\t"
+            "psrlq                     $24, %%mm4       \n\t"
+            "psrlq                     $24, %%mm2       \n\t"
+            PAVGB"                   %%mm1, %%mm4       \n\t"
+            PAVGB"                   %%mm3, %%mm2       \n\t"
+            "punpcklbw               %%mm7, %%mm4       \n\t"
+            "punpcklbw               %%mm7, %%mm2       \n\t"
+#else
+            "movd        12(%0, %%"REG_d"), %%mm4       \n\t"
+            "movd        12(%1, %%"REG_d"), %%mm1       \n\t"
+            "movd        15(%0, %%"REG_d"), %%mm2       \n\t"
+            "movd        15(%1, %%"REG_d"), %%mm3       \n\t"
+            "punpcklbw               %%mm7, %%mm4       \n\t"
+            "punpcklbw               %%mm7, %%mm1       \n\t"
+            "punpcklbw               %%mm7, %%mm2       \n\t"
+            "punpcklbw               %%mm7, %%mm3       \n\t"
+            "paddw                   %%mm1, %%mm4       \n\t"
+            "paddw                   %%mm3, %%mm2       \n\t"
+            "paddw                   %%mm2, %%mm4       \n\t"
+            "movd        18(%0, %%"REG_d"), %%mm5       \n\t"
+            "movd        18(%1, %%"REG_d"), %%mm1       \n\t"
+            "movd        21(%0, %%"REG_d"), %%mm2       \n\t"
+            "movd        21(%1, %%"REG_d"), %%mm3       \n\t"
+            "punpcklbw               %%mm7, %%mm5       \n\t"
+            "punpcklbw               %%mm7, %%mm1       \n\t"
+            "punpcklbw               %%mm7, %%mm2       \n\t"
+            "punpcklbw               %%mm7, %%mm3       \n\t"
+            "paddw                   %%mm1, %%mm5       \n\t"
+            "paddw                   %%mm3, %%mm2       \n\t"
+            "paddw                   %%mm5, %%mm2       \n\t"
+            "movq       "MANGLE(ff_w1111)", %%mm5       \n\t"
+            "psrlw                      $2, %%mm4       \n\t"
+            "psrlw                      $2, %%mm2       \n\t"
+#endif
+            "movq  "MANGLE(ff_bgr2VCoeff)", %%mm1       \n\t"
+            "movq  "MANGLE(ff_bgr2VCoeff)", %%mm3       \n\t"
+
+            "pmaddwd                 %%mm4, %%mm1       \n\t"
+            "pmaddwd                 %%mm2, %%mm3       \n\t"
+            "pmaddwd                 %%mm6, %%mm4       \n\t"
+            "pmaddwd                 %%mm6, %%mm2       \n\t"
+#ifndef FAST_BGR2YV12
+            "psrad                      $8, %%mm4       \n\t"
+            "psrad                      $8, %%mm1       \n\t"
+            "psrad                      $8, %%mm2       \n\t"
+            "psrad                      $8, %%mm3       \n\t"
+#endif
+            "packssdw                %%mm2, %%mm4       \n\t"
+            "packssdw                %%mm3, %%mm1       \n\t"
+            "pmaddwd                 %%mm5, %%mm4       \n\t"
+            "pmaddwd                 %%mm5, %%mm1       \n\t"
+            "add                       $24, %%"REG_d"   \n\t"
+            "packssdw                %%mm1, %%mm4       \n\t" // V3 V2 U3 U2
+            "psraw                      $7, %%mm4       \n\t"
+
+            "movq                    %%mm0, %%mm1           \n\t"
+            "punpckldq               %%mm4, %%mm0           \n\t"
+            "punpckhdq               %%mm4, %%mm1           \n\t"
+            "packsswb                %%mm1, %%mm0           \n\t"
+            "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0         \n\t"
+            "movd                    %%mm0, (%2, %%"REG_a") \n\t"
+            "punpckhdq               %%mm0, %%mm0           \n\t"
+            "movd                    %%mm0, (%3, %%"REG_a") \n\t"
+            "add                        $4, %%"REG_a"       \n\t"
+            " js                        1b                  \n\t"
+            : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
+            : "%"REG_a, "%"REG_d
+        );
+
+        udst += chromStride;
+        vdst += chromStride;
+        src  += srcStride*2;
+    }
+
+    __asm__ volatile(EMMS"       \n\t"
+                     SFENCE"     \n\t"
+                     :::"memory");
+#else
+    y=0;
+#endif
+    for (; y<height; y+=2) {
+        long i;
+        for (i=0; i<chromWidth; i++) {
+            unsigned int b = src[6*i+0];
+            unsigned int g = src[6*i+1];
+            unsigned int r = src[6*i+2];
+
+            unsigned int Y  =  ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+            unsigned int V  =  ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128;
+            unsigned int U  =  ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128;
+
+            udst[i]     = U;
+            vdst[i]     = V;
+            ydst[2*i]   = Y;
+
+            b = src[6*i+3];
+            g = src[6*i+4];
+            r = src[6*i+5];
+
+            Y  =  ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+            ydst[2*i+1]     = Y;
+        }
+        ydst += lumStride;
+        src  += srcStride;
+
+        for (i=0; i<chromWidth; i++) {
+            unsigned int b = src[6*i+0];
+            unsigned int g = src[6*i+1];
+            unsigned int r = src[6*i+2];
+
+            unsigned int Y  =  ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+
+            ydst[2*i]     = Y;
+
+            b = src[6*i+3];
+            g = src[6*i+4];
+            r = src[6*i+5];
+
+            Y  =  ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+            ydst[2*i+1]     = Y;
+        }
+        udst += chromStride;
+        vdst += chromStride;
+        ydst += lumStride;
+        src  += srcStride;
+    }
+}
+
+static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dest,
+                             long width, long height, long src1Stride,
+                             long src2Stride, long dstStride)
+{
+    long h;
+
+    for (h=0; h < height; h++) {
+        long w;
+
+#if HAVE_MMX
+#if HAVE_SSE2
+        __asm__(
+            "xor              %%"REG_a", %%"REG_a"  \n\t"
+            "1:                                     \n\t"
+            PREFETCH" 64(%1, %%"REG_a")             \n\t"
+            PREFETCH" 64(%2, %%"REG_a")             \n\t"
+            "movdqa     (%1, %%"REG_a"), %%xmm0     \n\t"
+            "movdqa     (%1, %%"REG_a"), %%xmm1     \n\t"
+            "movdqa     (%2, %%"REG_a"), %%xmm2     \n\t"
+            "punpcklbw           %%xmm2, %%xmm0     \n\t"
+            "punpckhbw           %%xmm2, %%xmm1     \n\t"
+            "movntdq             %%xmm0,   (%0, %%"REG_a", 2)   \n\t"
+            "movntdq             %%xmm1, 16(%0, %%"REG_a", 2)   \n\t"
+            "add                    $16, %%"REG_a"  \n\t"
+            "cmp                     %3, %%"REG_a"  \n\t"
+            " jb                     1b             \n\t"
+            ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
+            : "memory", "%"REG_a""
+        );
+#else
+        __asm__(
+            "xor %%"REG_a", %%"REG_a"               \n\t"
+            "1:                                     \n\t"
+            PREFETCH" 64(%1, %%"REG_a")             \n\t"
+            PREFETCH" 64(%2, %%"REG_a")             \n\t"
+            "movq       (%1, %%"REG_a"), %%mm0      \n\t"
+            "movq      8(%1, %%"REG_a"), %%mm2      \n\t"
+            "movq                 %%mm0, %%mm1      \n\t"
+            "movq                 %%mm2, %%mm3      \n\t"
+            "movq       (%2, %%"REG_a"), %%mm4      \n\t"
+            "movq      8(%2, %%"REG_a"), %%mm5      \n\t"
+            "punpcklbw            %%mm4, %%mm0      \n\t"
+            "punpckhbw            %%mm4, %%mm1      \n\t"
+            "punpcklbw            %%mm5, %%mm2      \n\t"
+            "punpckhbw            %%mm5, %%mm3      \n\t"
+            MOVNTQ"               %%mm0,   (%0, %%"REG_a", 2)   \n\t"
+            MOVNTQ"               %%mm1,  8(%0, %%"REG_a", 2)   \n\t"
+            MOVNTQ"               %%mm2, 16(%0, %%"REG_a", 2)   \n\t"
+            MOVNTQ"               %%mm3, 24(%0, %%"REG_a", 2)   \n\t"
+            "add                    $16, %%"REG_a"  \n\t"
+            "cmp                     %3, %%"REG_a"  \n\t"
+            " jb                     1b             \n\t"
+            ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
+            : "memory", "%"REG_a
+        );
+#endif
+        for (w= (width&(~15)); w < width; w++) {
+            dest[2*w+0] = src1[w];
+            dest[2*w+1] = src2[w];
+        }
+#else
+        for (w=0; w < width; w++) {
+            dest[2*w+0] = src1[w];
+            dest[2*w+1] = src2[w];
+        }
+#endif
+        dest += dstStride;
+        src1 += src1Stride;
+        src2 += src2Stride;
+    }
+#if HAVE_MMX
+    __asm__(
+            EMMS"       \n\t"
+            SFENCE"     \n\t"
+            ::: "memory"
+            );
+#endif
+}
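
The punpcklbw/punpckhbw pairs above interleave the two source planes byte by byte, and the non-temporal MOVNTQ/movntdq stores (flushed by the final SFENCE) write the result past the cache. A rough scalar model of one 16-byte SIMD iteration, for illustration only, not the committed code:

    #include <stdint.h>

    /* One 16-byte step of interleaveBytes in plain C: src1 bytes land in
     * even output positions, src2 bytes in odd ones, matching what the
     * punpcklbw/punpckhbw + store sequence produces. */
    static void interleave16_model(const uint8_t *s1, const uint8_t *s2, uint8_t *d)
    {
        int i;
        for (i = 0; i < 16; i++) {
            d[2*i + 0] = s1[i];
            d[2*i + 1] = s2[i];
        }
    }
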
+
+static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
+                                       uint8_t *dst1, uint8_t *dst2,
+                                       long width, long height,
+                                       long srcStride1, long srcStride2,
+                                       long dstStride1, long dstStride2)
+{
+    x86_reg y;
+    long x,w,h;
+    w=width/2; h=height/2;
+#if HAVE_MMX
+    __asm__ volatile(
+        PREFETCH" %0    \n\t"
+        PREFETCH" %1    \n\t"
+        ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
+#endif
+    for (y=0;y<h;y++) {
+        const uint8_t* s1=src1+srcStride1*(y>>1);
+        uint8_t* d=dst1+dstStride1*y;
+        x=0;
+#if HAVE_MMX
+        for (;x<w-31;x+=32) {
+            __asm__ volatile(
+                PREFETCH"   32%1        \n\t"
+                "movq         %1, %%mm0 \n\t"
+                "movq        8%1, %%mm2 \n\t"
+                "movq       16%1, %%mm4 \n\t"
+                "movq       24%1, %%mm6 \n\t"
+                "movq      %%mm0, %%mm1 \n\t"
+                "movq      %%mm2, %%mm3 \n\t"
+                "movq      %%mm4, %%mm5 \n\t"
+                "movq      %%mm6, %%mm7 \n\t"
+                "punpcklbw %%mm0, %%mm0 \n\t"
+                "punpckhbw %%mm1, %%mm1 \n\t"
+                "punpcklbw %%mm2, %%mm2 \n\t"
+                "punpckhbw %%mm3, %%mm3 \n\t"
+                "punpcklbw %%mm4, %%mm4 \n\t"
+                "punpckhbw %%mm5, %%mm5 \n\t"
+                "punpcklbw %%mm6, %%mm6 \n\t"
+                "punpckhbw %%mm7, %%mm7 \n\t"
+                MOVNTQ"    %%mm0,   %0  \n\t"
+                MOVNTQ"    %%mm1,  8%0  \n\t"
+                MOVNTQ"    %%mm2, 16%0  \n\t"
+                MOVNTQ"    %%mm3, 24%0  \n\t"
+                MOVNTQ"    %%mm4, 32%0  \n\t"
+                MOVNTQ"    %%mm5, 40%0  \n\t"
+                MOVNTQ"    %%mm6, 48%0  \n\t"
+                MOVNTQ"    %%mm7, 56%0"
+                :"=m"(d[2*x])
+                :"m"(s1[x])
+                :"memory");
+        }
+#endif
+        for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
+    }
+    for (y=0;y<h;y++) {
+        const uint8_t* s2=src2+srcStride2*(y>>1);
+        uint8_t* d=dst2+dstStride2*y;
+        x=0;
+#if HAVE_MMX
+        for (;x<w-31;x+=32) {
+            __asm__ volatile(
+                PREFETCH"   32%1        \n\t"
+                "movq         %1, %%mm0 \n\t"
+                "movq        8%1, %%mm2 \n\t"
+                "movq       16%1, %%mm4 \n\t"
+                "movq       24%1, %%mm6 \n\t"
+                "movq      %%mm0, %%mm1 \n\t"
+                "movq      %%mm2, %%mm3 \n\t"
+                "movq      %%mm4, %%mm5 \n\t"
+                "movq      %%mm6, %%mm7 \n\t"
+                "punpcklbw %%mm0, %%mm0 \n\t"
+                "punpckhbw %%mm1, %%mm1 \n\t"
+                "punpcklbw %%mm2, %%mm2 \n\t"
+                "punpckhbw %%mm3, %%mm3 \n\t"
+                "punpcklbw %%mm4, %%mm4 \n\t"
+                "punpckhbw %%mm5, %%mm5 \n\t"
+                "punpcklbw %%mm6, %%mm6 \n\t"
+                "punpckhbw %%mm7, %%mm7 \n\t"
+                MOVNTQ"    %%mm0,   %0  \n\t"
+                MOVNTQ"    %%mm1,  8%0  \n\t"
+                MOVNTQ"    %%mm2, 16%0  \n\t"
+                MOVNTQ"    %%mm3, 24%0  \n\t"
+                MOVNTQ"    %%mm4, 32%0  \n\t"
+                MOVNTQ"    %%mm5, 40%0  \n\t"
+                MOVNTQ"    %%mm6, 48%0  \n\t"
+                MOVNTQ"    %%mm7, 56%0"
+                :"=m"(d[2*x])
+                :"m"(s2[x])
+                :"memory");
+        }
+#endif
+        for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
+    }
+#if HAVE_MMX
+    __asm__(
+            EMMS"       \n\t"
+            SFENCE"     \n\t"
+            ::: "memory"
+        );
+#endif
+}
+
+static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
+                                        uint8_t *dst,
+                                        long width, long height,
+                                        long srcStride1, long srcStride2,
+                                        long srcStride3, long dstStride)
+{
+    x86_reg x;
+    long y,w,h;
+    w=width/2; h=height;
+    for (y=0;y<h;y++) {
+        const uint8_t* yp=src1+srcStride1*y;
+        const uint8_t* up=src2+srcStride2*(y>>2);
+        const uint8_t* vp=src3+srcStride3*(y>>2);
+        uint8_t* d=dst+dstStride*y;
+        x=0;
+#if HAVE_MMX
+        for (;x<w-7;x+=8) {
+            __asm__ volatile(
+                PREFETCH"   32(%1, %0)          \n\t"
+                PREFETCH"   32(%2, %0)          \n\t"
+                PREFETCH"   32(%3, %0)          \n\t"
+                "movq      (%1, %0, 4), %%mm0   \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
+                "movq         (%2, %0), %%mm1   \n\t" /* U0U1U2U3U4U5U6U7 */
+                "movq         (%3, %0), %%mm2   \n\t" /* V0V1V2V3V4V5V6V7 */
+                "movq            %%mm0, %%mm3   \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
+                "movq            %%mm1, %%mm4   \n\t" /* U0U1U2U3U4U5U6U7 */
+                "movq            %%mm2, %%mm5   \n\t" /* V0V1V2V3V4V5V6V7 */
+                "punpcklbw       %%mm1, %%mm1   \n\t" /* U0U0 U1U1 U2U2 U3U3 */
+                "punpcklbw       %%mm2, %%mm2   \n\t" /* V0V0 V1V1 V2V2 V3V3 */
+                "punpckhbw       %%mm4, %%mm4   \n\t" /* U4U4 U5U5 U6U6 U7U7 */
+                "punpckhbw       %%mm5, %%mm5   \n\t" /* V4V4 V5V5 V6V6 V7V7 */
+
+                "movq            %%mm1, %%mm6   \n\t"
+                "punpcklbw       %%mm2, %%mm1   \n\t" /* U0V0 U0V0 U1V1 U1V1*/
+                "punpcklbw       %%mm1, %%mm0   \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
+                "punpckhbw       %%mm1, %%mm3   \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
+                MOVNTQ"          %%mm0,  (%4, %0, 8)    \n\t"
+                MOVNTQ"          %%mm3, 8(%4, %0, 8)    \n\t"
+
+                "punpckhbw       %%mm2, %%mm6   \n\t" /* U2V2 U2V2 U3V3 U3V3*/
+                "movq     8(%1, %0, 4), %%mm0   \n\t"
+                "movq            %%mm0, %%mm3   \n\t"
+                "punpcklbw       %%mm6, %%mm0   \n\t" /* Y U2 Y V2 Y U2 Y V2*/
+                "punpckhbw       %%mm6, %%mm3   \n\t" /* Y U3 Y V3 Y U3 Y V3*/
+                MOVNTQ"          %%mm0, 16(%4, %0, 8)   \n\t"
+                MOVNTQ"          %%mm3, 24(%4, %0, 8)   \n\t"
+
+                "movq            %%mm4, %%mm6   \n\t"
+                "movq    16(%1, %0, 4), %%mm0   \n\t"
+                "movq            %%mm0, %%mm3   \n\t"
+                "punpcklbw       %%mm5, %%mm4   \n\t"
+                "punpcklbw       %%mm4, %%mm0   \n\t" /* Y U4 Y V4 Y U4 Y V4*/
+                "punpckhbw       %%mm4, %%mm3   \n\t" /* Y U5 Y V5 Y U5 Y V5*/
+                MOVNTQ"          %%mm0, 32(%4, %0, 8)   \n\t"
+                MOVNTQ"          %%mm3, 40(%4, %0, 8)   \n\t"
+
+                "punpckhbw       %%mm5, %%mm6   \n\t"
+                "movq    24(%1, %0, 4), %%mm0   \n\t"
+                "movq            %%mm0, %%mm3   \n\t"
+                "punpcklbw       %%mm6, %%mm0   \n\t" /* Y U6 Y V6 Y U6 Y V6*/
+                "punpckhbw       %%mm6, %%mm3   \n\t" /* Y U7 Y V7 Y U7 Y V7*/
+                MOVNTQ"          %%mm0, 48(%4, %0, 8)   \n\t"
+                MOVNTQ"          %%mm3, 56(%4, %0, 8)   \n\t"
+
+                : "+r" (x)
+                : "r"(yp), "r" (up), "r"(vp), "r"(d)
+                :"memory");
+        }
+#endif
+        for (; x<w; x++) {
+            const long x2 = x<<2;
+            d[8*x+0] = yp[x2];
+            d[8*x+1] = up[x];
+            d[8*x+2] = yp[x2+1];
+            d[8*x+3] = vp[x];
+            d[8*x+4] = yp[x2+2];
+            d[8*x+5] = up[x];
+            d[8*x+6] = yp[x2+3];
+            d[8*x+7] = vp[x];
+        }
+    }
+#if HAVE_MMX
+    __asm__(
+            EMMS"       \n\t"
+            SFENCE"     \n\t"
+            ::: "memory"
+        );
+#endif
+}
+
+static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count)
+{
+    dst +=   count;
+    src += 2*count;
+    count= - count;
+
+#if HAVE_MMX
+    if(count <= -16) {
+        count += 15;
+        __asm__ volatile(
+            "pcmpeqw       %%mm7, %%mm7        \n\t"
+            "psrlw            $8, %%mm7        \n\t"
+            "1:                                \n\t"
+            "movq -30(%1, %0, 2), %%mm0        \n\t"
+            "movq -22(%1, %0, 2), %%mm1        \n\t"
+            "movq -14(%1, %0, 2), %%mm2        \n\t"
+            "movq  -6(%1, %0, 2), %%mm3        \n\t"
+            "pand          %%mm7, %%mm0        \n\t"
+            "pand          %%mm7, %%mm1        \n\t"
+            "pand          %%mm7, %%mm2        \n\t"
+            "pand          %%mm7, %%mm3        \n\t"
+            "packuswb      %%mm1, %%mm0        \n\t"
+            "packuswb      %%mm3, %%mm2        \n\t"
+            MOVNTQ"        %%mm0,-15(%2, %0)   \n\t"
+            MOVNTQ"        %%mm2,- 7(%2, %0)   \n\t"
+            "add             $16, %0           \n\t"
+            " js 1b                            \n\t"
+            : "+r"(count)
+            : "r"(src), "r"(dst)
+        );
+        count -= 15;
+    }
+#endif
+    while(count<0) {
+        dst[count]= src[2*count];
+        count++;
+    }
+}
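
extract_even keeps every second byte of src (for example the luma samples of a packed YUYV line). The pointers are first advanced to the end of the data so the negative count can serve as both loop counter and index; the MMX body masks the odd bytes with pand and narrows the words with packuswb, equivalent to this scalar sketch (illustrative only):

    #include <stdint.h>

    /* Scalar equivalent of the MMX loop: copy the even-offset bytes. */
    static void extract_even_model(const uint8_t *src, uint8_t *dst, int n)
    {
        int i;
        for (i = 0; i < n; i++)
            dst[i] = src[2*i];   /* pand 0x00ff + packuswb, 8 outputs per movq */
    }
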
+
+static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
+{
+    dst0+=   count;
+    dst1+=   count;
+    src += 4*count;
+    count= - count;
+#if HAVE_MMX
+    if(count <= -8) {
+        count += 7;
+        __asm__ volatile(
+            "pcmpeqw       %%mm7, %%mm7        \n\t"
+            "psrlw            $8, %%mm7        \n\t"
+            "1:                                \n\t"
+            "movq -28(%1, %0, 4), %%mm0        \n\t"
+            "movq -20(%1, %0, 4), %%mm1        \n\t"
+            "movq -12(%1, %0, 4), %%mm2        \n\t"
+            "movq  -4(%1, %0, 4), %%mm3        \n\t"
+            "pand          %%mm7, %%mm0        \n\t"
+            "pand          %%mm7, %%mm1        \n\t"
+            "pand          %%mm7, %%mm2        \n\t"
+            "pand          %%mm7, %%mm3        \n\t"
+            "packuswb      %%mm1, %%mm0        \n\t"
+            "packuswb      %%mm3, %%mm2        \n\t"
+            "movq          %%mm0, %%mm1        \n\t"
+            "movq          %%mm2, %%mm3        \n\t"
+            "psrlw            $8, %%mm0        \n\t"
+            "psrlw            $8, %%mm2        \n\t"
+            "pand          %%mm7, %%mm1        \n\t"
+            "pand          %%mm7, %%mm3        \n\t"
+            "packuswb      %%mm2, %%mm0        \n\t"
+            "packuswb      %%mm3, %%mm1        \n\t"
+            MOVNTQ"        %%mm0,- 7(%3, %0)   \n\t"
+            MOVNTQ"        %%mm1,- 7(%2, %0)   \n\t"
+            "add              $8, %0           \n\t"
+            " js 1b                            \n\t"
+            : "+r"(count)
+            : "r"(src), "r"(dst0), "r"(dst1)
+        );
+        count -= 7;
+    }
+#endif
+    while(count<0) {
+        dst0[count]= src[4*count+0];
+        dst1[count]= src[4*count+2];
+        count++;
+    }
+}
+
+static void RENAME(extract_even2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
+{
+    dst0 +=   count;
+    dst1 +=   count;
+    src0 += 4*count;
+    src1 += 4*count;
+    count= - count;
+#ifdef PAVGB
+    if(count <= -8) {
+        count += 7;
+        __asm__ volatile(
+            "pcmpeqw        %%mm7, %%mm7        \n\t"
+            "psrlw             $8, %%mm7        \n\t"
+            "1:                                \n\t"
+            "movq  -28(%1, %0, 4), %%mm0        \n\t"
+            "movq  -20(%1, %0, 4), %%mm1        \n\t"
+            "movq  -12(%1, %0, 4), %%mm2        \n\t"
+            "movq   -4(%1, %0, 4), %%mm3        \n\t"
+            PAVGB" -28(%2, %0, 4), %%mm0        \n\t"
+            PAVGB" -20(%2, %0, 4), %%mm1        \n\t"
+            PAVGB" -12(%2, %0, 4), %%mm2        \n\t"
+            PAVGB" - 4(%2, %0, 4), %%mm3        \n\t"
+            "pand           %%mm7, %%mm0        \n\t"
+            "pand           %%mm7, %%mm1        \n\t"
+            "pand           %%mm7, %%mm2        \n\t"
+            "pand           %%mm7, %%mm3        \n\t"
+            "packuswb       %%mm1, %%mm0        \n\t"
+            "packuswb       %%mm3, %%mm2        \n\t"
+            "movq           %%mm0, %%mm1        \n\t"
+            "movq           %%mm2, %%mm3        \n\t"
+            "psrlw             $8, %%mm0        \n\t"
+            "psrlw             $8, %%mm2        \n\t"
+            "pand           %%mm7, %%mm1        \n\t"
+            "pand           %%mm7, %%mm3        \n\t"
+            "packuswb       %%mm2, %%mm0        \n\t"
+            "packuswb       %%mm3, %%mm1        \n\t"
+            MOVNTQ"         %%mm0,- 7(%4, %0)   \n\t"
+            MOVNTQ"         %%mm1,- 7(%3, %0)   \n\t"
+            "add               $8, %0           \n\t"
+            " js 1b                            \n\t"
+            : "+r"(count)
+            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
+        );
+        count -= 7;
+    }
+#endif
+    while(count<0) {
+        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
+        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
+        count++;
+    }
+}
+
+static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
+{
+    dst0+=   count;
+    dst1+=   count;
+    src += 4*count;
+    count= - count;
+#if HAVE_MMX
+    if(count <= -8) {
+        count += 7;
+        __asm__ volatile(
+            "pcmpeqw       %%mm7, %%mm7        \n\t"
+            "psrlw            $8, %%mm7        \n\t"
+            "1:                                \n\t"
+            "movq -28(%1, %0, 4), %%mm0        \n\t"
+            "movq -20(%1, %0, 4), %%mm1        \n\t"
+            "movq -12(%1, %0, 4), %%mm2        \n\t"
+            "movq  -4(%1, %0, 4), %%mm3        \n\t"
+            "psrlw            $8, %%mm0        \n\t"
+            "psrlw            $8, %%mm1        \n\t"
+            "psrlw            $8, %%mm2        \n\t"
+            "psrlw            $8, %%mm3        \n\t"
+            "packuswb      %%mm1, %%mm0        \n\t"
+            "packuswb      %%mm3, %%mm2        \n\t"
+            "movq          %%mm0, %%mm1        \n\t"
+            "movq          %%mm2, %%mm3        \n\t"
+            "psrlw            $8, %%mm0        \n\t"
+            "psrlw            $8, %%mm2        \n\t"
+            "pand          %%mm7, %%mm1        \n\t"
+            "pand          %%mm7, %%mm3        \n\t"
+            "packuswb      %%mm2, %%mm0        \n\t"
+            "packuswb      %%mm3, %%mm1        \n\t"
+            MOVNTQ"        %%mm0,- 7(%3, %0)   \n\t"
+            MOVNTQ"        %%mm1,- 7(%2, %0)   \n\t"
+            "add              $8, %0           \n\t"
+            " js 1b                            \n\t"
+            : "+r"(count)
+            : "r"(src), "r"(dst0), "r"(dst1)
+        );
+        count -= 7;
+    }
+#endif
+    src++;
+    while(count<0) {
+        dst0[count]= src[4*count+0];
+        dst1[count]= src[4*count+2];
+        count++;
+    }
+}
+
+static void RENAME(extract_odd2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
+{
+    dst0 +=   count;
+    dst1 +=   count;
+    src0 += 4*count;
+    src1 += 4*count;
+    count= - count;
+#ifdef PAVGB
+    if(count <= -8) {
+        count += 7;
+        __asm__ volatile(
+            "pcmpeqw        %%mm7, %%mm7        \n\t"
+            "psrlw             $8, %%mm7        \n\t"
+            "1:                                \n\t"
+            "movq  -28(%1, %0, 4), %%mm0        \n\t"
+            "movq  -20(%1, %0, 4), %%mm1        \n\t"
+            "movq  -12(%1, %0, 4), %%mm2        \n\t"
+            "movq   -4(%1, %0, 4), %%mm3        \n\t"
+            PAVGB" -28(%2, %0, 4), %%mm0        \n\t"
+            PAVGB" -20(%2, %0, 4), %%mm1        \n\t"
+            PAVGB" -12(%2, %0, 4), %%mm2        \n\t"
+            PAVGB" - 4(%2, %0, 4), %%mm3        \n\t"
+            "psrlw             $8, %%mm0        \n\t"
+            "psrlw             $8, %%mm1        \n\t"
+            "psrlw             $8, %%mm2        \n\t"
+            "psrlw             $8, %%mm3        \n\t"
+            "packuswb       %%mm1, %%mm0        \n\t"
+            "packuswb       %%mm3, %%mm2        \n\t"
+            "movq           %%mm0, %%mm1        \n\t"
+            "movq           %%mm2, %%mm3        \n\t"
+            "psrlw             $8, %%mm0        \n\t"
+            "psrlw             $8, %%mm2        \n\t"
+            "pand           %%mm7, %%mm1        \n\t"
+            "pand           %%mm7, %%mm3        \n\t"
+            "packuswb       %%mm2, %%mm0        \n\t"
+            "packuswb       %%mm3, %%mm1        \n\t"
+            MOVNTQ"         %%mm0,- 7(%4, %0)   \n\t"
+            MOVNTQ"         %%mm1,- 7(%3, %0)   \n\t"
+            "add               $8, %0           \n\t"
+            " js 1b                            \n\t"
+            : "+r"(count)
+            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
+        );
+        count -= 7;
+    }
+#endif
+    src0++;
+    src1++;
+    while(count<0) {
+        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
+        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
+        count++;
+    }
+}
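
Together these helpers deinterleave the chroma bytes of packed 4:2:2 data: extract_even2 picks offsets 0 and 2 of each 4-byte group (U/V in UYVY), extract_odd2 picks offsets 1 and 3 (U/V in YUYV), and the *2avg variants additionally average two adjacent source lines so the result is vertically subsampled for 4:2:0 output. A scalar sketch of extract_odd2avg, assuming a positive element count and no SIMD:

    #include <stdint.h>

    /* Average the U (offset 1) and V (offset 3) bytes of two YUYV lines
     * into one 4:2:0 chroma line. */
    static void extract_odd2avg_model(const uint8_t *line0, const uint8_t *line1,
                                      uint8_t *u, uint8_t *v, int n)
    {
        int i;
        for (i = 0; i < n; i++) {
            u[i] = (line0[4*i + 1] + line1[4*i + 1]) >> 1;
            v[i] = (line0[4*i + 3] + line1[4*i + 3]) >> 1;
        }
    }
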
+
+static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                                      long width, long height,
+                                      long lumStride, long chromStride, long srcStride)
+{
+    long y;
+    const long chromWidth= -((-width)>>1);
+
+    for (y=0; y<height; y++) {
+        RENAME(extract_even)(src, ydst, width);
+        if(y&1) {
+            RENAME(extract_odd2avg)(src-srcStride, src, udst, vdst, chromWidth);
+            udst+= chromStride;
+            vdst+= chromStride;
+        }
+
+        src += srcStride;
+        ydst+= lumStride;
+    }
+#if HAVE_MMX
+    __asm__(
+            EMMS"       \n\t"
+            SFENCE"     \n\t"
+            ::: "memory"
+        );
+#endif
+}
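
Chroma is produced only on odd source rows, averaged with the previous row, which is what turns the 4:2:2 input into 4:2:0. A compact scalar sketch of the whole routine, assuming even width and height and packed strides (illustrative, not the committed implementation):

    #include <stdint.h>

    static void yuyv_to_yuv420_model(const uint8_t *src, uint8_t *y,
                                     uint8_t *u, uint8_t *v, int w, int h)
    {
        int i, j;
        for (j = 0; j < h; j++) {
            const uint8_t *line = src + j*2*w;      /* YUYV: 2 bytes per pixel */
            for (i = 0; i < w; i++)
                y[j*w + i] = line[2*i];             /* even bytes are luma */
            if (j & 1) {                            /* one chroma row per line pair */
                const uint8_t *prev = line - 2*w;
                for (i = 0; i < w/2; i++) {
                    u[(j/2)*(w/2) + i] = (prev[4*i + 1] + line[4*i + 1]) >> 1;
                    v[(j/2)*(w/2) + i] = (prev[4*i + 3] + line[4*i + 3]) >> 1;
                }
            }
        }
    }
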
+
+static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                                      long width, long height,
+                                      long lumStride, long chromStride, long srcStride)
+{
+    long y;
+    const long chromWidth= -((-width)>>1);
+
+    for (y=0; y<height; y++) {
+        RENAME(extract_even)(src, ydst, width);
+        RENAME(extract_odd2)(src, udst, vdst, chromWidth);
+
+        src += srcStride;
+        ydst+= lumStride;
+        udst+= chromStride;
+        vdst+= chromStride;
+    }
+#if HAVE_MMX
+    __asm__(
+            EMMS"       \n\t"
+            SFENCE"     \n\t"
+            ::: "memory"
+        );
+#endif
+}
+
+static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                                      long width, long height,
+                                      long lumStride, long chromStride, long srcStride)
+{
+    long y;
+    const long chromWidth= -((-width)>>1);
+
+    for (y=0; y<height; y++) {
+        RENAME(extract_even)(src+1, ydst, width);
+        if(y&1) {
+            RENAME(extract_even2avg)(src-srcStride, src, udst, vdst, chromWidth);
+            udst+= chromStride;
+            vdst+= chromStride;
+        }
+
+        src += srcStride;
+        ydst+= lumStride;
+    }
+#if HAVE_MMX
+    __asm__(
+            EMMS"       \n\t"
+            SFENCE"     \n\t"
+            ::: "memory"
+        );
+#endif
+}
+
+static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
+                                      long width, long height,
+                                      long lumStride, long chromStride, long srcStride)
+{
+    long y;
+    const long chromWidth= -((-width)>>1);
+
+    for (y=0; y<height; y++) {
+        RENAME(extract_even)(src+1, ydst, width);
+        RENAME(extract_even2)(src, udst, vdst, chromWidth);
+
+        src += srcStride;
+        ydst+= lumStride;
+        udst+= chromStride;
+        vdst+= chromStride;
+    }
+#if HAVE_MMX
+    __asm__(
+            EMMS"       \n\t"
+            SFENCE"     \n\t"
+            ::: "memory"
+        );
+#endif
+}
+
+static inline void RENAME(rgb2rgb_init)(void)
+{
+    rgb15to16       = RENAME(rgb15to16);
+    rgb15tobgr24    = RENAME(rgb15tobgr24);
+    rgb15to32       = RENAME(rgb15to32);
+    rgb16tobgr24    = RENAME(rgb16tobgr24);
+    rgb16to32       = RENAME(rgb16to32);
+    rgb16to15       = RENAME(rgb16to15);
+    rgb24tobgr16    = RENAME(rgb24tobgr16);
+    rgb24tobgr15    = RENAME(rgb24tobgr15);
+    rgb24tobgr32    = RENAME(rgb24tobgr32);
+    rgb32to16       = RENAME(rgb32to16);
+    rgb32to15       = RENAME(rgb32to15);
+    rgb32tobgr24    = RENAME(rgb32tobgr24);
+    rgb24to15       = RENAME(rgb24to15);
+    rgb24to16       = RENAME(rgb24to16);
+    rgb24tobgr24    = RENAME(rgb24tobgr24);
+    rgb32tobgr32    = RENAME(rgb32tobgr32);
+    rgb32tobgr16    = RENAME(rgb32tobgr16);
+    rgb32tobgr15    = RENAME(rgb32tobgr15);
+    yv12toyuy2      = RENAME(yv12toyuy2);
+    yv12touyvy      = RENAME(yv12touyvy);
+    yuv422ptoyuy2   = RENAME(yuv422ptoyuy2);
+    yuv422ptouyvy   = RENAME(yuv422ptouyvy);
+    yuy2toyv12      = RENAME(yuy2toyv12);
+//    yvu9toyv12      = RENAME(yvu9toyv12);
+    planar2x        = RENAME(planar2x);
+    rgb24toyv12     = RENAME(rgb24toyv12);
+    interleaveBytes = RENAME(interleaveBytes);
+    vu9_to_vu12     = RENAME(vu9_to_vu12);
+    yvu9_to_yuy2    = RENAME(yvu9_to_yuy2);
+
+    uyvytoyuv420    = RENAME(uyvytoyuv420);
+    uyvytoyuv422    = RENAME(uyvytoyuv422);
+    yuyvtoyuv420    = RENAME(yuyvtoyuv420);
+    yuyvtoyuv422    = RENAME(yuyvtoyuv422);
+}
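
This init routine is what ties the template together: the file is compiled once per instruction-set flavour, each instantiation getting its own RENAME() suffix, and the matching set of function pointers is installed at runtime. A stripped-down sketch of that dispatch pattern, with hypothetical names rather than the actual rgb2rgb.c symbols:

    #include <stdint.h>

    /* Hypothetical dispatch sketch: each suffix stands for one RENAME()d
     * instantiation of the template file. */
    void rgb24tobgr24_c   (const uint8_t *src, uint8_t *dst, long size);
    void rgb24tobgr24_mmx (const uint8_t *src, uint8_t *dst, long size);
    void rgb24tobgr24_mmx2(const uint8_t *src, uint8_t *dst, long size);

    void (*rgb24tobgr24_ptr)(const uint8_t *src, uint8_t *dst, long size);

    void rgb2rgb_pick(int has_mmx, int has_mmx2)
    {
        if      (has_mmx2) rgb24tobgr24_ptr = rgb24tobgr24_mmx2;
        else if (has_mmx)  rgb24tobgr24_ptr = rgb24tobgr24_mmx;
        else               rgb24tobgr24_ptr = rgb24tobgr24_c;
    }
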

Added: branches/0.6/libswscale/sparc/yuv2rgb_vis.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/sparc/yuv2rgb_vis.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,212 @@
+/*
+ * VIS optimized software YUV to RGB converter
+ * Copyright (c) 2007 Denes Balatoni <dbalatoni at programozo.hu>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+
+#define YUV2RGB_INIT \
+    "wr %%g0, 0x10, %%gsr \n\t" \
+    "ldd [%5], %%f32      \n\t" \
+    "ldd [%5+8], %%f34    \n\t" \
+    "ldd [%5+16], %%f36   \n\t" \
+    "ldd [%5+24], %%f38   \n\t" \
+    "ldd [%5+32], %%f40   \n\t" \
+    "ldd [%5+40], %%f42   \n\t" \
+    "ldd [%5+48], %%f44   \n\t" \
+    "ldd [%5+56], %%f46   \n\t" \
+    "ldd [%5+64], %%f48   \n\t" \
+    "ldd [%5+72], %%f50   \n\t"
+
+#define YUV2RGB_KERNEL \
+    /* ^^^^ f0=Y f3=u f5=v */ \
+    "fmul8x16 %%f3, %%f48, %%f6   \n\t" \
+    "fmul8x16 %%f19, %%f48, %%f22 \n\t" \
+    "fmul8x16 %%f5, %%f44, %%f8   \n\t" \
+    "fmul8x16 %%f21, %%f44, %%f24 \n\t" \
+    "fmul8x16 %%f0, %%f42, %%f0   \n\t" \
+    "fmul8x16 %%f16, %%f42, %%f16 \n\t" \
+    "fmul8x16 %%f3, %%f50, %%f2   \n\t" \
+    "fmul8x16 %%f19, %%f50, %%f18 \n\t" \
+    "fmul8x16 %%f5, %%f46, %%f4   \n\t" \
+    "fmul8x16 %%f21, %%f46, %%f20 \n\t" \
+    \
+    "fpsub16 %%f6, %%f34, %%f6   \n\t" /* 1 */ \
+    "fpsub16 %%f22, %%f34, %%f22 \n\t" /* 1 */ \
+    "fpsub16 %%f8, %%f38, %%f8   \n\t" /* 3 */ \
+    "fpsub16 %%f24, %%f38, %%f24 \n\t" /* 3 */ \
+    "fpsub16 %%f0, %%f32, %%f0   \n\t" /* 0 */ \
+    "fpsub16 %%f16, %%f32, %%f16 \n\t" /* 0 */ \
+    "fpsub16 %%f2, %%f36, %%f2   \n\t" /* 2 */ \
+    "fpsub16 %%f18, %%f36, %%f18 \n\t" /* 2 */ \
+    "fpsub16 %%f4, %%f40, %%f4   \n\t" /* 4 */ \
+    "fpsub16 %%f20, %%f40, %%f20 \n\t" /* 4 */ \
+    \
+    "fpadd16 %%f0, %%f8, %%f8    \n\t" /* Gt */ \
+    "fpadd16 %%f16, %%f24, %%f24 \n\t" /* Gt */ \
+    "fpadd16 %%f0, %%f4, %%f4    \n\t" /* R */ \
+    "fpadd16 %%f16, %%f20, %%f20 \n\t" /* R */ \
+    "fpadd16 %%f0, %%f6, %%f6    \n\t" /* B */ \
+    "fpadd16 %%f16, %%f22, %%f22 \n\t" /* B */ \
+    "fpadd16 %%f8, %%f2, %%f2    \n\t" /* G */ \
+    "fpadd16 %%f24, %%f18, %%f18 \n\t" /* G */ \
+    \
+    "fpack16 %%f4, %%f4    \n\t" \
+    "fpack16 %%f20, %%f20  \n\t" \
+    "fpack16 %%f6, %%f6    \n\t" \
+    "fpack16 %%f22, %%f22  \n\t" \
+    "fpack16 %%f2, %%f2    \n\t" \
+    "fpack16 %%f18, %%f18  \n\t"
+
+
+
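
YUV2RGB_INIT loads precomputed (offset x coefficient) words and the coefficients themselves into %f32-%f50; YUV2RGB_KERNEL then scales each 8-bit sample by a coefficient (fmul8x16), subtracts the offset terms (fpsub16), sums the contributions (fpadd16) and saturates to bytes (fpack16). Read back from the register usage, the per-pixel arithmetic is roughly:

    /* cy, crv, cgu, cgv, cbu are yCoeff, vrCoeff, ugCoeff, vgCoeff and
     * ubCoeff as packed by ff_yuv2rgb_init_vis below (sketch):
     *   R = (Y - yOff)*cy + (V - vOff)*crv
     *   G = (Y - yOff)*cy + (U - uOff)*cgu + (V - vOff)*cgv
     *   B = (Y - yOff)*cy + (U - uOff)*cbu
     * each term held in 16-bit fixed point, clipped to 0..255 by fpack16. */
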
+// FIXME: must be changed to set alpha to 255 instead of 0
+static int vis_420P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                           int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int y, out1, out2, out3, out4, out5, out6;
+
+    for(y=0;y < srcSliceH;++y) {
+        __asm__ volatile (
+            YUV2RGB_INIT
+            "wr %%g0, 0xd2, %%asi        \n\t" /* ASI_FL16_P */
+            "1:                          \n\t"
+            "ldda [%1] %%asi, %%f2       \n\t"
+            "ldda [%1+2] %%asi, %%f18    \n\t"
+            "ldda [%2] %%asi, %%f4       \n\t"
+            "ldda [%2+2] %%asi, %%f20    \n\t"
+            "ld [%0], %%f0               \n\t"
+            "ld [%0+4], %%f16            \n\t"
+            "fpmerge %%f3, %%f3, %%f2    \n\t"
+            "fpmerge %%f19, %%f19, %%f18 \n\t"
+            "fpmerge %%f5, %%f5, %%f4    \n\t"
+            "fpmerge %%f21, %%f21, %%f20 \n\t"
+            YUV2RGB_KERNEL
+            "fzero %%f0                  \n\t"
+            "fpmerge %%f4, %%f6, %%f8    \n\t"  // r,b,t1
+            "fpmerge %%f20, %%f22, %%f24 \n\t"  // r,b,t1
+            "fpmerge %%f0, %%f2, %%f10   \n\t"  // 0,g,t2
+            "fpmerge %%f0, %%f18, %%f26  \n\t"  // 0,g,t2
+            "fpmerge %%f10, %%f8, %%f4   \n\t"  // t2,t1,msb
+            "fpmerge %%f26, %%f24, %%f20 \n\t"  // t2,t1,msb
+            "fpmerge %%f11, %%f9, %%f6   \n\t"  // t2,t1,lsb
+            "fpmerge %%f27, %%f25, %%f22 \n\t"  // t2,t1,lsb
+            "std %%f4, [%3]              \n\t"
+            "std %%f20, [%3+16]          \n\t"
+            "std %%f6, [%3+8]            \n\t"
+            "std %%f22, [%3+24]          \n\t"
+
+            "add %0, 8, %0   \n\t"
+            "add %1, 4, %1   \n\t"
+            "add %2, 4, %2   \n\t"
+            "subcc %4, 8, %4 \n\t"
+            "bne 1b          \n\t"
+            "add %3, 32, %3  \n\t" //delay slot
+            : "=r" (out1), "=r" (out2), "=r" (out3), "=r" (out4), "=r" (out5), "=r" (out6)
+            : "0" (src[0]+(y+srcSliceY)*srcStride[0]), "1" (src[1]+((y+srcSliceY)>>1)*srcStride[1]),
+                "2" (src[2]+((y+srcSliceY)>>1)*srcStride[2]), "3" (dst[0]+(y+srcSliceY)*dstStride[0]),
+                "4" (c->dstW),
+                "5" (c->sparc_coeffs)
+        );
+    }
+
+    return srcSliceH;
+}
+
+// FIXME: must be changed to set alpha to 255 instead of 0
+static int vis_422P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+                           int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int y, out1, out2, out3, out4, out5, out6;
+
+    for(y=0;y < srcSliceH;++y) {
+        __asm__ volatile (
+            YUV2RGB_INIT
+            "wr %%g0, 0xd2, %%asi        \n\t" /* ASI_FL16_P */
+            "1:                          \n\t"
+            "ldda [%1] %%asi, %%f2       \n\t"
+            "ldda [%1+2] %%asi, %%f18    \n\t"
+            "ldda [%2] %%asi, %%f4       \n\t"
+            "ldda [%2+2] %%asi, %%f20    \n\t"
+            "ld [%0], %%f0               \n\t"
+            "ld [%0+4], %%f16            \n\t"
+            "fpmerge %%f3, %%f3, %%f2    \n\t"
+            "fpmerge %%f19, %%f19, %%f18 \n\t"
+            "fpmerge %%f5, %%f5, %%f4    \n\t"
+            "fpmerge %%f21, %%f21, %%f20 \n\t"
+            YUV2RGB_KERNEL
+            "fzero %%f0 \n\t"
+            "fpmerge %%f4, %%f6, %%f8    \n\t"  // r,b,t1
+            "fpmerge %%f20, %%f22, %%f24 \n\t"  // r,b,t1
+            "fpmerge %%f0, %%f2, %%f10   \n\t"  // 0,g,t2
+            "fpmerge %%f0, %%f18, %%f26  \n\t"  // 0,g,t2
+            "fpmerge %%f10, %%f8, %%f4   \n\t"  // t2,t1,msb
+            "fpmerge %%f26, %%f24, %%f20 \n\t"  // t2,t1,msb
+            "fpmerge %%f11, %%f9, %%f6   \n\t"  // t2,t1,lsb
+            "fpmerge %%f27, %%f25, %%f22 \n\t"  // t2,t1,lsb
+            "std %%f4, [%3]              \n\t"
+            "std %%f20, [%3+16]          \n\t"
+            "std %%f6, [%3+8]            \n\t"
+            "std %%f22, [%3+24]          \n\t"
+
+            "add %0, 8, %0   \n\t"
+            "add %1, 4, %1   \n\t"
+            "add %2, 4, %2   \n\t"
+            "subcc %4, 8, %4 \n\t"
+            "bne 1b          \n\t"
+            "add %3, 32, %3  \n\t" //delay slot
+            : "=r" (out1), "=r" (out2), "=r" (out3), "=r" (out4), "=r" (out5), "=r" (out6)
+            : "0" (src[0]+(y+srcSliceY)*srcStride[0]), "1" (src[1]+(y+srcSliceY)*srcStride[1]),
+                "2" (src[2]+(y+srcSliceY)*srcStride[2]), "3" (dst[0]+(y+srcSliceY)*dstStride[0]),
+                "4" (c->dstW),
+                "5" (c->sparc_coeffs)
+        );
+    }
+
+    return srcSliceH;
+}
+
+SwsFunc ff_yuv2rgb_init_vis(SwsContext *c)
+{
+    c->sparc_coeffs[5]=c->yCoeff;
+    c->sparc_coeffs[6]=c->vgCoeff;
+    c->sparc_coeffs[7]=c->vrCoeff;
+    c->sparc_coeffs[8]=c->ubCoeff;
+    c->sparc_coeffs[9]=c->ugCoeff;
+
+    c->sparc_coeffs[0]=(((int16_t)c->yOffset*(int16_t)c->yCoeff >>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[1]=(((int16_t)c->uOffset*(int16_t)c->ubCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[2]=(((int16_t)c->uOffset*(int16_t)c->ugCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[3]=(((int16_t)c->vOffset*(int16_t)c->vgCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+    c->sparc_coeffs[4]=(((int16_t)c->vOffset*(int16_t)c->vrCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+
+    if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV422P && (c->dstW & 7)==0) {
+        av_log(c, AV_LOG_INFO, "SPARC VIS accelerated YUV422P -> RGB32 (WARNING: alpha value is wrong)\n");
+        return vis_422P_ARGB32;
+    }
+    else if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV420P && (c->dstW & 7)==0) {
+        av_log(c, AV_LOG_INFO, "SPARC VIS accelerated YUV420P -> RGB32 (WARNING: alpha value is wrong)\n");
+        return vis_420P_ARGB32;
+    }
+    return NULL;
+}
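
The multiplications by 0x0001000100010001ULL broadcast each 16-bit (offset * coefficient) value into all four 16-bit lanes of a 64-bit word, so a single ldd in YUV2RGB_INIT fills a whole register pair with the same constant. A quick standalone illustration of the trick:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t v     = 0x1234;                          /* any 16-bit value  */
        uint64_t splat = v * 0x0001000100010001ULL;       /* replicate 4 times */
        printf("%016llx\n", (unsigned long long)splat);   /* 1234123412341234  */
        return 0;
    }
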

Added: branches/0.6/libswscale/swscale-test.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/swscale-test.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2003 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <stdarg.h>
+
+#undef HAVE_AV_CONFIG_H
+#include "libavutil/mem.h"
+#include "libavutil/avutil.h"
+#include "libavutil/lfg.h"
+#include "swscale.h"
+
+/* HACK Duplicated from swscale_internal.h.
+ * Should be removed when a cleaner pixel format system exists. */
+const char *sws_format_name(enum PixelFormat format);
+#define isGray(x)       (           \
+           (x)==PIX_FMT_GRAY8       \
+        || (x)==PIX_FMT_GRAY16BE    \
+        || (x)==PIX_FMT_GRAY16LE    \
+    )
+#define hasChroma(x)   (!(          \
+            isGray(x)               \
+        || (x)==PIX_FMT_MONOBLACK   \
+        || (x)==PIX_FMT_MONOWHITE   \
+    ))
+#define isALPHA(x)      (           \
+           (x)==PIX_FMT_BGR32       \
+        || (x)==PIX_FMT_BGR32_1     \
+        || (x)==PIX_FMT_RGB32       \
+        || (x)==PIX_FMT_RGB32_1     \
+        || (x)==PIX_FMT_YUVA420P    \
+    )
+
+static uint64_t getSSD(uint8_t *src1, uint8_t *src2, int stride1, int stride2, int w, int h)
+{
+    int x,y;
+    uint64_t ssd=0;
+
+//printf("%d %d\n", w, h);
+
+    for (y=0; y<h; y++) {
+        for (x=0; x<w; x++) {
+            int d= src1[x + y*stride1] - src2[x + y*stride2];
+            ssd+= d*d;
+//printf("%d", abs(src1[x + y*stride1] - src2[x + y*stride2])/26 );
+        }
+//printf("\n");
+    }
+    return ssd;
+}
+
+// test by ref -> src -> dst -> out & compare out against ref
+// ref & out are YV12
+static int doTest(uint8_t *ref[4], int refStride[4], int w, int h,
+                  enum PixelFormat srcFormat, enum PixelFormat dstFormat,
+                  int srcW, int srcH, int dstW, int dstH, int flags)
+{
+    uint8_t *src[4] = {0};
+    uint8_t *dst[4] = {0};
+    uint8_t *out[4] = {0};
+    int srcStride[4], dstStride[4];
+    int i;
+    uint64_t ssdY, ssdU=0, ssdV=0, ssdA=0;
+    struct SwsContext *srcContext = NULL, *dstContext = NULL,
+                      *outContext = NULL;
+    int res;
+
+    res = 0;
+    for (i=0; i<4; i++) {
+        // avoid stride % bpp != 0
+        if (srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24)
+            srcStride[i]= srcW*3;
+        else if (srcFormat==PIX_FMT_RGB48BE || srcFormat==PIX_FMT_RGB48LE)
+            srcStride[i]= srcW*6;
+        else
+            srcStride[i]= srcW*4;
+
+        if (dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24)
+            dstStride[i]= dstW*3;
+        else if (dstFormat==PIX_FMT_RGB48BE || dstFormat==PIX_FMT_RGB48LE)
+            dstStride[i]= dstW*6;
+        else
+            dstStride[i]= dstW*4;
+
+        /* Image buffers passed into libswscale can be allocated any way you
+         * prefer, as long as they're aligned enough for the architecture, and
+         * they're freed appropriately (such as using av_free for buffers
+         * allocated with av_malloc). */
+        src[i]= av_mallocz(srcStride[i]*srcH);
+        dst[i]= av_mallocz(dstStride[i]*dstH);
+        out[i]= av_mallocz(refStride[i]*h);
+        if (!src[i] || !dst[i] || !out[i]) {
+            perror("Malloc");
+            res = -1;
+
+            goto end;
+        }
+    }
+
+    srcContext= sws_getContext(w, h, PIX_FMT_YUVA420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL);
+    if (!srcContext) {
+        fprintf(stderr, "Failed to get %s ---> %s\n",
+                sws_format_name(PIX_FMT_YUVA420P),
+                sws_format_name(srcFormat));
+        res = -1;
+
+        goto end;
+    }
+    dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL);
+    if (!dstContext) {
+        fprintf(stderr, "Failed to get %s ---> %s\n",
+                sws_format_name(srcFormat),
+                sws_format_name(dstFormat));
+        res = -1;
+
+        goto end;
+    }
+    outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUVA420P, flags, NULL, NULL, NULL);
+    if (!outContext) {
+        fprintf(stderr, "Failed to get %s ---> %s\n",
+                sws_format_name(dstFormat),
+                sws_format_name(PIX_FMT_YUVA420P));
+        res = -1;
+
+        goto end;
+    }
+//    printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
+//        (int)src[0], (int)src[1], (int)src[2]);
+
+    sws_scale(srcContext, ref, refStride, 0, h   , src, srcStride);
+    sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
+    sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
+
+    ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
+    if (hasChroma(srcFormat) && hasChroma(dstFormat)) {
+        //FIXME check that output is really gray
+        ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
+        ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
+    }
+    if (isALPHA(srcFormat) && isALPHA(dstFormat))
+        ssdA= getSSD(ref[3], out[3], refStride[3], refStride[3], w, h);
+
+    ssdY/= w*h;
+    ssdU/= w*h/4;
+    ssdV/= w*h/4;
+    ssdA/= w*h;
+
+    printf(" %s %dx%d -> %s %4dx%4d flags=%2d SSD=%5"PRId64",%5"PRId64",%5"PRId64",%5"PRId64"\n",
+           sws_format_name(srcFormat), srcW, srcH,
+           sws_format_name(dstFormat), dstW, dstH,
+           flags, ssdY, ssdU, ssdV, ssdA);
+    fflush(stdout);
+
+end:
+
+    sws_freeContext(srcContext);
+    sws_freeContext(dstContext);
+    sws_freeContext(outContext);
+
+    for (i=0; i<4; i++) {
+        av_free(src[i]);
+        av_free(dst[i]);
+        av_free(out[i]);
+    }
+
+    return res;
+}
+
+static void selfTest(uint8_t *ref[4], int refStride[4], int w, int h)
+{
+    const int flags[] = { SWS_FAST_BILINEAR,
+                          SWS_BILINEAR, SWS_BICUBIC,
+                          SWS_X       , SWS_POINT  , SWS_AREA, 0 };
+    const int srcW = w;
+    const int srcH = h;
+    const int dstW[] = { srcW - srcW/3, srcW, srcW + srcW/3, 0 };
+    const int dstH[] = { srcH - srcH/3, srcH, srcH + srcH/3, 0 };
+    enum PixelFormat srcFormat, dstFormat;
+
+    for (srcFormat = 0; srcFormat < PIX_FMT_NB; srcFormat++) {
+        if (!sws_isSupportedInput(srcFormat) || !sws_isSupportedOutput(srcFormat))
+            continue;
+
+        for (dstFormat = 0; dstFormat < PIX_FMT_NB; dstFormat++) {
+            int i, j, k;
+            int res = 0;
+
+            if (!sws_isSupportedInput(dstFormat) || !sws_isSupportedOutput(dstFormat))
+                continue;
+
+            printf("%s -> %s\n",
+                   sws_format_name(srcFormat),
+                   sws_format_name(dstFormat));
+            fflush(stdout);
+
+            for (i = 0; dstW[i] && !res; i++)
+                for (j = 0; dstH[j] && !res; j++)
+                    for (k = 0; flags[k] && !res; k++)
+                        res = doTest(ref, refStride, w, h, srcFormat, dstFormat,
+                                     srcW, srcH, dstW[i], dstH[j], flags[k]);
+        }
+    }
+}
+
+#define W 96
+#define H 96
+
+int main(int argc, char **argv)
+{
+    uint8_t *rgb_data = av_malloc (W*H*4);
+    uint8_t *rgb_src[3]= {rgb_data, NULL, NULL};
+    int rgb_stride[3]={4*W, 0, 0};
+    uint8_t *data = av_malloc (4*W*H);
+    uint8_t *src[4]= {data, data+W*H, data+W*H*2, data+W*H*3};
+    int stride[4]={W, W, W, W};
+    int x, y;
+    struct SwsContext *sws;
+    AVLFG rand;
+
+    if (!rgb_data || !data)
+        return -1;
+
+    sws= sws_getContext(W/12, H/12, PIX_FMT_RGB32, W, H, PIX_FMT_YUVA420P, SWS_BILINEAR, NULL, NULL, NULL);
+
+    av_lfg_init(&rand, 1);
+
+    for (y=0; y<H; y++) {
+        for (x=0; x<W*4; x++) {
+            rgb_data[ x + y*4*W]= av_lfg_get(&rand);
+        }
+    }
+    sws_scale(sws, rgb_src, rgb_stride, 0, H, src, stride);
+    sws_freeContext(sws);
+    av_free(rgb_data);
+
+    selfTest(src, stride, W, H);
+    av_free(data);
+
+    return 0;
+}

Added: branches/0.6/libswscale/swscale.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/swscale.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,1964 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/*
+  supported Input formats: YV12, I420/IYUV, YUY2, UYVY, BGR32, BGR32_1, BGR24, BGR16, BGR15, RGB32, RGB32_1, RGB24, Y8/Y800, YVU9/IF09, PAL8
+  supported output formats: YV12, I420/IYUV, YUY2, UYVY, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09
+  {BGR,RGB}{1,4,8,15,16} support dithering
+
+  unscaled special converters (YV12=I420=IYUV, Y800=Y8)
+  YV12 -> {BGR,RGB}{1,4,8,12,15,16,24,32}
+  x -> x
+  YUV9 -> YV12
+  YUV9/YV12 -> Y800
+  Y800 -> YUV9/YV12
+  BGR24 -> BGR32 & RGB24 -> RGB32
+  BGR32 -> BGR24 & RGB32 -> RGB24
+  BGR15 -> BGR16
+*/
+
+/*
+tested special converters (most are tested actually, but I did not write it down ...)
+ YV12 -> BGR12/BGR16
+ YV12 -> YV12
+ BGR15 -> BGR16
+ BGR16 -> BGR16
+ YVU9 -> YV12
+
+untested special converters
+  YV12/I420 -> BGR15/BGR24/BGR32 (it is the yuv2rgb stuff, so it should be OK)
+  YV12/I420 -> YV12/I420
+  YUY2/BGR15/BGR24/BGR32/RGB24/RGB32 -> same format
+  BGR24 -> BGR32 & RGB24 -> RGB32
+  BGR32 -> BGR24 & RGB32 -> RGB24
+  BGR24 -> YV12
+*/
+
+#include <inttypes.h>
+#include <string.h>
+#include <math.h>
+#include <stdio.h>
+#include "config.h"
+#include <assert.h>
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "rgb2rgb.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/x86_cpu.h"
+#include "libavutil/avutil.h"
+#include "libavutil/bswap.h"
+#include "libavutil/pixdesc.h"
+
+#undef MOVNTQ
+#undef PAVGB
+
+//#undef HAVE_MMX2
+//#define HAVE_AMD3DNOW
+//#undef HAVE_MMX
+//#undef ARCH_X86
+#define DITHER1XBPP
+
+#define FAST_BGR2YV12 // use 7 bit coefficients instead of 15 bit
+
+#ifdef M_PI
+#define PI M_PI
+#else
+#define PI 3.14159265358979323846
+#endif
+
+#define isPacked(x)         (       \
+           (x)==PIX_FMT_PAL8        \
+        || (x)==PIX_FMT_YUYV422     \
+        || (x)==PIX_FMT_UYVY422     \
+        || isAnyRGB(x)              \
+    )
+
+#define RGB2YUV_SHIFT 15
+#define BY ( (int)(0.114*219/255*(1<<RGB2YUV_SHIFT)+0.5))
+#define BV (-(int)(0.081*224/255*(1<<RGB2YUV_SHIFT)+0.5))
+#define BU ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5))
+#define GY ( (int)(0.587*219/255*(1<<RGB2YUV_SHIFT)+0.5))
+#define GV (-(int)(0.419*224/255*(1<<RGB2YUV_SHIFT)+0.5))
+#define GU (-(int)(0.331*224/255*(1<<RGB2YUV_SHIFT)+0.5))
+#define RY ( (int)(0.299*219/255*(1<<RGB2YUV_SHIFT)+0.5))
+#define RV ( (int)(0.500*224/255*(1<<RGB2YUV_SHIFT)+0.5))
+#define RU (-(int)(0.169*224/255*(1<<RGB2YUV_SHIFT)+0.5))
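
These constants encode the BT.601 studio-range matrix in 15-bit fixed point: the three Y coefficients sum to roughly (219/255) << 15, and the U and V coefficient triples each sum to approximately zero, so neutral grey stays near U = V = 128. A small sanity check (illustrative; the truncating shift loses one code value at peak white):

    #include <stdio.h>

    #define RGB2YUV_SHIFT 15
    #define BY ((int)(0.114*219/255*(1<<RGB2YUV_SHIFT)+0.5))
    #define GY ((int)(0.587*219/255*(1<<RGB2YUV_SHIFT)+0.5))
    #define RY ((int)(0.299*219/255*(1<<RGB2YUV_SHIFT)+0.5))

    int main(void)
    {
        int r = 255, g = 255, b = 255;
        int y = ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16;
        printf("Y(white) = %d\n", y);   /* prints 234; the nominal peak is 235 */
        return 0;
    }
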
+
+static const double rgb2yuv_table[8][9]={
+    {0.7152, 0.0722, 0.2126, -0.386, 0.5, -0.115, -0.454, -0.046, 0.5},
+    {0.7152, 0.0722, 0.2126, -0.386, 0.5, -0.115, -0.454, -0.046, 0.5},
+    {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5},
+    {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5},
+    {0.59  , 0.11  , 0.30  , -0.331, 0.5, -0.169, -0.421, -0.079, 0.5}, //FCC
+    {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5},
+    {0.587 , 0.114 , 0.299 , -0.331, 0.5, -0.169, -0.419, -0.081, 0.5}, //SMPTE 170M
+    {0.701 , 0.087 , 0.212 , -0.384, 0.5, -0.116, -0.445, -0.055, 0.5}, //SMPTE 240M
+};
+
+/*
+NOTES
+Special versions: fast Y 1:1 scaling (no interpolation in y direction)
+
+TODO
+more intelligent misalignment avoidance for the horizontal scaler
+write special vertical cubic upscale version
+optimize C code (YV12 / minmax)
+add support for packed pixel YUV input & output
+add support for Y8 output
+optimize BGR24 & BGR32
+add BGR4 output support
+write special BGR->BGR scaler
+*/
+
+#if ARCH_X86
+DECLARE_ASM_CONST(8, uint64_t, bF8)=       0xF8F8F8F8F8F8F8F8LL;
+DECLARE_ASM_CONST(8, uint64_t, bFC)=       0xFCFCFCFCFCFCFCFCLL;
+DECLARE_ASM_CONST(8, uint64_t, w10)=       0x0010001000100010LL;
+DECLARE_ASM_CONST(8, uint64_t, w02)=       0x0002000200020002LL;
+DECLARE_ASM_CONST(8, uint64_t, bm00001111)=0x00000000FFFFFFFFLL;
+DECLARE_ASM_CONST(8, uint64_t, bm00000111)=0x0000000000FFFFFFLL;
+DECLARE_ASM_CONST(8, uint64_t, bm11111000)=0xFFFFFFFFFF000000LL;
+DECLARE_ASM_CONST(8, uint64_t, bm01010101)=0x00FF00FF00FF00FFLL;
+
+const DECLARE_ALIGNED(8, uint64_t, ff_dither4)[2] = {
+        0x0103010301030103LL,
+        0x0200020002000200LL,};
+
+const DECLARE_ALIGNED(8, uint64_t, ff_dither8)[2] = {
+        0x0602060206020602LL,
+        0x0004000400040004LL,};
+
+DECLARE_ASM_CONST(8, uint64_t, b16Mask)=   0x001F001F001F001FLL;
+DECLARE_ASM_CONST(8, uint64_t, g16Mask)=   0x07E007E007E007E0LL;
+DECLARE_ASM_CONST(8, uint64_t, r16Mask)=   0xF800F800F800F800LL;
+DECLARE_ASM_CONST(8, uint64_t, b15Mask)=   0x001F001F001F001FLL;
+DECLARE_ASM_CONST(8, uint64_t, g15Mask)=   0x03E003E003E003E0LL;
+DECLARE_ASM_CONST(8, uint64_t, r15Mask)=   0x7C007C007C007C00LL;
+
+DECLARE_ALIGNED(8, const uint64_t, ff_M24A)         = 0x00FF0000FF0000FFLL;
+DECLARE_ALIGNED(8, const uint64_t, ff_M24B)         = 0xFF0000FF0000FF00LL;
+DECLARE_ALIGNED(8, const uint64_t, ff_M24C)         = 0x0000FF0000FF0000LL;
+
+#ifdef FAST_BGR2YV12
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff)   = 0x000000210041000DULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff)   = 0x0000FFEEFFDC0038ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff)   = 0x00000038FFD2FFF8ULL;
+#else
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff)   = 0x000020E540830C8BULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff)   = 0x0000ED0FDAC23831ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff)   = 0x00003831D0E6F6EAULL;
+#endif /* FAST_BGR2YV12 */
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset)  = 0x1010101010101010ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_w1111)        = 0x0001000100010001ULL;
+
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toY1Coeff) = 0x0C88000040870C88ULL;
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toY2Coeff) = 0x20DE4087000020DEULL;
+DECLARE_ASM_CONST(8, uint64_t, ff_rgb24toY1Coeff) = 0x20DE0000408720DEULL;
+DECLARE_ASM_CONST(8, uint64_t, ff_rgb24toY2Coeff) = 0x0C88408700000C88ULL;
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toYOffset) = 0x0008400000084000ULL;
+
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUV)[2][4] = {
+    {0x38380000DAC83838ULL, 0xECFFDAC80000ECFFULL, 0xF6E40000D0E3F6E4ULL, 0x3838D0E300003838ULL},
+    {0xECFF0000DAC8ECFFULL, 0x3838DAC800003838ULL, 0x38380000D0E33838ULL, 0xF6E4D0E30000F6E4ULL},
+};
+
+DECLARE_ASM_CONST(8, uint64_t, ff_bgr24toUVOffset)= 0x0040400000404000ULL;
+
+#endif /* ARCH_X86 */
+
+DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_4)[2][8]={
+{  1,   3,   1,   3,   1,   3,   1,   3, },
+{  2,   0,   2,   0,   2,   0,   2,   0, },
+};
+
+DECLARE_ALIGNED(8, static const uint8_t, dither_2x2_8)[2][8]={
+{  6,   2,   6,   2,   6,   2,   6,   2, },
+{  0,   4,   0,   4,   0,   4,   0,   4, },
+};
+
+DECLARE_ALIGNED(8, const uint8_t, dither_4x4_16)[4][8]={
+{  8,   4,  11,   7,   8,   4,  11,   7, },
+{  2,  14,   1,  13,   2,  14,   1,  13, },
+{ 10,   6,   9,   5,  10,   6,   9,   5, },
+{  0,  12,   3,  15,   0,  12,   3,  15, },
+};
+
+DECLARE_ALIGNED(8, const uint8_t, dither_8x8_32)[8][8]={
+{ 17,   9,  23,  15,  16,   8,  22,  14, },
+{  5,  29,   3,  27,   4,  28,   2,  26, },
+{ 21,  13,  19,  11,  20,  12,  18,  10, },
+{  0,  24,   6,  30,   1,  25,   7,  31, },
+{ 16,   8,  22,  14,  17,   9,  23,  15, },
+{  4,  28,   2,  26,   5,  29,   3,  27, },
+{ 20,  12,  18,  10,  21,  13,  19,  11, },
+{  1,  25,   7,  31,   0,  24,   6,  30, },
+};
+
+DECLARE_ALIGNED(8, const uint8_t, dither_8x8_73)[8][8]={
+{  0,  55,  14,  68,   3,  58,  17,  72, },
+{ 37,  18,  50,  32,  40,  22,  54,  35, },
+{  9,  64,   5,  59,  13,  67,   8,  63, },
+{ 46,  27,  41,  23,  49,  31,  44,  26, },
+{  2,  57,  16,  71,   1,  56,  15,  70, },
+{ 39,  21,  52,  34,  38,  19,  51,  33, },
+{ 11,  66,   7,  62,  10,  65,   6,  60, },
+{ 48,  30,  43,  25,  47,  29,  42,  24, },
+};
+
+#if 1
+DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
+{117,  62, 158, 103, 113,  58, 155, 100, },
+{ 34, 199,  21, 186,  31, 196,  17, 182, },
+{144,  89, 131,  76, 141,  86, 127,  72, },
+{  0, 165,  41, 206,  10, 175,  52, 217, },
+{110,  55, 151,  96, 120,  65, 162, 107, },
+{ 28, 193,  14, 179,  38, 203,  24, 189, },
+{138,  83, 124,  69, 148,  93, 134,  79, },
+{  7, 172,  48, 213,   3, 168,  45, 210, },
+};
+#elif 1
+// tries to correct a gamma of 1.5
+DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
+{  0, 143,  18, 200,   2, 156,  25, 215, },
+{ 78,  28, 125,  64,  89,  36, 138,  74, },
+{ 10, 180,   3, 161,  16, 195,   8, 175, },
+{109,  51,  93,  38, 121,  60, 105,  47, },
+{  1, 152,  23, 210,   0, 147,  20, 205, },
+{ 85,  33, 134,  71,  81,  30, 130,  67, },
+{ 14, 190,   6, 171,  12, 185,   5, 166, },
+{117,  57, 101,  44, 113,  54,  97,  41, },
+};
+#elif 1
+// tries to correct a gamma of 2.0
+DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
+{  0, 124,   8, 193,   0, 140,  12, 213, },
+{ 55,  14, 104,  42,  66,  19, 119,  52, },
+{  3, 168,   1, 145,   6, 187,   3, 162, },
+{ 86,  31,  70,  21,  99,  39,  82,  28, },
+{  0, 134,  11, 206,   0, 129,   9, 200, },
+{ 62,  17, 114,  48,  58,  16, 109,  45, },
+{  5, 181,   2, 157,   4, 175,   1, 151, },
+{ 95,  36,  78,  26,  90,  34,  74,  24, },
+};
+#else
+// tries to correct a gamma of 2.5
+DECLARE_ALIGNED(8, const uint8_t, dither_8x8_220)[8][8]={
+{  0, 107,   3, 187,   0, 125,   6, 212, },
+{ 39,   7,  86,  28,  49,  11, 102,  36, },
+{  1, 158,   0, 131,   3, 180,   1, 151, },
+{ 68,  19,  52,  12,  81,  25,  64,  17, },
+{  0, 119,   5, 203,   0, 113,   4, 195, },
+{ 45,   9,  96,  33,  42,   8,  91,  30, },
+{  2, 172,   1, 144,   2, 165,   0, 137, },
+{ 77,  23,  60,  15,  72,  21,  56,  14, },
+};
+#endif
+
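+/* Vertical scaling to 16-bit planar YUV (optionally with alpha): each output
+ * sample is the weighted sum of the input lines given by lumFilter/chrFilter,
+ * written in the byte order selected by big_endian. */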
+static av_always_inline void yuv2yuvX16inC_template(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                                                    const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                                                    const int16_t **alpSrc, uint16_t *dest, uint16_t *uDest, uint16_t *vDest, uint16_t *aDest,
+                                                    int dstW, int chrDstW, int big_endian)
+{
+    //FIXME Optimize (quickly written, not optimized)
+    int i;
+
+    for (i = 0; i < dstW; i++) {
+        int val = 1 << 10;
+        int j;
+
+        for (j = 0; j < lumFilterSize; j++)
+            val += lumSrc[j][i] * lumFilter[j];
+
+        if (big_endian) {
+            AV_WB16(&dest[i], av_clip_uint16(val >> 11));
+        } else {
+            AV_WL16(&dest[i], av_clip_uint16(val >> 11));
+        }
+    }
+
+    if (uDest) {
+        for (i = 0; i < chrDstW; i++) {
+            int u = 1 << 10;
+            int v = 1 << 10;
+            int j;
+
+            for (j = 0; j < chrFilterSize; j++) {
+                u += chrSrc[j][i       ] * chrFilter[j];
+                v += chrSrc[j][i + VOFW] * chrFilter[j];
+            }
+
+            if (big_endian) {
+                AV_WB16(&uDest[i], av_clip_uint16(u >> 11));
+                AV_WB16(&vDest[i], av_clip_uint16(v >> 11));
+            } else {
+                AV_WL16(&uDest[i], av_clip_uint16(u >> 11));
+                AV_WL16(&vDest[i], av_clip_uint16(v >> 11));
+            }
+        }
+    }
+
+    if (CONFIG_SWSCALE_ALPHA && aDest) {
+        for (i = 0; i < dstW; i++) {
+            int val = 1 << 10;
+            int j;
+
+            for (j = 0; j < lumFilterSize; j++)
+                val += alpSrc[j][i] * lumFilter[j];
+
+            if (big_endian) {
+                AV_WB16(&aDest[i], av_clip_uint16(val >> 11));
+            } else {
+                AV_WL16(&aDest[i], av_clip_uint16(val >> 11));
+            }
+        }
+    }
+}
+
+static inline void yuv2yuvX16inC(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                                 const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                                 const int16_t **alpSrc, uint16_t *dest, uint16_t *uDest, uint16_t *vDest, uint16_t *aDest, int dstW, int chrDstW,
+                                 enum PixelFormat dstFormat)
+{
+    if (isBE(dstFormat)) {
+        yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize,
+                               chrFilter, chrSrc, chrFilterSize,
+                               alpSrc,
+                               dest, uDest, vDest, aDest,
+                               dstW, chrDstW, 1);
+    } else {
+        yuv2yuvX16inC_template(lumFilter, lumSrc, lumFilterSize,
+                               chrFilter, chrSrc, chrFilterSize,
+                               alpSrc,
+                               dest, uDest, vDest, aDest,
+                               dstW, chrDstW, 0);
+    }
+}
+
+static inline void yuv2yuvXinC(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                               const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                               const int16_t **alpSrc, uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, int dstW, int chrDstW)
+{
+    //FIXME Optimize (quickly written, not optimized)
+    int i;
+    for (i=0; i<dstW; i++) {
+        int val=1<<18;
+        int j;
+        for (j=0; j<lumFilterSize; j++)
+            val += lumSrc[j][i] * lumFilter[j];
+
+        dest[i]= av_clip_uint8(val>>19);
+    }
+
+    if (uDest)
+        for (i=0; i<chrDstW; i++) {
+            int u=1<<18;
+            int v=1<<18;
+            int j;
+            for (j=0; j<chrFilterSize; j++) {
+                u += chrSrc[j][i] * chrFilter[j];
+                v += chrSrc[j][i + VOFW] * chrFilter[j];
+            }
+
+            uDest[i]= av_clip_uint8(u>>19);
+            vDest[i]= av_clip_uint8(v>>19);
+        }
+
+    if (CONFIG_SWSCALE_ALPHA && aDest)
+        for (i=0; i<dstW; i++) {
+            int val=1<<18;
+            int j;
+            for (j=0; j<lumFilterSize; j++)
+                val += alpSrc[j][i] * lumFilter[j];
+
+            aDest[i]= av_clip_uint8(val>>19);
+        }
+
+}
+
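+/* Same as yuv2yuvXinC, but chroma is written interleaved: U,V pairs for NV12
+ * and V,U pairs for NV21. */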
+static inline void yuv2nv12XinC(const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                                const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                                uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
+{
+    //FIXME Optimize (quickly written, not optimized)
+    int i;
+    for (i=0; i<dstW; i++) {
+        int val=1<<18;
+        int j;
+        for (j=0; j<lumFilterSize; j++)
+            val += lumSrc[j][i] * lumFilter[j];
+
+        dest[i]= av_clip_uint8(val>>19);
+    }
+
+    if (!uDest)
+        return;
+
+    if (dstFormat == PIX_FMT_NV12)
+        for (i=0; i<chrDstW; i++) {
+            int u=1<<18;
+            int v=1<<18;
+            int j;
+            for (j=0; j<chrFilterSize; j++) {
+                u += chrSrc[j][i] * chrFilter[j];
+                v += chrSrc[j][i + VOFW] * chrFilter[j];
+            }
+
+            uDest[2*i]= av_clip_uint8(u>>19);
+            uDest[2*i+1]= av_clip_uint8(v>>19);
+        }
+    else
+        for (i=0; i<chrDstW; i++) {
+            int u=1<<18;
+            int v=1<<18;
+            int j;
+            for (j=0; j<chrFilterSize; j++) {
+                u += chrSrc[j][i] * chrFilter[j];
+                v += chrSrc[j][i + VOFW] * chrFilter[j];
+            }
+
+            uDest[2*i]= av_clip_uint8(v>>19);
+            uDest[2*i+1]= av_clip_uint8(u>>19);
+        }
+}
+
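+/* The YSCALE_* macros below open a loop over output pixels (or pixel pairs)
+ * and leave it unterminated; the code that uses them supplies the per-pixel
+ * stores and the closing brace, as in YSCALE_YUV_2_ANYRGB_C further down. */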
+#define YSCALE_YUV_2_PACKEDX_NOCLIP_C(type,alpha) \
+    for (i=0; i<(dstW>>1); i++) {\
+        int j;\
+        int Y1 = 1<<18;\
+        int Y2 = 1<<18;\
+        int U  = 1<<18;\
+        int V  = 1<<18;\
+        int av_unused A1, A2;\
+        type av_unused *r, *b, *g;\
+        const int i2= 2*i;\
+        \
+        for (j=0; j<lumFilterSize; j++) {\
+            Y1 += lumSrc[j][i2] * lumFilter[j];\
+            Y2 += lumSrc[j][i2+1] * lumFilter[j];\
+        }\
+        for (j=0; j<chrFilterSize; j++) {\
+            U += chrSrc[j][i] * chrFilter[j];\
+            V += chrSrc[j][i+VOFW] * chrFilter[j];\
+        }\
+        Y1>>=19;\
+        Y2>>=19;\
+        U >>=19;\
+        V >>=19;\
+        if (alpha) {\
+            A1 = 1<<18;\
+            A2 = 1<<18;\
+            for (j=0; j<lumFilterSize; j++) {\
+                A1 += alpSrc[j][i2  ] * lumFilter[j];\
+                A2 += alpSrc[j][i2+1] * lumFilter[j];\
+            }\
+            A1>>=19;\
+            A2>>=19;\
+        }
+
+#define YSCALE_YUV_2_PACKEDX_C(type,alpha) \
+        YSCALE_YUV_2_PACKEDX_NOCLIP_C(type,alpha)\
+        if ((Y1|Y2|U|V)&256) {\
+            if (Y1>255)   Y1=255; \
+            else if (Y1<0)Y1=0;   \
+            if (Y2>255)   Y2=255; \
+            else if (Y2<0)Y2=0;   \
+            if (U>255)    U=255;  \
+            else if (U<0) U=0;    \
+            if (V>255)    V=255;  \
+            else if (V<0) V=0;    \
+        }\
+        if (alpha && ((A1|A2)&256)) {\
+            A1=av_clip_uint8(A1);\
+            A2=av_clip_uint8(A2);\
+        }
+
+#define YSCALE_YUV_2_PACKEDX_FULL_C(rnd,alpha) \
+    for (i=0; i<dstW; i++) {\
+        int j;\
+        int Y = 0;\
+        int U = -128<<19;\
+        int V = -128<<19;\
+        int av_unused A;\
+        int R,G,B;\
+        \
+        for (j=0; j<lumFilterSize; j++) {\
+            Y += lumSrc[j][i     ] * lumFilter[j];\
+        }\
+        for (j=0; j<chrFilterSize; j++) {\
+            U += chrSrc[j][i     ] * chrFilter[j];\
+            V += chrSrc[j][i+VOFW] * chrFilter[j];\
+        }\
+        Y >>=10;\
+        U >>=10;\
+        V >>=10;\
+        if (alpha) {\
+            A = rnd;\
+            for (j=0; j<lumFilterSize; j++)\
+                A += alpSrc[j][i     ] * lumFilter[j];\
+            A >>=19;\
+            if (A&256)\
+                A = av_clip_uint8(A);\
+        }
+
+#define YSCALE_YUV_2_RGBX_FULL_C(rnd,alpha) \
+    YSCALE_YUV_2_PACKEDX_FULL_C(rnd>>3,alpha)\
+        Y-= c->yuv2rgb_y_offset;\
+        Y*= c->yuv2rgb_y_coeff;\
+        Y+= rnd;\
+        R= Y + V*c->yuv2rgb_v2r_coeff;\
+        G= Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff;\
+        B= Y +                          U*c->yuv2rgb_u2b_coeff;\
+        if ((R|G|B)&(0xC0000000)) {\
+            if (R>=(256<<22))   R=(256<<22)-1; \
+            else if (R<0)R=0;   \
+            if (G>=(256<<22))   G=(256<<22)-1; \
+            else if (G<0)G=0;   \
+            if (B>=(256<<22))   B=(256<<22)-1; \
+            else if (B<0)B=0;   \
+        }
+
+#define YSCALE_YUV_2_GRAY16_C \
+    for (i=0; i<(dstW>>1); i++) {\
+        int j;\
+        int Y1 = 1<<18;\
+        int Y2 = 1<<18;\
+        int U  = 1<<18;\
+        int V  = 1<<18;\
+        \
+        const int i2= 2*i;\
+        \
+        for (j=0; j<lumFilterSize; j++) {\
+            Y1 += lumSrc[j][i2] * lumFilter[j];\
+            Y2 += lumSrc[j][i2+1] * lumFilter[j];\
+        }\
+        Y1>>=11;\
+        Y2>>=11;\
+        if ((Y1|Y2|U|V)&65536) {\
+            if (Y1>65535)   Y1=65535; \
+            else if (Y1<0)Y1=0;   \
+            if (Y2>65535)   Y2=65535; \
+            else if (Y2<0)Y2=0;   \
+        }
+
+#define YSCALE_YUV_2_RGBX_C(type,alpha) \
+    YSCALE_YUV_2_PACKEDX_C(type,alpha)  /* FIXME fix tables so that clipping is not needed and then use _NOCLIP */\
+    r = (type *)c->table_rV[V];   \
+    g = (type *)(c->table_gU[U] + c->table_gV[V]); \
+    b = (type *)c->table_bU[U];
+
+#define YSCALE_YUV_2_PACKED2_C(type,alpha)   \
+    for (i=0; i<(dstW>>1); i++) { \
+        const int i2= 2*i;       \
+        int Y1= (buf0[i2  ]*yalpha1+buf1[i2  ]*yalpha)>>19;           \
+        int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19;           \
+        int U= (uvbuf0[i     ]*uvalpha1+uvbuf1[i     ]*uvalpha)>>19;  \
+        int V= (uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19;  \
+        type av_unused *r, *b, *g;                                    \
+        int av_unused A1, A2;                                         \
+        if (alpha) {\
+            A1= (abuf0[i2  ]*yalpha1+abuf1[i2  ]*yalpha)>>19;         \
+            A2= (abuf0[i2+1]*yalpha1+abuf1[i2+1]*yalpha)>>19;         \
+        }
+
+#define YSCALE_YUV_2_GRAY16_2_C   \
+    for (i=0; i<(dstW>>1); i++) { \
+        const int i2= 2*i;       \
+        int Y1= (buf0[i2  ]*yalpha1+buf1[i2  ]*yalpha)>>11;           \
+        int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>11;
+
+#define YSCALE_YUV_2_RGB2_C(type,alpha) \
+    YSCALE_YUV_2_PACKED2_C(type,alpha)\
+    r = (type *)c->table_rV[V];\
+    g = (type *)(c->table_gU[U] + c->table_gV[V]);\
+    b = (type *)c->table_bU[U];
+
+#define YSCALE_YUV_2_PACKED1_C(type,alpha) \
+    for (i=0; i<(dstW>>1); i++) {\
+        const int i2= 2*i;\
+        int Y1= buf0[i2  ]>>7;\
+        int Y2= buf0[i2+1]>>7;\
+        int U= (uvbuf1[i     ])>>7;\
+        int V= (uvbuf1[i+VOFW])>>7;\
+        type av_unused *r, *b, *g;\
+        int av_unused A1, A2;\
+        if (alpha) {\
+            A1= abuf0[i2  ]>>7;\
+            A2= abuf0[i2+1]>>7;\
+        }
+
+#define YSCALE_YUV_2_GRAY16_1_C \
+    for (i=0; i<(dstW>>1); i++) {\
+        const int i2= 2*i;\
+        int Y1= buf0[i2  ]<<1;\
+        int Y2= buf0[i2+1]<<1;
+
+#define YSCALE_YUV_2_RGB1_C(type,alpha) \
+    YSCALE_YUV_2_PACKED1_C(type,alpha)\
+    r = (type *)c->table_rV[V];\
+    g = (type *)(c->table_gU[U] + c->table_gV[V]);\
+    b = (type *)c->table_bU[U];
+
+#define YSCALE_YUV_2_PACKED1B_C(type,alpha) \
+    for (i=0; i<(dstW>>1); i++) {\
+        const int i2= 2*i;\
+        int Y1= buf0[i2  ]>>7;\
+        int Y2= buf0[i2+1]>>7;\
+        int U= (uvbuf0[i     ] + uvbuf1[i     ])>>8;\
+        int V= (uvbuf0[i+VOFW] + uvbuf1[i+VOFW])>>8;\
+        type av_unused *r, *b, *g;\
+        int av_unused A1, A2;\
+        if (alpha) {\
+            A1= abuf0[i2  ]>>7;\
+            A2= abuf0[i2+1]>>7;\
+        }
+
+#define YSCALE_YUV_2_RGB1B_C(type,alpha) \
+    YSCALE_YUV_2_PACKED1B_C(type,alpha)\
+    r = (type *)c->table_rV[V];\
+    g = (type *)(c->table_gU[U] + c->table_gV[V]);\
+    b = (type *)c->table_bU[U];
+
+#define YSCALE_YUV_2_MONO2_C \
+    const uint8_t * const d128=dither_8x8_220[y&7];\
+    uint8_t *g= c->table_gU[128] + c->table_gV[128];\
+    for (i=0; i<dstW-7; i+=8) {\
+        int acc;\
+        acc =       g[((buf0[i  ]*yalpha1+buf1[i  ]*yalpha)>>19) + d128[0]];\
+        acc+= acc + g[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19) + d128[1]];\
+        acc+= acc + g[((buf0[i+2]*yalpha1+buf1[i+2]*yalpha)>>19) + d128[2]];\
+        acc+= acc + g[((buf0[i+3]*yalpha1+buf1[i+3]*yalpha)>>19) + d128[3]];\
+        acc+= acc + g[((buf0[i+4]*yalpha1+buf1[i+4]*yalpha)>>19) + d128[4]];\
+        acc+= acc + g[((buf0[i+5]*yalpha1+buf1[i+5]*yalpha)>>19) + d128[5]];\
+        acc+= acc + g[((buf0[i+6]*yalpha1+buf1[i+6]*yalpha)>>19) + d128[6]];\
+        acc+= acc + g[((buf0[i+7]*yalpha1+buf1[i+7]*yalpha)>>19) + d128[7]];\
+        ((uint8_t*)dest)[0]= c->dstFormat == PIX_FMT_MONOBLACK ? acc : ~acc;\
+        dest++;\
+    }
+
+#define YSCALE_YUV_2_MONOX_C \
+    const uint8_t * const d128=dither_8x8_220[y&7];\
+    uint8_t *g= c->table_gU[128] + c->table_gV[128];\
+    int acc=0;\
+    for (i=0; i<dstW-1; i+=2) {\
+        int j;\
+        int Y1=1<<18;\
+        int Y2=1<<18;\
+\
+        for (j=0; j<lumFilterSize; j++) {\
+            Y1 += lumSrc[j][i] * lumFilter[j];\
+            Y2 += lumSrc[j][i+1] * lumFilter[j];\
+        }\
+        Y1>>=19;\
+        Y2>>=19;\
+        if ((Y1|Y2)&256) {\
+            if (Y1>255)   Y1=255;\
+            else if (Y1<0)Y1=0;\
+            if (Y2>255)   Y2=255;\
+            else if (Y2<0)Y2=0;\
+        }\
+        acc+= acc + g[Y1+d128[(i+0)&7]];\
+        acc+= acc + g[Y2+d128[(i+1)&7]];\
+        if ((i&7)==6) {\
+            ((uint8_t*)dest)[0]= c->dstFormat == PIX_FMT_MONOBLACK ? acc : ~acc;\
+            dest++;\
+        }\
+    }
+
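+/* Switches on c->dstFormat and expands one of the per-pixel macros above for
+ * each supported packed output format: func for table-based RGB, func2 for
+ * packed YUV, func_g16 for 16-bit gray, func_monoblack for 1 bpp output. */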
+#define YSCALE_YUV_2_ANYRGB_C(func, func2, func_g16, func_monoblack)\
+    switch(c->dstFormat) {\
+    case PIX_FMT_RGB48BE:\
+    case PIX_FMT_RGB48LE:\
+        func(uint8_t,0)\
+            ((uint8_t*)dest)[ 0]= r[Y1];\
+            ((uint8_t*)dest)[ 1]= r[Y1];\
+            ((uint8_t*)dest)[ 2]= g[Y1];\
+            ((uint8_t*)dest)[ 3]= g[Y1];\
+            ((uint8_t*)dest)[ 4]= b[Y1];\
+            ((uint8_t*)dest)[ 5]= b[Y1];\
+            ((uint8_t*)dest)[ 6]= r[Y2];\
+            ((uint8_t*)dest)[ 7]= r[Y2];\
+            ((uint8_t*)dest)[ 8]= g[Y2];\
+            ((uint8_t*)dest)[ 9]= g[Y2];\
+            ((uint8_t*)dest)[10]= b[Y2];\
+            ((uint8_t*)dest)[11]= b[Y2];\
+            dest+=12;\
+        }\
+        break;\
+    case PIX_FMT_RGBA:\
+    case PIX_FMT_BGRA:\
+        if (CONFIG_SMALL) {\
+            int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\
+            func(uint32_t,needAlpha)\
+                ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? (A1<<24) : 0);\
+                ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? (A2<<24) : 0);\
+            }\
+        } else {\
+            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {\
+                func(uint32_t,1)\
+                    ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (A1<<24);\
+                    ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (A2<<24);\
+                }\
+            } else {\
+                func(uint32_t,0)\
+                    ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
+                    ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
+                }\
+            }\
+        }\
+        break;\
+    case PIX_FMT_ARGB:\
+    case PIX_FMT_ABGR:\
+        if (CONFIG_SMALL) {\
+            int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;\
+            func(uint32_t,needAlpha)\
+                ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + (needAlpha ? A1 : 0);\
+                ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + (needAlpha ? A2 : 0);\
+            }\
+        } else {\
+            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {\
+                func(uint32_t,1)\
+                    ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1] + A1;\
+                    ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2] + A2;\
+                }\
+            } else {\
+                func(uint32_t,0)\
+                    ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
+                    ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
+                }\
+            }\
+        }                \
+        break;\
+    case PIX_FMT_RGB24:\
+        func(uint8_t,0)\
+            ((uint8_t*)dest)[0]= r[Y1];\
+            ((uint8_t*)dest)[1]= g[Y1];\
+            ((uint8_t*)dest)[2]= b[Y1];\
+            ((uint8_t*)dest)[3]= r[Y2];\
+            ((uint8_t*)dest)[4]= g[Y2];\
+            ((uint8_t*)dest)[5]= b[Y2];\
+            dest+=6;\
+        }\
+        break;\
+    case PIX_FMT_BGR24:\
+        func(uint8_t,0)\
+            ((uint8_t*)dest)[0]= b[Y1];\
+            ((uint8_t*)dest)[1]= g[Y1];\
+            ((uint8_t*)dest)[2]= r[Y1];\
+            ((uint8_t*)dest)[3]= b[Y2];\
+            ((uint8_t*)dest)[4]= g[Y2];\
+            ((uint8_t*)dest)[5]= r[Y2];\
+            dest+=6;\
+        }\
+        break;\
+    case PIX_FMT_RGB565BE:\
+    case PIX_FMT_RGB565LE:\
+    case PIX_FMT_BGR565BE:\
+    case PIX_FMT_BGR565LE:\
+        {\
+            const int dr1= dither_2x2_8[y&1    ][0];\
+            const int dg1= dither_2x2_4[y&1    ][0];\
+            const int db1= dither_2x2_8[(y&1)^1][0];\
+            const int dr2= dither_2x2_8[y&1    ][1];\
+            const int dg2= dither_2x2_4[y&1    ][1];\
+            const int db2= dither_2x2_8[(y&1)^1][1];\
+            func(uint16_t,0)\
+                ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+                ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+            }\
+        }\
+        break;\
+    case PIX_FMT_RGB555BE:\
+    case PIX_FMT_RGB555LE:\
+    case PIX_FMT_BGR555BE:\
+    case PIX_FMT_BGR555LE:\
+        {\
+            const int dr1= dither_2x2_8[y&1    ][0];\
+            const int dg1= dither_2x2_8[y&1    ][1];\
+            const int db1= dither_2x2_8[(y&1)^1][0];\
+            const int dr2= dither_2x2_8[y&1    ][1];\
+            const int dg2= dither_2x2_8[y&1    ][0];\
+            const int db2= dither_2x2_8[(y&1)^1][1];\
+            func(uint16_t,0)\
+                ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+                ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+            }\
+        }\
+        break;\
+    case PIX_FMT_RGB444BE:\
+    case PIX_FMT_RGB444LE:\
+    case PIX_FMT_BGR444BE:\
+    case PIX_FMT_BGR444LE:\
+        {\
+            const int dr1= dither_4x4_16[y&3    ][0];\
+            const int dg1= dither_4x4_16[y&3    ][1];\
+            const int db1= dither_4x4_16[(y&3)^3][0];\
+            const int dr2= dither_4x4_16[y&3    ][1];\
+            const int dg2= dither_4x4_16[y&3    ][0];\
+            const int db2= dither_4x4_16[(y&3)^3][1];\
+            func(uint16_t,0)\
+                ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+                ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+            }\
+        }\
+        break;\
+    case PIX_FMT_RGB8:\
+    case PIX_FMT_BGR8:\
+        {\
+            const uint8_t * const d64= dither_8x8_73[y&7];\
+            const uint8_t * const d32= dither_8x8_32[y&7];\
+            func(uint8_t,0)\
+                ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\
+                ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\
+            }\
+        }\
+        break;\
+    case PIX_FMT_RGB4:\
+    case PIX_FMT_BGR4:\
+        {\
+            const uint8_t * const d64= dither_8x8_73 [y&7];\
+            const uint8_t * const d128=dither_8x8_220[y&7];\
+            func(uint8_t,0)\
+                ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\
+                                 + ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\
+            }\
+        }\
+        break;\
+    case PIX_FMT_RGB4_BYTE:\
+    case PIX_FMT_BGR4_BYTE:\
+        {\
+            const uint8_t * const d64= dither_8x8_73 [y&7];\
+            const uint8_t * const d128=dither_8x8_220[y&7];\
+            func(uint8_t,0)\
+                ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\
+                ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\
+            }\
+        }\
+        break;\
+    case PIX_FMT_MONOBLACK:\
+    case PIX_FMT_MONOWHITE:\
+        {\
+            func_monoblack\
+        }\
+        break;\
+    case PIX_FMT_YUYV422:\
+        func2\
+            ((uint8_t*)dest)[2*i2+0]= Y1;\
+            ((uint8_t*)dest)[2*i2+1]= U;\
+            ((uint8_t*)dest)[2*i2+2]= Y2;\
+            ((uint8_t*)dest)[2*i2+3]= V;\
+        }                \
+        break;\
+    case PIX_FMT_UYVY422:\
+        func2\
+            ((uint8_t*)dest)[2*i2+0]= U;\
+            ((uint8_t*)dest)[2*i2+1]= Y1;\
+            ((uint8_t*)dest)[2*i2+2]= V;\
+            ((uint8_t*)dest)[2*i2+3]= Y2;\
+        }                \
+        break;\
+    case PIX_FMT_GRAY16BE:\
+        func_g16\
+            ((uint8_t*)dest)[2*i2+0]= Y1>>8;\
+            ((uint8_t*)dest)[2*i2+1]= Y1;\
+            ((uint8_t*)dest)[2*i2+2]= Y2>>8;\
+            ((uint8_t*)dest)[2*i2+3]= Y2;\
+        }                \
+        break;\
+    case PIX_FMT_GRAY16LE:\
+        func_g16\
+            ((uint8_t*)dest)[2*i2+0]= Y1;\
+            ((uint8_t*)dest)[2*i2+1]= Y1>>8;\
+            ((uint8_t*)dest)[2*i2+2]= Y2;\
+            ((uint8_t*)dest)[2*i2+3]= Y2>>8;\
+        }                \
+        break;\
+    }
+
+static inline void yuv2packedXinC(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                                  const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                                  const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
+{
+    int i;
+    YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGBX_C, YSCALE_YUV_2_PACKEDX_C(void,0), YSCALE_YUV_2_GRAY16_C, YSCALE_YUV_2_MONOX_C)
+}
+
+static inline void yuv2rgbXinC_full(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                                    const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                                    const int16_t **alpSrc, uint8_t *dest, int dstW, int y)
+{
+    int i;
+    int step= c->dstFormatBpp/8;
+    int aidx= 3;
+
+    switch(c->dstFormat) {
+    case PIX_FMT_ARGB:
+        dest++;
+        aidx= 0;
+    case PIX_FMT_RGB24:
+        aidx--;
+    case PIX_FMT_RGBA:
+        if (CONFIG_SMALL) {
+            int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;
+            YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha)
+                dest[aidx]= needAlpha ? A : 255;
+                dest[0]= R>>22;
+                dest[1]= G>>22;
+                dest[2]= B>>22;
+                dest+= step;
+            }
+        } else {
+            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
+                YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1)
+                    dest[aidx]= A;
+                    dest[0]= R>>22;
+                    dest[1]= G>>22;
+                    dest[2]= B>>22;
+                    dest+= step;
+                }
+            } else {
+                YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0)
+                    dest[aidx]= 255;
+                    dest[0]= R>>22;
+                    dest[1]= G>>22;
+                    dest[2]= B>>22;
+                    dest+= step;
+                }
+            }
+        }
+        break;
+    case PIX_FMT_ABGR:
+        dest++;
+        aidx= 0;
+    case PIX_FMT_BGR24:
+        aidx--;
+    case PIX_FMT_BGRA:
+        if (CONFIG_SMALL) {
+            int needAlpha = CONFIG_SWSCALE_ALPHA && c->alpPixBuf;
+            YSCALE_YUV_2_RGBX_FULL_C(1<<21, needAlpha)
+                dest[aidx]= needAlpha ? A : 255;
+                dest[0]= B>>22;
+                dest[1]= G>>22;
+                dest[2]= R>>22;
+                dest+= step;
+            }
+        } else {
+            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
+                YSCALE_YUV_2_RGBX_FULL_C(1<<21, 1)
+                    dest[aidx]= A;
+                    dest[0]= B>>22;
+                    dest[1]= G>>22;
+                    dest[2]= R>>22;
+                    dest+= step;
+                }
+            } else {
+                YSCALE_YUV_2_RGBX_FULL_C(1<<21, 0)
+                    dest[aidx]= 255;
+                    dest[0]= B>>22;
+                    dest[1]= G>>22;
+                    dest[2]= R>>22;
+                    dest+= step;
+                }
+            }
+        }
+        break;
+    default:
+        assert(0);
+    }
+}
+
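+/* Fill 'height' rows of a plane, starting at row y, with a constant value
+ * (used e.g. to set an alpha plane to opaque). */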
+static void fillPlane(uint8_t* plane, int stride, int width, int height, int y, uint8_t val)
+{
+    int i;
+    uint8_t *ptr = plane + stride*y;
+    for (i=0; i<height; i++) {
+        memset(ptr, val, width);
+        ptr += stride;
+    }
+}
+
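+/* The rgb48To* helpers convert 48-bit RGB (16 bits per component) to 8-bit
+ * YUV, using only one byte of each component. */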
+static inline void rgb48ToY(uint8_t *dst, const uint8_t *src, int width,
+                            uint32_t *unused)
+{
+    int i;
+    for (i = 0; i < width; i++) {
+        int r = src[i*6+0];
+        int g = src[i*6+2];
+        int b = src[i*6+4];
+
+        dst[i] = (RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
+    }
+}
+
+static inline void rgb48ToUV(uint8_t *dstU, uint8_t *dstV,
+                             const uint8_t *src1, const uint8_t *src2,
+                             int width, uint32_t *unused)
+{
+    int i;
+    assert(src1==src2);
+    for (i = 0; i < width; i++) {
+        int r = src1[6*i + 0];
+        int g = src1[6*i + 2];
+        int b = src1[6*i + 4];
+
+        dstU[i] = (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
+        dstV[i] = (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT;
+    }
+}
+
+static inline void rgb48ToUV_half(uint8_t *dstU, uint8_t *dstV,
+                                  const uint8_t *src1, const uint8_t *src2,
+                                  int width, uint32_t *unused)
+{
+    int i;
+    assert(src1==src2);
+    for (i = 0; i < width; i++) {
+        int r= src1[12*i + 0] + src1[12*i + 6];
+        int g= src1[12*i + 2] + src1[12*i + 8];
+        int b= src1[12*i + 4] + src1[12*i + 10];
+
+        dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT+1);
+        dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT)) >> (RGB2YUV_SHIFT+1);
+    }
+}
+
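+/* Generates a packed RGB/BGR -> luma converter: the shifts and masks extract
+ * each component, RY/GY/BY are the fixed-point coefficients and S the final
+ * shift. */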
+#define BGR2Y(type, name, shr, shg, shb, maskr, maskg, maskb, RY, GY, BY, S)\
+static inline void name(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)\
+{\
+    int i;\
+    for (i=0; i<width; i++) {\
+        int b= (((const type*)src)[i]>>shb)&maskb;\
+        int g= (((const type*)src)[i]>>shg)&maskg;\
+        int r= (((const type*)src)[i]>>shr)&maskr;\
+\
+        dst[i]= (((RY)*r + (GY)*g + (BY)*b + (33<<((S)-1)))>>(S));\
+    }\
+}
+
+BGR2Y(uint32_t, bgr32ToY,16, 0, 0, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY   , BY<< 8, RGB2YUV_SHIFT+8)
+BGR2Y(uint32_t, rgb32ToY, 0, 0,16, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY   , BY<< 8, RGB2YUV_SHIFT+8)
+BGR2Y(uint16_t, bgr16ToY, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RY<<11, GY<<5, BY    , RGB2YUV_SHIFT+8)
+BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY    , RGB2YUV_SHIFT+7)
+BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY    , GY<<5, BY<<11, RGB2YUV_SHIFT+8)
+BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY    , GY<<5, BY<<10, RGB2YUV_SHIFT+7)
+
+static inline void abgrToA(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
+{
+    int i;
+    for (i=0; i<width; i++) {
+        dst[i]= src[4*i];
+    }
+}
+
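+/* Generates the matching chroma converters; the name##_half variant averages
+ * two horizontally adjacent pixels per output sample. */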
+#define BGR2UV(type, name, shr, shg, shb, maska, maskr, maskg, maskb, RU, GU, BU, RV, GV, BV, S)\
+static inline void name(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, const uint8_t *dummy, long width, uint32_t *unused)\
+{\
+    int i;\
+    for (i=0; i<width; i++) {\
+        int b= (((const type*)src)[i]&maskb)>>shb;\
+        int g= (((const type*)src)[i]&maskg)>>shg;\
+        int r= (((const type*)src)[i]&maskr)>>shr;\
+\
+        dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<((S)-1)))>>(S);\
+        dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<((S)-1)))>>(S);\
+    }\
+}\
+static inline void name ## _half(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, const uint8_t *dummy, long width, uint32_t *unused)\
+{\
+    int i;\
+    for (i=0; i<width; i++) {\
+        int pix0= ((const type*)src)[2*i+0];\
+        int pix1= ((const type*)src)[2*i+1];\
+        int g= (pix0&~(maskr|maskb))+(pix1&~(maskr|maskb));\
+        int b= ((pix0+pix1-g)&(maskb|(2*maskb)))>>shb;\
+        int r= ((pix0+pix1-g)&(maskr|(2*maskr)))>>shr;\
+        g&= maskg|(2*maskg);\
+\
+        g>>=shg;\
+\
+        dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<(S)))>>((S)+1);\
+        dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<(S)))>>((S)+1);\
+    }\
+}
+
+BGR2UV(uint32_t, bgr32ToUV,16, 0, 0, 0xFF000000, 0xFF0000, 0xFF00,   0x00FF, RU<< 8, GU   , BU<< 8, RV<< 8, GV   , BV<< 8, RGB2YUV_SHIFT+8)
+BGR2UV(uint32_t, rgb32ToUV, 0, 0,16, 0xFF000000,   0x00FF, 0xFF00, 0xFF0000, RU<< 8, GU   , BU<< 8, RV<< 8, GV   , BV<< 8, RGB2YUV_SHIFT+8)
+BGR2UV(uint16_t, bgr16ToUV, 0, 0, 0,          0,   0x001F, 0x07E0,   0xF800, RU<<11, GU<<5, BU    , RV<<11, GV<<5, BV    , RGB2YUV_SHIFT+8)
+BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0,          0,   0x001F, 0x03E0,   0x7C00, RU<<10, GU<<5, BU    , RV<<10, GV<<5, BV    , RGB2YUV_SHIFT+7)
+BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0,          0,   0xF800, 0x07E0,   0x001F, RU    , GU<<5, BU<<11, RV    , GV<<5, BV<<11, RGB2YUV_SHIFT+8)
+BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0,          0,   0x7C00, 0x03E0,   0x001F, RU    , GU<<5, BU<<10, RV    , GV<<5, BV<<10, RGB2YUV_SHIFT+7)
+
+static inline void palToY(uint8_t *dst, const uint8_t *src, long width, uint32_t *pal)
+{
+    int i;
+    for (i=0; i<width; i++) {
+        int d= src[i];
+
+        dst[i]= pal[d] & 0xFF;
+    }
+}
+
+static inline void palToUV(uint8_t *dstU, uint8_t *dstV,
+                           const uint8_t *src1, const uint8_t *src2,
+                           long width, uint32_t *pal)
+{
+    int i;
+    assert(src1 == src2);
+    for (i=0; i<width; i++) {
+        int p= pal[src1[i]];
+
+        dstU[i]= p>>8;
+        dstV[i]= p>>16;
+    }
+}
+
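+/* Expand 1 bpp monochrome to 8-bit luma; monowhite has inverted polarity,
+ * hence the ~ on the source byte. */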
+static inline void monowhite2Y(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
+{
+    int i, j;
+    for (i=0; i<width/8; i++) {
+        int d= ~src[i];
+        for(j=0; j<8; j++)
+            dst[8*i+j]= ((d>>(7-j))&1)*255;
+    }
+}
+
+static inline void monoblack2Y(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
+{
+    int i, j;
+    for (i=0; i<width/8; i++) {
+        int d= src[i];
+        for(j=0; j<8; j++)
+            dst[8*i+j]= ((d>>(7-j))&1)*255;
+    }
+}
+
+//Note: we have C, MMX, MMX2 and 3DNOW versions; there is no 3DNOW+MMX2 one.
+//Plain C versions
+#if (!HAVE_MMX && !HAVE_ALTIVEC) || CONFIG_RUNTIME_CPUDETECT
+#define COMPILE_C
+#endif
+
+#if ARCH_PPC
+#if HAVE_ALTIVEC
+#define COMPILE_ALTIVEC
+#endif
+#endif //ARCH_PPC
+
+#if ARCH_X86
+
+#if (HAVE_MMX && !HAVE_AMD3DNOW && !HAVE_MMX2) || CONFIG_RUNTIME_CPUDETECT
+#define COMPILE_MMX
+#endif
+
+#if HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT
+#define COMPILE_MMX2
+#endif
+
+#if (HAVE_AMD3DNOW && !HAVE_MMX2) || CONFIG_RUNTIME_CPUDETECT
+#define COMPILE_3DNOW
+#endif
+#endif //ARCH_X86
+
+#define COMPILE_TEMPLATE_MMX 0
+#define COMPILE_TEMPLATE_MMX2 0
+#define COMPILE_TEMPLATE_AMD3DNOW 0
+#define COMPILE_TEMPLATE_ALTIVEC 0
+
+#ifdef COMPILE_C
+#define RENAME(a) a ## _C
+#include "swscale_template.c"
+#endif
+
+#ifdef COMPILE_ALTIVEC
+#undef RENAME
+#undef COMPILE_TEMPLATE_ALTIVEC
+#define COMPILE_TEMPLATE_ALTIVEC 1
+#define RENAME(a) a ## _altivec
+#include "swscale_template.c"
+#endif
+
+#if ARCH_X86
+
+//MMX versions
+#ifdef COMPILE_MMX
+#undef RENAME
+#undef COMPILE_TEMPLATE_MMX
+#undef COMPILE_TEMPLATE_MMX2
+#undef COMPILE_TEMPLATE_AMD3DNOW
+#define COMPILE_TEMPLATE_MMX 1
+#define COMPILE_TEMPLATE_MMX2 0
+#define COMPILE_TEMPLATE_AMD3DNOW 0
+#define RENAME(a) a ## _MMX
+#include "swscale_template.c"
+#endif
+
+//MMX2 versions
+#ifdef COMPILE_MMX2
+#undef RENAME
+#undef COMPILE_TEMPLATE_MMX
+#undef COMPILE_TEMPLATE_MMX2
+#undef COMPILE_TEMPLATE_AMD3DNOW
+#define COMPILE_TEMPLATE_MMX 1
+#define COMPILE_TEMPLATE_MMX2 1
+#define COMPILE_TEMPLATE_AMD3DNOW 0
+#define RENAME(a) a ## _MMX2
+#include "swscale_template.c"
+#endif
+
+//3DNOW versions
+#ifdef COMPILE_3DNOW
+#undef RENAME
+#undef COMPILE_TEMPLATE_MMX
+#undef COMPILE_TEMPLATE_MMX2
+#undef COMPILE_TEMPLATE_AMD3DNOW
+#define COMPILE_TEMPLATE_MMX 1
+#define COMPILE_TEMPLATE_MMX2 0
+#define COMPILE_TEMPLATE_AMD3DNOW 1
+#define RENAME(a) a ## _3DNow
+#include "swscale_template.c"
+#endif
+
+#endif //ARCH_X86
+
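+/* Pick the swScale implementation: from the SWS_CPU_CAPS_* flags in c->flags
+ * when CONFIG_RUNTIME_CPUDETECT is enabled, otherwise from the compiled-in
+ * template. */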
+SwsFunc ff_getSwsFunc(SwsContext *c)
+{
+#if CONFIG_RUNTIME_CPUDETECT
+    int flags = c->flags;
+
+#if ARCH_X86
+    // ordered by speed, fastest first
+    if (flags & SWS_CPU_CAPS_MMX2) {
+        sws_init_swScale_MMX2(c);
+        return swScale_MMX2;
+    } else if (flags & SWS_CPU_CAPS_3DNOW) {
+        sws_init_swScale_3DNow(c);
+        return swScale_3DNow;
+    } else if (flags & SWS_CPU_CAPS_MMX) {
+        sws_init_swScale_MMX(c);
+        return swScale_MMX;
+    } else {
+        sws_init_swScale_C(c);
+        return swScale_C;
+    }
+
+#else
+#ifdef COMPILE_ALTIVEC
+    if (flags & SWS_CPU_CAPS_ALTIVEC) {
+        sws_init_swScale_altivec(c);
+        return swScale_altivec;
+    } else {
+        sws_init_swScale_C(c);
+        return swScale_C;
+    }
+#endif
+    sws_init_swScale_C(c);
+    return swScale_C;
+#endif /* ARCH_X86 */
+#else //CONFIG_RUNTIME_CPUDETECT
+#if   COMPILE_TEMPLATE_MMX2
+    sws_init_swScale_MMX2(c);
+    return swScale_MMX2;
+#elif COMPILE_TEMPLATE_AMD3DNOW
+    sws_init_swScale_3DNow(c);
+    return swScale_3DNow;
+#elif COMPILE_TEMPLATE_MMX
+    sws_init_swScale_MMX(c);
+    return swScale_MMX;
+#elif COMPILE_TEMPLATE_ALTIVEC
+    sws_init_swScale_altivec(c);
+    return swScale_altivec;
+#else
+    sws_init_swScale_C(c);
+    return swScale_C;
+#endif
+#endif //!CONFIG_RUNTIME_CPUDETECT
+}
+
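+/* Unscaled conversion wrappers: each converts one slice of srcSliceH rows,
+ * starting at srcSliceY, between a specific pair of formats without any
+ * scaling. */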
+static int planarToNv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+    /* Copy Y plane */
+    if (dstStride[0]==srcStride[0] && srcStride[0] > 0)
+        memcpy(dst, src[0], srcSliceH*dstStride[0]);
+    else {
+        int i;
+        const uint8_t *srcPtr= src[0];
+        uint8_t *dstPtr= dst;
+        for (i=0; i<srcSliceH; i++) {
+            memcpy(dstPtr, srcPtr, c->srcW);
+            srcPtr+= srcStride[0];
+            dstPtr+= dstStride[0];
+        }
+    }
+    dst = dstParam[1] + dstStride[1]*srcSliceY/2;
+    if (c->dstFormat == PIX_FMT_NV12)
+        interleaveBytes(src[1], src[2], dst, c->srcW/2, srcSliceH/2, srcStride[1], srcStride[2], dstStride[0]);
+    else
+        interleaveBytes(src[2], src[1], dst, c->srcW/2, srcSliceH/2, srcStride[2], srcStride[1], dstStride[0]);
+
+    return srcSliceH;
+}
+
+static int planarToYuy2Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+    yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
+
+    return srcSliceH;
+}
+
+static int planarToUyvyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+    yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
+
+    return srcSliceH;
+}
+
+static int yuv422pToYuy2Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+    yuv422ptoyuy2(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
+
+    return srcSliceH;
+}
+
+static int yuv422pToUyvyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+
+    yuv422ptouyvy(src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0]);
+
+    return srcSliceH;
+}
+
+static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
+    uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY/2;
+    uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY/2;
+
+    yuyvtoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
+
+    if (dstParam[3])
+        fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
+
+    return srcSliceH;
+}
+
+static int yuyvToYuv422Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
+    uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY;
+    uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY;
+
+    yuyvtoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
+
+    return srcSliceH;
+}
+
+static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
+    uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY/2;
+    uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY/2;
+
+    uyvytoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
+
+    if (dstParam[3])
+        fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
+
+    return srcSliceH;
+}
+
+static int uyvyToYuv422Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                               int srcSliceH, uint8_t* dstParam[], int dstStride[])
+{
+    uint8_t *ydst=dstParam[0] + dstStride[0]*srcSliceY;
+    uint8_t *udst=dstParam[1] + dstStride[1]*srcSliceY;
+    uint8_t *vdst=dstParam[2] + dstStride[2]*srcSliceY;
+
+    uyvytoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
+
+    return srcSliceH;
+}
+
+static int palToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                           int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    const enum PixelFormat srcFormat= c->srcFormat;
+    const enum PixelFormat dstFormat= c->dstFormat;
+    void (*conv)(const uint8_t *src, uint8_t *dst, long num_pixels,
+                 const uint8_t *palette)=NULL;
+    int i;
+    uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+    const uint8_t *srcPtr= src[0];
+
+    if (usePal(srcFormat)) {
+        switch (dstFormat) {
+        case PIX_FMT_RGB32  : conv = palette8topacked32; break;
+        case PIX_FMT_BGR32  : conv = palette8topacked32; break;
+        case PIX_FMT_BGR32_1: conv = palette8topacked32; break;
+        case PIX_FMT_RGB32_1: conv = palette8topacked32; break;
+        case PIX_FMT_RGB24  : conv = palette8topacked24; break;
+        case PIX_FMT_BGR24  : conv = palette8topacked24; break;
+        }
+    }
+
+    if (!conv)
+        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
+               sws_format_name(srcFormat), sws_format_name(dstFormat));
+    else {
+        for (i=0; i<srcSliceH; i++) {
+            conv(srcPtr, dstPtr, c->srcW, (uint8_t *) c->pal_rgb);
+            srcPtr+= srcStride[0];
+            dstPtr+= dstStride[0];
+        }
+    }
+
+    return srcSliceH;
+}
+
+#define isRGBA32(x) (            \
+           (x) == PIX_FMT_ARGB   \
+        || (x) == PIX_FMT_RGBA   \
+        || (x) == PIX_FMT_BGRA   \
+        || (x) == PIX_FMT_ABGR   \
+        )
+
+/* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
+static int rgbToRgbWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                           int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    const enum PixelFormat srcFormat= c->srcFormat;
+    const enum PixelFormat dstFormat= c->dstFormat;
+    const int srcBpp= (c->srcFormatBpp + 7) >> 3;
+    const int dstBpp= (c->dstFormatBpp + 7) >> 3;
+    const int srcId= c->srcFormatBpp >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */
+    const int dstId= c->dstFormatBpp >> 2;
+    void (*conv)(const uint8_t *src, uint8_t *dst, long src_size)=NULL;
+
+#define CONV_IS(src, dst) (srcFormat == PIX_FMT_##src && dstFormat == PIX_FMT_##dst)
+
+    if (isRGBA32(srcFormat) && isRGBA32(dstFormat)) {
+        if (     CONV_IS(ABGR, RGBA)
+              || CONV_IS(ARGB, BGRA)
+              || CONV_IS(BGRA, ARGB)
+              || CONV_IS(RGBA, ABGR)) conv = shuffle_bytes_3210;
+        else if (CONV_IS(ABGR, ARGB)
+              || CONV_IS(ARGB, ABGR)) conv = shuffle_bytes_0321;
+        else if (CONV_IS(ABGR, BGRA)
+              || CONV_IS(ARGB, RGBA)) conv = shuffle_bytes_1230;
+        else if (CONV_IS(BGRA, RGBA)
+              || CONV_IS(RGBA, BGRA)) conv = shuffle_bytes_2103;
+        else if (CONV_IS(BGRA, ABGR)
+              || CONV_IS(RGBA, ARGB)) conv = shuffle_bytes_3012;
+    } else
+    /* BGR -> BGR */
+    if (  (isBGRinInt(srcFormat) && isBGRinInt(dstFormat))
+       || (isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) {
+        switch(srcId | (dstId<<4)) {
+        case 0x34: conv= rgb16to15; break;
+        case 0x36: conv= rgb24to15; break;
+        case 0x38: conv= rgb32to15; break;
+        case 0x43: conv= rgb15to16; break;
+        case 0x46: conv= rgb24to16; break;
+        case 0x48: conv= rgb32to16; break;
+        case 0x63: conv= rgb15to24; break;
+        case 0x64: conv= rgb16to24; break;
+        case 0x68: conv= rgb32to24; break;
+        case 0x83: conv= rgb15to32; break;
+        case 0x84: conv= rgb16to32; break;
+        case 0x86: conv= rgb24to32; break;
+        }
+    } else if (  (isBGRinInt(srcFormat) && isRGBinInt(dstFormat))
+             || (isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) {
+        switch(srcId | (dstId<<4)) {
+        case 0x33: conv= rgb15tobgr15; break;
+        case 0x34: conv= rgb16tobgr15; break;
+        case 0x36: conv= rgb24tobgr15; break;
+        case 0x38: conv= rgb32tobgr15; break;
+        case 0x43: conv= rgb15tobgr16; break;
+        case 0x44: conv= rgb16tobgr16; break;
+        case 0x46: conv= rgb24tobgr16; break;
+        case 0x48: conv= rgb32tobgr16; break;
+        case 0x63: conv= rgb15tobgr24; break;
+        case 0x64: conv= rgb16tobgr24; break;
+        case 0x66: conv= rgb24tobgr24; break;
+        case 0x68: conv= rgb32tobgr24; break;
+        case 0x83: conv= rgb15tobgr32; break;
+        case 0x84: conv= rgb16tobgr32; break;
+        case 0x86: conv= rgb24tobgr32; break;
+        }
+    }
+
+    if (!conv) {
+        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
+               sws_format_name(srcFormat), sws_format_name(dstFormat));
+    } else {
+        const uint8_t *srcPtr= src[0];
+              uint8_t *dstPtr= dst[0];
+        if ((srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1) && !isRGBA32(dstFormat))
+            srcPtr += ALT32_CORR;
+
+        if ((dstFormat == PIX_FMT_RGB32_1 || dstFormat == PIX_FMT_BGR32_1) && !isRGBA32(srcFormat))
+            dstPtr += ALT32_CORR;
+
+        if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0)
+            conv(srcPtr, dstPtr + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
+        else {
+            int i;
+            dstPtr += dstStride[0]*srcSliceY;
+
+            for (i=0; i<srcSliceH; i++) {
+                conv(srcPtr, dstPtr, c->srcW*srcBpp);
+                srcPtr+= srcStride[0];
+                dstPtr+= dstStride[0];
+            }
+        }
+    }
+    return srcSliceH;
+}
+
+static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                              int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    rgb24toyv12(
+        src[0],
+        dst[0]+ srcSliceY    *dstStride[0],
+        dst[1]+(srcSliceY>>1)*dstStride[1],
+        dst[2]+(srcSliceY>>1)*dstStride[2],
+        c->srcW, srcSliceH,
+        dstStride[0], dstStride[1], srcStride[0]);
+    if (dst[3])
+        fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
+    return srcSliceH;
+}
+
+static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                             int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int i;
+
+    /* copy Y */
+    if (srcStride[0]==dstStride[0] && srcStride[0] > 0)
+        memcpy(dst[0]+ srcSliceY*dstStride[0], src[0], srcStride[0]*srcSliceH);
+    else {
+        const uint8_t *srcPtr= src[0];
+        uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+
+        for (i=0; i<srcSliceH; i++) {
+            memcpy(dstPtr, srcPtr, c->srcW);
+            srcPtr+= srcStride[0];
+            dstPtr+= dstStride[0];
+        }
+    }
+
+    if (c->dstFormat==PIX_FMT_YUV420P || c->dstFormat==PIX_FMT_YUVA420P) {
+        planar2x(src[1], dst[1] + dstStride[1]*(srcSliceY >> 1), c->chrSrcW,
+                 srcSliceH >> 2, srcStride[1], dstStride[1]);
+        planar2x(src[2], dst[2] + dstStride[2]*(srcSliceY >> 1), c->chrSrcW,
+                 srcSliceH >> 2, srcStride[2], dstStride[2]);
+    } else {
+        planar2x(src[1], dst[2] + dstStride[2]*(srcSliceY >> 1), c->chrSrcW,
+                 srcSliceH >> 2, srcStride[1], dstStride[2]);
+        planar2x(src[2], dst[1] + dstStride[1]*(srcSliceY >> 1), c->chrSrcW,
+                 srcSliceH >> 2, srcStride[2], dstStride[1]);
+    }
+    if (dst[3])
+        fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
+    return srcSliceH;
+}
+
+/* unscaled copy-like functions (assume nearly identical formats) */
+static int packedCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                             int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    if (dstStride[0]==srcStride[0] && srcStride[0] > 0)
+        memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], srcSliceH*dstStride[0]);
+    else {
+        int i;
+        const uint8_t *srcPtr= src[0];
+        uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+        int length=0;
+
+        /* universal length finder */
+        while(length+c->srcW <= FFABS(dstStride[0])
+           && length+c->srcW <= FFABS(srcStride[0])) length+= c->srcW;
+        assert(length!=0);
+
+        for (i=0; i<srcSliceH; i++) {
+            memcpy(dstPtr, srcPtr, length);
+            srcPtr+= srcStride[0];
+            dstPtr+= dstStride[0];
+        }
+    }
+    return srcSliceH;
+}
+
+static int planarCopyWrapper(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                             int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int plane, i, j;
+    for (plane=0; plane<4; plane++) {
+        int length= (plane==0 || plane==3) ? c->srcW  : -((-c->srcW  )>>c->chrDstHSubSample);
+        int y=      (plane==0 || plane==3) ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample);
+        int height= (plane==0 || plane==3) ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample);
+        const uint8_t *srcPtr= src[plane];
+        uint8_t *dstPtr= dst[plane] + dstStride[plane]*y;
+
+        if (!dst[plane]) continue;
+        // ignore palette for GRAY8
+        if (plane == 1 && !dst[2]) continue;
+        if (!src[plane] || (plane == 1 && !src[2])) {
+            if(is16BPS(c->dstFormat))
+                length*=2;
+            fillPlane(dst[plane], dstStride[plane], length, height, y, (plane==3) ? 255 : 128);
+        } else {
+            if(is16BPS(c->srcFormat) && !is16BPS(c->dstFormat)) {
+                if (!isBE(c->srcFormat)) srcPtr++;
+                for (i=0; i<height; i++) {
+                    for (j=0; j<length; j++) dstPtr[j] = srcPtr[j<<1];
+                    srcPtr+= srcStride[plane];
+                    dstPtr+= dstStride[plane];
+                }
+            } else if(!is16BPS(c->srcFormat) && is16BPS(c->dstFormat)) {
+                for (i=0; i<height; i++) {
+                    for (j=0; j<length; j++) {
+                        dstPtr[ j<<1   ] = srcPtr[j];
+                        dstPtr[(j<<1)+1] = srcPtr[j];
+                    }
+                    srcPtr+= srcStride[plane];
+                    dstPtr+= dstStride[plane];
+                }
+            } else if(is16BPS(c->srcFormat) && is16BPS(c->dstFormat)
+                  && isBE(c->srcFormat) != isBE(c->dstFormat)) {
+
+                for (i=0; i<height; i++) {
+                    for (j=0; j<length; j++)
+                        ((uint16_t*)dstPtr)[j] = bswap_16(((const uint16_t*)srcPtr)[j]);
+                    srcPtr+= srcStride[plane];
+                    dstPtr+= dstStride[plane];
+                }
+            } else if (dstStride[plane]==srcStride[plane] && srcStride[plane] > 0)
+                memcpy(dst[plane] + dstStride[plane]*y, src[plane], height*dstStride[plane]);
+            else {
+                if(is16BPS(c->srcFormat) && is16BPS(c->dstFormat))
+                    length*=2;
+                for (i=0; i<height; i++) {
+                    memcpy(dstPtr, srcPtr, length);
+                    srcPtr+= srcStride[plane];
+                    dstPtr+= dstStride[plane];
+                }
+            }
+        }
+    }
+    return srcSliceH;
+}
+
+int ff_hardcodedcpuflags(void)
+{
+    int flags = 0;
+#if   COMPILE_TEMPLATE_MMX2
+    flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2;
+#elif COMPILE_TEMPLATE_AMD3DNOW
+    flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_3DNOW;
+#elif COMPILE_TEMPLATE_MMX
+    flags |= SWS_CPU_CAPS_MMX;
+#elif COMPILE_TEMPLATE_ALTIVEC
+    flags |= SWS_CPU_CAPS_ALTIVEC;
+#elif ARCH_BFIN
+    flags |= SWS_CPU_CAPS_BFIN;
+#endif
+    return flags;
+}
+
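+/* Install a special-case c->swScale for conversions that need no scaling;
+ * c->swScale is only assigned when one of the fast paths below matches. */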
+void ff_get_unscaled_swscale(SwsContext *c)
+{
+    const enum PixelFormat srcFormat = c->srcFormat;
+    const enum PixelFormat dstFormat = c->dstFormat;
+    const int flags = c->flags;
+    const int dstH = c->dstH;
+    int needsDither;
+
+    needsDither= isAnyRGB(dstFormat)
+        &&  c->dstFormatBpp < 24
+        && (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat)));
+
+    /* yv12_to_nv12 */
+    if ((srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21)) {
+        c->swScale= planarToNv12Wrapper;
+    }
+    /* yuv2bgr */
+    if ((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P || srcFormat==PIX_FMT_YUVA420P) && isAnyRGB(dstFormat)
+        && !(flags & SWS_ACCURATE_RND) && !(dstH&1)) {
+        c->swScale= ff_yuv2rgb_get_func_ptr(c);
+    }
+
+    if (srcFormat==PIX_FMT_YUV410P && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_BITEXACT)) {
+        c->swScale= yvu9ToYv12Wrapper;
+    }
+
+    /* bgr24toYV12 */
+    if (srcFormat==PIX_FMT_BGR24 && (dstFormat==PIX_FMT_YUV420P || dstFormat==PIX_FMT_YUVA420P) && !(flags & SWS_ACCURATE_RND))
+        c->swScale= bgr24ToYv12Wrapper;
+
+    /* RGB/BGR -> RGB/BGR (forms that need no dither) */
+    if (   isAnyRGB(srcFormat)
+        && isAnyRGB(dstFormat)
+        && srcFormat != PIX_FMT_BGR8      && dstFormat != PIX_FMT_BGR8
+        && srcFormat != PIX_FMT_RGB8      && dstFormat != PIX_FMT_RGB8
+        && srcFormat != PIX_FMT_BGR4      && dstFormat != PIX_FMT_BGR4
+        && srcFormat != PIX_FMT_RGB4      && dstFormat != PIX_FMT_RGB4
+        && srcFormat != PIX_FMT_BGR4_BYTE && dstFormat != PIX_FMT_BGR4_BYTE
+        && srcFormat != PIX_FMT_RGB4_BYTE && dstFormat != PIX_FMT_RGB4_BYTE
+        && srcFormat != PIX_FMT_MONOBLACK && dstFormat != PIX_FMT_MONOBLACK
+        && srcFormat != PIX_FMT_MONOWHITE && dstFormat != PIX_FMT_MONOWHITE
+        && srcFormat != PIX_FMT_RGB48LE   && dstFormat != PIX_FMT_RGB48LE
+        && srcFormat != PIX_FMT_RGB48BE   && dstFormat != PIX_FMT_RGB48BE
+        && (!needsDither || (c->flags&(SWS_FAST_BILINEAR|SWS_POINT))))
+        c->swScale= rgbToRgbWrapper;
+
+    if ((usePal(srcFormat) && (
+        dstFormat == PIX_FMT_RGB32   ||
+        dstFormat == PIX_FMT_RGB32_1 ||
+        dstFormat == PIX_FMT_RGB24   ||
+        dstFormat == PIX_FMT_BGR32   ||
+        dstFormat == PIX_FMT_BGR32_1 ||
+        dstFormat == PIX_FMT_BGR24)))
+        c->swScale= palToRgbWrapper;
+
+    if (srcFormat == PIX_FMT_YUV422P) {
+        if (dstFormat == PIX_FMT_YUYV422)
+            c->swScale= yuv422pToYuy2Wrapper;
+        else if (dstFormat == PIX_FMT_UYVY422)
+            c->swScale= yuv422pToUyvyWrapper;
+    }
+
+    /* LQ converters if -sws 0 or -sws 4 */
+    if (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)) {
+        /* yv12_to_yuy2 */
+        if (srcFormat == PIX_FMT_YUV420P || srcFormat == PIX_FMT_YUVA420P) {
+            if (dstFormat == PIX_FMT_YUYV422)
+                c->swScale= planarToYuy2Wrapper;
+            else if (dstFormat == PIX_FMT_UYVY422)
+                c->swScale= planarToUyvyWrapper;
+        }
+    }
+    if(srcFormat == PIX_FMT_YUYV422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
+        c->swScale= yuyvToYuv420Wrapper;
+    if(srcFormat == PIX_FMT_UYVY422 && (dstFormat == PIX_FMT_YUV420P || dstFormat == PIX_FMT_YUVA420P))
+        c->swScale= uyvyToYuv420Wrapper;
+    if(srcFormat == PIX_FMT_YUYV422 && dstFormat == PIX_FMT_YUV422P)
+        c->swScale= yuyvToYuv422Wrapper;
+    if(srcFormat == PIX_FMT_UYVY422 && dstFormat == PIX_FMT_YUV422P)
+        c->swScale= uyvyToYuv422Wrapper;
+
+#ifdef COMPILE_ALTIVEC
+    if ((c->flags & SWS_CPU_CAPS_ALTIVEC) &&
+        !(c->flags & SWS_BITEXACT) &&
+        srcFormat == PIX_FMT_YUV420P) {
+        // unscaled YV12 -> packed YUV, we want speed
+        if (dstFormat == PIX_FMT_YUYV422)
+            c->swScale= yv12toyuy2_unscaled_altivec;
+        else if (dstFormat == PIX_FMT_UYVY422)
+            c->swScale= yv12touyvy_unscaled_altivec;
+    }
+#endif
+
+    /* simple copy */
+    if (  srcFormat == dstFormat
+        || (srcFormat == PIX_FMT_YUVA420P && dstFormat == PIX_FMT_YUV420P)
+        || (srcFormat == PIX_FMT_YUV420P && dstFormat == PIX_FMT_YUVA420P)
+        || (isPlanarYUV(srcFormat) && isGray(dstFormat))
+        || (isPlanarYUV(dstFormat) && isGray(srcFormat))
+        || (isGray(dstFormat) && isGray(srcFormat))
+        || (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat)
+            && c->chrDstHSubSample == c->chrSrcHSubSample
+            && c->chrDstVSubSample == c->chrSrcVSubSample
+            && dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21
+            && srcFormat != PIX_FMT_NV12 && srcFormat != PIX_FMT_NV21))
+    {
+        if (isPacked(c->srcFormat))
+            c->swScale= packedCopyWrapper;
+        else /* Planar YUV or gray */
+            c->swScale= planarCopyWrapper;
+    }
+#if ARCH_BFIN
+    if (flags & SWS_CPU_CAPS_BFIN)
+        ff_bfin_get_unscaled_swscale (c);
+#endif
+}
+
+static void reset_ptr(const uint8_t* src[], int format)
+{
+    if(!isALPHA(format))
+        src[3]=NULL;
+    if(!isPlanarYUV(format)) {
+        src[3]=src[2]=NULL;
+
+        if (!usePal(format))
+            src[1]= NULL;
+    }
+}
+
+/**
+ * swscale wrapper, so we do not need to export the SwsContext.
+ * Assumes planar YUV is in YUV plane order rather than YVU.
+ */
+int sws_scale(SwsContext *c, const uint8_t* const src[], const int srcStride[], int srcSliceY,
+              int srcSliceH, uint8_t* const dst[], const int dstStride[])
+{
+    int i;
+    const uint8_t* src2[4]= {src[0], src[1], src[2], src[3]};
+    uint8_t* dst2[4]= {dst[0], dst[1], dst[2], dst[3]};
+
+    // do not mess up sliceDir if we have a "trailing" 0-size slice
+    if (srcSliceH == 0)
+        return 0;
+
+    if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
+        av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
+        return 0;
+    }
+    if (c->sliceDir == 0) {
+        if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
+    }
+
+    if (usePal(c->srcFormat)) {
+        for (i=0; i<256; i++) {
+            int p, r, g, b,y,u,v;
+            if(c->srcFormat == PIX_FMT_PAL8) {
+                p=((const uint32_t*)(src[1]))[i];
+                r= (p>>16)&0xFF;
+                g= (p>> 8)&0xFF;
+                b=  p     &0xFF;
+            } else if(c->srcFormat == PIX_FMT_RGB8) {
+                r= (i>>5    )*36;
+                g= ((i>>2)&7)*36;
+                b= (i&3     )*85;
+            } else if(c->srcFormat == PIX_FMT_BGR8) {
+                b= (i>>6    )*85;
+                g= ((i>>3)&7)*36;
+                r= (i&7     )*36;
+            } else if(c->srcFormat == PIX_FMT_RGB4_BYTE) {
+                r= (i>>3    )*255;
+                g= ((i>>1)&3)*85;
+                b= (i&1     )*255;
+            } else if(c->srcFormat == PIX_FMT_GRAY8) {
+                r = g = b = i;
+            } else {
+                assert(c->srcFormat == PIX_FMT_BGR4_BYTE);
+                b= (i>>3    )*255;
+                g= ((i>>1)&3)*85;
+                r= (i&1     )*255;
+            }
+            y= av_clip_uint8((RY*r + GY*g + BY*b + ( 33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+            u= av_clip_uint8((RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+            v= av_clip_uint8((RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+            c->pal_yuv[i]= y + (u<<8) + (v<<16);
+
+            switch(c->dstFormat) {
+            case PIX_FMT_BGR32:
+#if !HAVE_BIGENDIAN
+            case PIX_FMT_RGB24:
+#endif
+                c->pal_rgb[i]=  r + (g<<8) + (b<<16);
+                break;
+            case PIX_FMT_BGR32_1:
+#if HAVE_BIGENDIAN
+            case PIX_FMT_BGR24:
+#endif
+                c->pal_rgb[i]= (r + (g<<8) + (b<<16)) << 8;
+                break;
+            case PIX_FMT_RGB32_1:
+#if HAVE_BIGENDIAN
+            case PIX_FMT_RGB24:
+#endif
+                c->pal_rgb[i]= (b + (g<<8) + (r<<16)) << 8;
+                break;
+            case PIX_FMT_RGB32:
+#if !HAVE_BIGENDIAN
+            case PIX_FMT_BGR24:
+#endif
+            default:
+                c->pal_rgb[i]=  b + (g<<8) + (r<<16);
+            }
+        }
+    }
+
+    // copy strides, so they can safely be modified
+    if (c->sliceDir == 1) {
+        // slices go from top to bottom
+        int srcStride2[4]= {srcStride[0], srcStride[1], srcStride[2], srcStride[3]};
+        int dstStride2[4]= {dstStride[0], dstStride[1], dstStride[2], dstStride[3]};
+
+        reset_ptr(src2, c->srcFormat);
+        reset_ptr((const uint8_t**)dst2, c->dstFormat);
+
+        /* reset slice direction at end of frame */
+        if (srcSliceY + srcSliceH == c->srcH)
+            c->sliceDir = 0;
+
+        return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2, dstStride2);
+    } else {
+        // slices go from bottom to top => we flip the image internally
+        int srcStride2[4]= {-srcStride[0], -srcStride[1], -srcStride[2], -srcStride[3]};
+        int dstStride2[4]= {-dstStride[0], -dstStride[1], -dstStride[2], -dstStride[3]};
+
+        src2[0] += (srcSliceH-1)*srcStride[0];
+        if (!usePal(c->srcFormat))
+            src2[1] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1];
+        src2[2] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2];
+        src2[3] += (srcSliceH-1)*srcStride[3];
+        dst2[0] += ( c->dstH                      -1)*dstStride[0];
+        dst2[1] += ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1];
+        dst2[2] += ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2];
+        dst2[3] += ( c->dstH                      -1)*dstStride[3];
+
+        reset_ptr(src2, c->srcFormat);
+        reset_ptr((const uint8_t**)dst2, c->dstFormat);
+
+        /* reset slice direction at end of frame */
+        if (!srcSliceY)
+            c->sliceDir = 0;
+
+        return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2);
+    }
+}
+
+#if LIBSWSCALE_VERSION_MAJOR < 1
+int sws_scale_ordered(SwsContext *c, const uint8_t* const src[], int srcStride[], int srcSliceY,
+                      int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    return sws_scale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
+}
+#endif

Added: branches/0.6/libswscale/swscale.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/swscale.h	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWSCALE_SWSCALE_H
+#define SWSCALE_SWSCALE_H
+
+/**
+ * @file
+ * @brief
+ *     external API of the swscale library
+ */
+
+#include "libavutil/avutil.h"
+
+#define LIBSWSCALE_VERSION_MAJOR 0
+#define LIBSWSCALE_VERSION_MINOR 10
+#define LIBSWSCALE_VERSION_MICRO 0
+
+#define LIBSWSCALE_VERSION_INT  AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \
+                                               LIBSWSCALE_VERSION_MINOR, \
+                                               LIBSWSCALE_VERSION_MICRO)
+#define LIBSWSCALE_VERSION      AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \
+                                           LIBSWSCALE_VERSION_MINOR, \
+                                           LIBSWSCALE_VERSION_MICRO)
+#define LIBSWSCALE_BUILD        LIBSWSCALE_VERSION_INT
+
+#define LIBSWSCALE_IDENT        "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION)
+
+/**
+ * Returns the LIBSWSCALE_VERSION_INT constant.
+ */
+unsigned swscale_version(void);
+
+/**
+ * Returns the libswscale build-time configuration.
+ */
+const char *swscale_configuration(void);
+
+/**
+ * Returns the libswscale license.
+ */
+const char *swscale_license(void);
+
+/* values for the flags, the stuff on the command line is different */
+#define SWS_FAST_BILINEAR     1
+#define SWS_BILINEAR          2
+#define SWS_BICUBIC           4
+#define SWS_X                 8
+#define SWS_POINT          0x10
+#define SWS_AREA           0x20
+#define SWS_BICUBLIN       0x40
+#define SWS_GAUSS          0x80
+#define SWS_SINC          0x100
+#define SWS_LANCZOS       0x200
+#define SWS_SPLINE        0x400
+
+#define SWS_SRC_V_CHR_DROP_MASK     0x30000
+#define SWS_SRC_V_CHR_DROP_SHIFT    16
+
+#define SWS_PARAM_DEFAULT           123456
+
+#define SWS_PRINT_INFO              0x1000
+
+//the following 3 flags are not completely implemented
+//internal chrominance subsampling info
+#define SWS_FULL_CHR_H_INT    0x2000
+//input subsampling info
+#define SWS_FULL_CHR_H_INP    0x4000
+#define SWS_DIRECT_BGR        0x8000
+#define SWS_ACCURATE_RND      0x40000
+#define SWS_BITEXACT          0x80000
+
+#define SWS_CPU_CAPS_MMX      0x80000000
+#define SWS_CPU_CAPS_MMX2     0x20000000
+#define SWS_CPU_CAPS_3DNOW    0x40000000
+#define SWS_CPU_CAPS_ALTIVEC  0x10000000
+#define SWS_CPU_CAPS_BFIN     0x01000000
+
+#define SWS_MAX_REDUCE_CUTOFF 0.002
+
+#define SWS_CS_ITU709         1
+#define SWS_CS_FCC            4
+#define SWS_CS_ITU601         5
+#define SWS_CS_ITU624         5
+#define SWS_CS_SMPTE170M      5
+#define SWS_CS_SMPTE240M      7
+#define SWS_CS_DEFAULT        5
+
+/**
+ * Returns a pointer to yuv<->rgb coefficients for the given colorspace
+ * suitable for sws_setColorspaceDetails().
+ *
+ * @param colorspace One of the SWS_CS_* macros. If invalid,
+ * SWS_CS_DEFAULT is used.
+ */
+const int *sws_getCoefficients(int colorspace);
+
+
+// when used for filters, vectors must have an odd number of elements
+// the coeff arrays cannot be shared between vectors
+typedef struct {
+    double *coeff;              ///< pointer to the list of coefficients
+    int length;                 ///< number of coefficients in the vector
+} SwsVector;
+
+// vectors can be shared
+typedef struct {
+    SwsVector *lumH;
+    SwsVector *lumV;
+    SwsVector *chrH;
+    SwsVector *chrV;
+} SwsFilter;
+
+struct SwsContext;
+
+/**
+ * Returns a positive value if pix_fmt is a supported input format, 0
+ * otherwise.
+ */
+int sws_isSupportedInput(enum PixelFormat pix_fmt);
+
+/**
+ * Returns a positive value if pix_fmt is a supported output format, 0
+ * otherwise.
+ */
+int sws_isSupportedOutput(enum PixelFormat pix_fmt);
+
+/**
+ * Frees the swscaler context swsContext.
+ * If swsContext is NULL, this function does nothing.
+ */
+void sws_freeContext(struct SwsContext *swsContext);
+
+/**
+ * Allocates and returns a SwsContext. You need it to perform
+ * scaling/conversion operations using sws_scale().
+ *
+ * @param srcW the width of the source image
+ * @param srcH the height of the source image
+ * @param srcFormat the source image format
+ * @param dstW the width of the destination image
+ * @param dstH the height of the destination image
+ * @param dstFormat the destination image format
+ * @param flags specify which algorithm and options to use for rescaling
+ * @return a pointer to an allocated context, or NULL in case of error
+ */
+struct SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat,
+                                  int dstW, int dstH, enum PixelFormat dstFormat,
+                                  int flags, SwsFilter *srcFilter,
+                                  SwsFilter *dstFilter, const double *param);
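+
+/* A minimal allocation sketch (srcW/srcH/dstW/dstH are placeholders; the
+ * formats and flag below are only an example, not a recommendation):
+ *
+ *     struct SwsContext *ctx = sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
+ *                                             dstW, dstH, PIX_FMT_RGB24,
+ *                                             SWS_BICUBIC, NULL, NULL, NULL);
+ *     if (!ctx)
+ *         return -1; // allocation or parameter error
+ */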
+
+/**
+ * Scales the image slice in srcSlice and puts the resulting scaled
+ * slice in the image in dst. A slice is a sequence of consecutive
+ * rows in an image.
+ *
+ * Slices have to be provided in sequential order, either in
+ * top-bottom or bottom-top order. If slices are provided in
+ * non-sequential order the behavior of the function is undefined.
+ *
+ * @param context   the scaling context previously created with
+ *                  sws_getContext()
+ * @param srcSlice  the array containing the pointers to the planes of
+ *                  the source slice
+ * @param srcStride the array containing the strides for each plane of
+ *                  the source image
+ * @param srcSliceY the position in the source image of the slice to
+ *                  process, that is the number (counted starting from
+ *                  zero) in the image of the first row of the slice
+ * @param srcSliceH the height of the source slice, that is the number
+ *                  of rows in the slice
+ * @param dst       the array containing the pointers to the planes of
+ *                  the destination image
+ * @param dstStride the array containing the strides for each plane of
+ *                  the destination image
+ * @return          the height of the output slice
+ */
+int sws_scale(struct SwsContext *context, const uint8_t* const srcSlice[], const int srcStride[],
+              int srcSliceY, int srcSliceH, uint8_t* const dst[], const int dstStride[]);
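+
+/* The common case is to pass the whole image as a single slice, i.e.
+ * srcSliceY == 0 and srcSliceH == the full source height (a sketch; ctx,
+ * src, srcStride, dst, dstStride and srcH are assumed to be set up already):
+ *
+ *     sws_scale(ctx, src, srcStride, 0, srcH, dst, dstStride);
+ */
+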
+#if LIBSWSCALE_VERSION_MAJOR < 1
+/**
+ * @deprecated Use sws_scale() instead.
+ */
+int sws_scale_ordered(struct SwsContext *context, const uint8_t* const src[],
+                      int srcStride[], int srcSliceY, int srcSliceH,
+                      uint8_t* dst[], int dstStride[]) attribute_deprecated;
+#endif
+
+/**
+ * @param inv_table the yuv2rgb coefficients, normally ff_yuv2rgb_coeffs[x]
+ * @param srcRange if 1 the source luma range is 0..255, if 0 it is 16..235
+ * @param dstRange if 1 the destination luma range is 0..255, if 0 it is 16..235
+ * @return -1 if not supported
+ */
+int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
+                             int srcRange, const int table[4], int dstRange,
+                             int brightness, int contrast, int saturation);
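+
+/* A sketch of selecting ITU-R BT.709 coefficients for input and output while
+ * keeping MPEG range and neutral brightness/contrast/saturation (0, 1<<16 and
+ * 1<<16 are the neutral values):
+ *
+ *     const int *coeffs = sws_getCoefficients(SWS_CS_ITU709);
+ *     sws_setColorspaceDetails(c, coeffs, 0, coeffs, 0, 0, 1 << 16, 1 << 16);
+ */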
+
+/**
+ * @return -1 if not supported
+ */
+int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
+                             int *srcRange, int **table, int *dstRange,
+                             int *brightness, int *contrast, int *saturation);
+
+/**
+ * Allocates and returns an uninitialized vector with length coefficients.
+ */
+SwsVector *sws_allocVec(int length);
+
+/**
+ * Returns a normalized Gaussian curve used to filter data.
+ * quality = 3 is high quality, lower values mean lower quality.
+ */
+SwsVector *sws_getGaussianVec(double variance, double quality);
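+
+/* A sketch of a Gaussian blur on the luma planes (the variance and quality
+ * values are illustrative only):
+ *
+ *     SwsVector *blur = sws_getGaussianVec(2.0, 3.0); // already normalized
+ *     SwsFilter filter = { blur, blur, NULL, NULL };  // lumH, lumV shared
+ *     // pass &filter as the srcFilter argument of sws_getContext()
+ */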
+
+/**
+ * Allocates and returns a vector with length coefficients, all
+ * with the same value c.
+ */
+SwsVector *sws_getConstVec(double c, int length);
+
+/**
+ * Allocates and returns a vector with just one coefficient, with
+ * value 1.0.
+ */
+SwsVector *sws_getIdentityVec(void);
+
+/**
+ * Scales all the coefficients of a by the scalar value.
+ */
+void sws_scaleVec(SwsVector *a, double scalar);
+
+/**
+ * Scales all the coefficients of a so that their sum equals height.
+ */
+void sws_normalizeVec(SwsVector *a, double height);
+void sws_convVec(SwsVector *a, SwsVector *b);
+void sws_addVec(SwsVector *a, SwsVector *b);
+void sws_subVec(SwsVector *a, SwsVector *b);
+void sws_shiftVec(SwsVector *a, int shift);
+
+/**
+ * Allocates and returns a clone of the vector a, that is a vector
+ * with the same coefficients as a.
+ */
+SwsVector *sws_cloneVec(SwsVector *a);
+
+#if LIBSWSCALE_VERSION_MAJOR < 1
+/**
+ * @deprecated Use sws_printVec2() instead.
+ */
+attribute_deprecated void sws_printVec(SwsVector *a);
+#endif
+
+/**
+ * Prints with av_log() a textual representation of the vector a
+ * if log_level <= av_log_level.
+ */
+void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);
+
+void sws_freeVec(SwsVector *a);
+
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+                                float lumaSharpen, float chromaSharpen,
+                                float chromaHShift, float chromaVShift,
+                                int verbose);
+void sws_freeFilter(SwsFilter *filter);
+
+/**
+ * Checks if context can be reused, otherwise reallocates a new
+ * one.
+ *
+ * If context is NULL, just calls sws_getContext() to get a new
+ * context. Otherwise, checks if the parameters are the ones already
+ * saved in context. If that is the case, returns the current
+ * context. Otherwise, frees context and gets a new context with
+ * the new parameters.
+ *
+ * Be warned that srcFilter and dstFilter are not checked; they
+ * are assumed to remain the same.
+ */
+struct SwsContext *sws_getCachedContext(struct SwsContext *context,
+                                        int srcW, int srcH, enum PixelFormat srcFormat,
+                                        int dstW, int dstH, enum PixelFormat dstFormat,
+                                        int flags, SwsFilter *srcFilter,
+                                        SwsFilter *dstFilter, const double *param);
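+
+/* A typical per-frame reuse sketch (context may start out as NULL; if the
+ * parameters changed, the old context is freed and a new one is returned):
+ *
+ *     context = sws_getCachedContext(context, srcW, srcH, srcFormat,
+ *                                    dstW, dstH, dstFormat,
+ *                                    SWS_BILINEAR, NULL, NULL, NULL);
+ */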
+
+#endif /* SWSCALE_SWSCALE_H */

Added: branches/0.6/libswscale/swscale_internal.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/swscale_internal.h	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,469 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWSCALE_SWSCALE_INTERNAL_H
+#define SWSCALE_SWSCALE_INTERNAL_H
+
+#include "config.h"
+
+#if HAVE_ALTIVEC_H
+#include <altivec.h>
+#endif
+
+#include "libavutil/avutil.h"
+
+#define STR(s)         AV_TOSTRING(s) //AV_STRINGIFY is too long
+
+#define MAX_FILTER_SIZE 256
+
+#if ARCH_X86
+#define VOFW 5120
+#else
+#define VOFW 2048 // faster on PPC and not tested on others
+#endif
+
+#define VOF  (VOFW*2)
+
+#if HAVE_BIGENDIAN
+#define ALT32_CORR (-1)
+#else
+#define ALT32_CORR   1
+#endif
+
+#if ARCH_X86_64
+#   define APCK_PTR2 8
+#   define APCK_COEF 16
+#   define APCK_SIZE 24
+#else
+#   define APCK_PTR2 4
+#   define APCK_COEF 8
+#   define APCK_SIZE 16
+#endif
+
+struct SwsContext;
+
+typedef int (*SwsFunc)(struct SwsContext *context, const uint8_t* src[],
+                       int srcStride[], int srcSliceY, int srcSliceH,
+                       uint8_t* dst[], int dstStride[]);
+
+/* This struct should be aligned on at least a 32-byte boundary. */
+typedef struct SwsContext {
+    /**
+     * info on struct for av_log
+     */
+    const AVClass *av_class;
+
+    /**
+     * Note that src, dst, srcStride, dstStride will be copied in the
+     * sws_scale() wrapper so they can be freely modified here.
+     */
+    SwsFunc swScale;
+    int srcW;                     ///< Width  of source      luma/alpha planes.
+    int srcH;                     ///< Height of source      luma/alpha planes.
+    int dstH;                     ///< Height of destination luma/alpha planes.
+    int chrSrcW;                  ///< Width  of source      chroma     planes.
+    int chrSrcH;                  ///< Height of source      chroma     planes.
+    int chrDstW;                  ///< Width  of destination chroma     planes.
+    int chrDstH;                  ///< Height of destination chroma     planes.
+    int lumXInc, chrXInc;
+    int lumYInc, chrYInc;
+    enum PixelFormat dstFormat;   ///< Destination pixel format.
+    enum PixelFormat srcFormat;   ///< Source      pixel format.
+    int dstFormatBpp;             ///< Number of bits per pixel of the destination pixel format.
+    int srcFormatBpp;             ///< Number of bits per pixel of the source      pixel format.
+    int chrSrcHSubSample;         ///< Binary logarithm of horizontal subsampling factor between luma/alpha and chroma planes in source      image.
+    int chrSrcVSubSample;         ///< Binary logarithm of vertical   subsampling factor between luma/alpha and chroma planes in source      image.
+    int chrDstHSubSample;         ///< Binary logarithm of horizontal subsampling factor between luma/alpha and chroma planes in destination image.
+    int chrDstVSubSample;         ///< Binary logarithm of vertical   subsampling factor between luma/alpha and chroma planes in destination image.
+    int vChrDrop;                 ///< Binary logarithm of extra vertical subsampling factor in source image chroma planes specified by user.
+    int sliceDir;                 ///< Direction that slices are fed to the scaler (1 = top-to-bottom, -1 = bottom-to-top).
+    double param[2];              ///< Input parameters for scaling algorithms that need them.
+
+    uint32_t pal_yuv[256];
+    uint32_t pal_rgb[256];
+
+    /**
+     * @name Scaled horizontal lines ring buffer.
+     * The horizontal scaler keeps just enough scaled lines in a ring buffer
+     * so they may be passed to the vertical scaler. The pointers to the
+     * allocated buffers for each line are duplicated in sequence in the ring
+     * buffer to simplify indexing and avoid wrapping around between lines
+     * inside the vertical scaler code. The wrapping is done before the
+     * vertical scaler is called.
+     */
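+    /* For example (a sketch assuming vLumBufSize == 4): lumPixBuf[0..3] and
+     * lumPixBuf[4..7] point to the same four line buffers, so the vertical
+     * scaler can index lumPixBuf[i .. i+filterSize-1] without wrapping. */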
+    //@{
+    int16_t **lumPixBuf;          ///< Ring buffer for scaled horizontal luma   plane lines to be fed to the vertical scaler.
+    int16_t **chrPixBuf;          ///< Ring buffer for scaled horizontal chroma plane lines to be fed to the vertical scaler.
+    int16_t **alpPixBuf;          ///< Ring buffer for scaled horizontal alpha  plane lines to be fed to the vertical scaler.
+    int       vLumBufSize;        ///< Number of vertical luma/alpha lines allocated in the ring buffer.
+    int       vChrBufSize;        ///< Number of vertical chroma     lines allocated in the ring buffer.
+    int       lastInLumBuf;       ///< Last scaled horizontal luma/alpha line from source in the ring buffer.
+    int       lastInChrBuf;       ///< Last scaled horizontal chroma     line from source in the ring buffer.
+    int       lumBufIndex;        ///< Index in ring buffer of the last scaled horizontal luma/alpha line from source.
+    int       chrBufIndex;        ///< Index in ring buffer of the last scaled horizontal chroma     line from source.
+    //@}
+
+    uint8_t formatConvBuffer[VOF]; //FIXME dynamic allocation, but we have to change a lot of code for this to be useful
+
+    /**
+     * @name Horizontal and vertical filters.
+     * To better understand the following fields, here is a pseudo-code of
+     * their usage in filtering a horizontal line:
+     * @code
+     * for (i = 0; i < width; i++) {
+     *     dst[i] = 0;
+     *     for (j = 0; j < filterSize; j++)
+     *         dst[i] += src[ filterPos[i] + j ] * filter[ filterSize * i + j ];
+     *     dst[i] >>= FRAC_BITS; // The actual implementation is fixed-point.
+     * }
+     * @endcode
+     */
+    //@{
+    int16_t *hLumFilter;          ///< Array of horizontal filter coefficients for luma/alpha planes.
+    int16_t *hChrFilter;          ///< Array of horizontal filter coefficients for chroma     planes.
+    int16_t *vLumFilter;          ///< Array of vertical   filter coefficients for luma/alpha planes.
+    int16_t *vChrFilter;          ///< Array of vertical   filter coefficients for chroma     planes.
+    int16_t *hLumFilterPos;       ///< Array of horizontal filter starting positions for each dst[i] for luma/alpha planes.
+    int16_t *hChrFilterPos;       ///< Array of horizontal filter starting positions for each dst[i] for chroma     planes.
+    int16_t *vLumFilterPos;       ///< Array of vertical   filter starting positions for each dst[i] for luma/alpha planes.
+    int16_t *vChrFilterPos;       ///< Array of vertical   filter starting positions for each dst[i] for chroma     planes.
+    int      hLumFilterSize;      ///< Horizontal filter size for luma/alpha pixels.
+    int      hChrFilterSize;      ///< Horizontal filter size for chroma     pixels.
+    int      vLumFilterSize;      ///< Vertical   filter size for luma/alpha pixels.
+    int      vChrFilterSize;      ///< Vertical   filter size for chroma     pixels.
+    //@}
+
+    int lumMmx2FilterCodeSize;    ///< Runtime-generated MMX2 horizontal fast bilinear scaler code size for luma/alpha planes.
+    int chrMmx2FilterCodeSize;    ///< Runtime-generated MMX2 horizontal fast bilinear scaler code size for chroma     planes.
+    uint8_t *lumMmx2FilterCode;   ///< Runtime-generated MMX2 horizontal fast bilinear scaler code for luma/alpha planes.
+    uint8_t *chrMmx2FilterCode;   ///< Runtime-generated MMX2 horizontal fast bilinear scaler code for chroma     planes.
+
+    int canMMX2BeUsed;
+
+    int dstY;                     ///< Last destination vertical line output from last slice.
+    int flags;                    ///< Flags passed by the user to select scaler algorithm, optimizations, subsampling, etc...
+    void * yuvTable;            // pointer to the start of the yuv->rgb table so it can be freed later
+    uint8_t * table_rV[256];
+    uint8_t * table_gU[256];
+    int    table_gV[256];
+    uint8_t * table_bU[256];
+
+    //Colorspace stuff
+    int contrast, brightness, saturation;    // for sws_getColorspaceDetails
+    int srcColorspaceTable[4];
+    int dstColorspaceTable[4];
+    int srcRange;                 ///< 0 = MPG YUV range, 1 = JPG YUV range (source      image).
+    int dstRange;                 ///< 0 = MPG YUV range, 1 = JPG YUV range (destination image).
+    int yuv2rgb_y_offset;
+    int yuv2rgb_y_coeff;
+    int yuv2rgb_v2r_coeff;
+    int yuv2rgb_v2g_coeff;
+    int yuv2rgb_u2g_coeff;
+    int yuv2rgb_u2b_coeff;
+
+#define RED_DITHER            "0*8"
+#define GREEN_DITHER          "1*8"
+#define BLUE_DITHER           "2*8"
+#define Y_COEFF               "3*8"
+#define VR_COEFF              "4*8"
+#define UB_COEFF              "5*8"
+#define VG_COEFF              "6*8"
+#define UG_COEFF              "7*8"
+#define Y_OFFSET              "8*8"
+#define U_OFFSET              "9*8"
+#define V_OFFSET              "10*8"
+#define LUM_MMX_FILTER_OFFSET "11*8"
+#define CHR_MMX_FILTER_OFFSET "11*8+4*4*256"
+#define DSTW_OFFSET           "11*8+4*4*256*2" //do not change, it is hardcoded in the ASM
+#define ESP_OFFSET            "11*8+4*4*256*2+8"
+#define VROUNDER_OFFSET       "11*8+4*4*256*2+16"
+#define U_TEMP                "11*8+4*4*256*2+24"
+#define V_TEMP                "11*8+4*4*256*2+32"
+#define Y_TEMP                "11*8+4*4*256*2+40"
+#define ALP_MMX_FILTER_OFFSET "11*8+4*4*256*2+48"
+
+    DECLARE_ALIGNED(8, uint64_t, redDither);
+    DECLARE_ALIGNED(8, uint64_t, greenDither);
+    DECLARE_ALIGNED(8, uint64_t, blueDither);
+
+    DECLARE_ALIGNED(8, uint64_t, yCoeff);
+    DECLARE_ALIGNED(8, uint64_t, vrCoeff);
+    DECLARE_ALIGNED(8, uint64_t, ubCoeff);
+    DECLARE_ALIGNED(8, uint64_t, vgCoeff);
+    DECLARE_ALIGNED(8, uint64_t, ugCoeff);
+    DECLARE_ALIGNED(8, uint64_t, yOffset);
+    DECLARE_ALIGNED(8, uint64_t, uOffset);
+    DECLARE_ALIGNED(8, uint64_t, vOffset);
+    int32_t  lumMmxFilter[4*MAX_FILTER_SIZE];
+    int32_t  chrMmxFilter[4*MAX_FILTER_SIZE];
+    int dstW;                     ///< Width  of destination luma/alpha planes.
+    DECLARE_ALIGNED(8, uint64_t, esp);
+    DECLARE_ALIGNED(8, uint64_t, vRounder);
+    DECLARE_ALIGNED(8, uint64_t, u_temp);
+    DECLARE_ALIGNED(8, uint64_t, v_temp);
+    DECLARE_ALIGNED(8, uint64_t, y_temp);
+    int32_t  alpMmxFilter[4*MAX_FILTER_SIZE];
+
+#if HAVE_ALTIVEC
+    vector signed short   CY;
+    vector signed short   CRV;
+    vector signed short   CBU;
+    vector signed short   CGU;
+    vector signed short   CGV;
+    vector signed short   OY;
+    vector unsigned short CSHIFT;
+    vector signed short   *vYCoeffsBank, *vCCoeffsBank;
+#endif
+
+#if ARCH_BFIN
+    DECLARE_ALIGNED(4, uint32_t, oy);
+    DECLARE_ALIGNED(4, uint32_t, oc);
+    DECLARE_ALIGNED(4, uint32_t, zero);
+    DECLARE_ALIGNED(4, uint32_t, cy);
+    DECLARE_ALIGNED(4, uint32_t, crv);
+    DECLARE_ALIGNED(4, uint32_t, rmask);
+    DECLARE_ALIGNED(4, uint32_t, cbu);
+    DECLARE_ALIGNED(4, uint32_t, bmask);
+    DECLARE_ALIGNED(4, uint32_t, cgu);
+    DECLARE_ALIGNED(4, uint32_t, cgv);
+    DECLARE_ALIGNED(4, uint32_t, gmask);
+#endif
+
+#if HAVE_VIS
+    DECLARE_ALIGNED(8, uint64_t, sparc_coeffs)[10];
+#endif
+
+    /* function pointers for swScale() */
+    void (*yuv2nv12X  )(struct SwsContext *c,
+                        const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                        const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                        uint8_t *dest, uint8_t *uDest,
+                        int dstW, int chrDstW, int dstFormat);
+    void (*yuv2yuv1   )(struct SwsContext *c,
+                        const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
+                        uint8_t *dest,
+                        uint8_t *uDest, uint8_t *vDest, uint8_t *aDest,
+                        long dstW, long chrDstW);
+    void (*yuv2yuvX   )(struct SwsContext *c,
+                        const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                        const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                        const int16_t **alpSrc,
+                        uint8_t *dest,
+                        uint8_t *uDest, uint8_t *vDest, uint8_t *aDest,
+                        long dstW, long chrDstW);
+    void (*yuv2packed1)(struct SwsContext *c,
+                        const uint16_t *buf0,
+                        const uint16_t *uvbuf0, const uint16_t *uvbuf1,
+                        const uint16_t *abuf0,
+                        uint8_t *dest,
+                        int dstW, int uvalpha, int dstFormat, int flags, int y);
+    void (*yuv2packed2)(struct SwsContext *c,
+                        const uint16_t *buf0, const uint16_t *buf1,
+                        const uint16_t *uvbuf0, const uint16_t *uvbuf1,
+                        const uint16_t *abuf0, const uint16_t *abuf1,
+                        uint8_t *dest,
+                        int dstW, int yalpha, int uvalpha, int y);
+    void (*yuv2packedX)(struct SwsContext *c,
+                        const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                        const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                        const int16_t **alpSrc, uint8_t *dest,
+                        long dstW, long dstY);
+
+    void (*lumToYV12)(uint8_t *dst, const uint8_t *src,
+                      long width, uint32_t *pal); ///< Unscaled conversion of luma plane to YV12 for horizontal scaler.
+    void (*alpToYV12)(uint8_t *dst, const uint8_t *src,
+                      long width, uint32_t *pal); ///< Unscaled conversion of alpha plane to YV12 for horizontal scaler.
+    void (*chrToYV12)(uint8_t *dstU, uint8_t *dstV,
+                      const uint8_t *src1, const uint8_t *src2,
+                      long width, uint32_t *pal); ///< Unscaled conversion of chroma planes to YV12 for horizontal scaler.
+    void (*hyscale_fast)(struct SwsContext *c,
+                         int16_t *dst, long dstWidth,
+                         const uint8_t *src, int srcW, int xInc);
+    void (*hcscale_fast)(struct SwsContext *c,
+                         int16_t *dst, long dstWidth,
+                         const uint8_t *src1, const uint8_t *src2,
+                         int srcW, int xInc);
+
+    void (*hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW,
+                   int xInc, const int16_t *filter, const int16_t *filterPos,
+                   long filterSize);
+
+    void (*lumConvertRange)(uint16_t *dst, int width); ///< Color range conversion function for luma plane if needed.
+    void (*chrConvertRange)(uint16_t *dst, int width); ///< Color range conversion function for chroma planes if needed.
+
+    int lumSrcOffset; ///< Offset given to luma src pointers passed to horizontal input functions.
+    int chrSrcOffset; ///< Offset given to chroma src pointers passed to horizontal input functions.
+    int alpSrcOffset; ///< Offset given to alpha src pointers passed to horizontal input functions.
+
+    int needs_hcscale; ///< Set if there are chroma planes to be converted.
+
+} SwsContext;
+//FIXME check init (where 0)
+
+SwsFunc ff_yuv2rgb_get_func_ptr(SwsContext *c);
+int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4],
+                             int fullRange, int brightness,
+                             int contrast, int saturation);
+
+void ff_yuv2rgb_init_tables_altivec(SwsContext *c, const int inv_table[4],
+                                    int brightness, int contrast, int saturation);
+SwsFunc ff_yuv2rgb_init_mmx(SwsContext *c);
+SwsFunc ff_yuv2rgb_init_vis(SwsContext *c);
+SwsFunc ff_yuv2rgb_init_mlib(SwsContext *c);
+SwsFunc ff_yuv2rgb_init_altivec(SwsContext *c);
+SwsFunc ff_yuv2rgb_get_func_ptr_bfin(SwsContext *c);
+void ff_bfin_get_unscaled_swscale(SwsContext *c);
+void ff_yuv2packedX_altivec(SwsContext *c,
+                            const int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+                            const int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+                            uint8_t *dest, int dstW, int dstY);
+
+const char *sws_format_name(enum PixelFormat format);
+
+//FIXME replace this with something faster
+#define is16BPS(x)      (           \
+           (x)==PIX_FMT_GRAY16BE    \
+        || (x)==PIX_FMT_GRAY16LE    \
+        || (x)==PIX_FMT_RGB48BE     \
+        || (x)==PIX_FMT_RGB48LE     \
+        || (x)==PIX_FMT_YUV420P16LE   \
+        || (x)==PIX_FMT_YUV422P16LE   \
+        || (x)==PIX_FMT_YUV444P16LE   \
+        || (x)==PIX_FMT_YUV420P16BE   \
+        || (x)==PIX_FMT_YUV422P16BE   \
+        || (x)==PIX_FMT_YUV444P16BE   \
+    )
+#define isBE(x) ((x)&1)
+#define isPlanar8YUV(x) (           \
+           (x)==PIX_FMT_YUV410P     \
+        || (x)==PIX_FMT_YUV420P     \
+        || (x)==PIX_FMT_YUVA420P    \
+        || (x)==PIX_FMT_YUV411P     \
+        || (x)==PIX_FMT_YUV422P     \
+        || (x)==PIX_FMT_YUV444P     \
+        || (x)==PIX_FMT_YUV440P     \
+        || (x)==PIX_FMT_NV12        \
+        || (x)==PIX_FMT_NV21        \
+    )
+#define isPlanarYUV(x)  (           \
+        isPlanar8YUV(x)             \
+        || (x)==PIX_FMT_YUV420P16LE   \
+        || (x)==PIX_FMT_YUV422P16LE   \
+        || (x)==PIX_FMT_YUV444P16LE   \
+        || (x)==PIX_FMT_YUV420P16BE   \
+        || (x)==PIX_FMT_YUV422P16BE   \
+        || (x)==PIX_FMT_YUV444P16BE   \
+    )
+#define isYUV(x)        (           \
+           (x)==PIX_FMT_UYVY422     \
+        || (x)==PIX_FMT_YUYV422     \
+        || isPlanarYUV(x)           \
+    )
+#define isGray(x)       (           \
+           (x)==PIX_FMT_GRAY8       \
+        || (x)==PIX_FMT_GRAY16BE    \
+        || (x)==PIX_FMT_GRAY16LE    \
+    )
+#define isGray16(x)     (           \
+           (x)==PIX_FMT_GRAY16BE    \
+        || (x)==PIX_FMT_GRAY16LE    \
+    )
+#define isRGBinInt(x)   (           \
+           (x)==PIX_FMT_RGB48BE     \
+        || (x)==PIX_FMT_RGB48LE     \
+        || (x)==PIX_FMT_RGB32       \
+        || (x)==PIX_FMT_RGB32_1     \
+        || (x)==PIX_FMT_RGB24       \
+        || (x)==PIX_FMT_RGB565BE    \
+        || (x)==PIX_FMT_RGB565LE    \
+        || (x)==PIX_FMT_RGB555BE    \
+        || (x)==PIX_FMT_RGB555LE    \
+        || (x)==PIX_FMT_RGB444BE    \
+        || (x)==PIX_FMT_RGB444LE    \
+        || (x)==PIX_FMT_RGB8        \
+        || (x)==PIX_FMT_RGB4        \
+        || (x)==PIX_FMT_RGB4_BYTE   \
+        || (x)==PIX_FMT_MONOBLACK   \
+        || (x)==PIX_FMT_MONOWHITE   \
+    )
+#define isBGRinInt(x)   (           \
+           (x)==PIX_FMT_BGR32       \
+        || (x)==PIX_FMT_BGR32_1     \
+        || (x)==PIX_FMT_BGR24       \
+        || (x)==PIX_FMT_BGR565BE    \
+        || (x)==PIX_FMT_BGR565LE    \
+        || (x)==PIX_FMT_BGR555BE    \
+        || (x)==PIX_FMT_BGR555LE    \
+        || (x)==PIX_FMT_BGR444BE    \
+        || (x)==PIX_FMT_BGR444LE    \
+        || (x)==PIX_FMT_BGR8        \
+        || (x)==PIX_FMT_BGR4        \
+        || (x)==PIX_FMT_BGR4_BYTE   \
+        || (x)==PIX_FMT_MONOBLACK   \
+        || (x)==PIX_FMT_MONOWHITE   \
+    )
+#define isRGBinBytes(x) (           \
+           (x)==PIX_FMT_RGB48BE     \
+        || (x)==PIX_FMT_RGB48LE     \
+        || (x)==PIX_FMT_RGBA        \
+        || (x)==PIX_FMT_ARGB        \
+        || (x)==PIX_FMT_RGB24       \
+    )
+#define isBGRinBytes(x) (           \
+           (x)==PIX_FMT_BGRA        \
+        || (x)==PIX_FMT_ABGR        \
+        || (x)==PIX_FMT_BGR24       \
+    )
+#define isAnyRGB(x)     (           \
+            isRGBinInt(x)           \
+        ||  isBGRinInt(x)           \
+    )
+#define isALPHA(x)      (           \
+           (x)==PIX_FMT_BGR32       \
+        || (x)==PIX_FMT_BGR32_1     \
+        || (x)==PIX_FMT_RGB32       \
+        || (x)==PIX_FMT_RGB32_1     \
+        || (x)==PIX_FMT_YUVA420P    \
+    )
+#define usePal(x) (av_pix_fmt_descriptors[x].flags & PIX_FMT_PAL)
+
+extern const uint64_t ff_dither4[2];
+extern const uint64_t ff_dither8[2];
+
+extern const AVClass sws_context_class;
+
+/**
+ * Sets c->swScale to an unscaled converter if one exists for the specific
+ * source and destination formats, bit depths, flags, etc.
+ */
+void ff_get_unscaled_swscale(SwsContext *c);
+
+/**
+ * Returns the SWS_CPU_CAPS for the optimized code compiled into swscale.
+ */
+int ff_hardcodedcpuflags(void);
+
+/**
+ * Returns a function pointer to the fastest main scaler path, chosen
+ * according to the architecture and available optimizations.
+ */
+SwsFunc ff_getSwsFunc(SwsContext *c);
+
+#endif /* SWSCALE_SWSCALE_INTERNAL_H */

Added: branches/0.6/libswscale/swscale_template.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/swscale_template.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,3066 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef REAL_MOVNTQ
+#undef MOVNTQ
+#undef PAVGB
+#undef PREFETCH
+
+#if COMPILE_TEMPLATE_AMD3DNOW
+#define PREFETCH  "prefetch"
+#elif COMPILE_TEMPLATE_MMX2
+#define PREFETCH "prefetchnta"
+#else
+#define PREFETCH  " # nop"
+#endif
+
+#if COMPILE_TEMPLATE_MMX2
+#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
+#elif COMPILE_TEMPLATE_AMD3DNOW
+#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
+#endif
+
+#if COMPILE_TEMPLATE_MMX2
+#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
+#else
+#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
+#endif
+#define MOVNTQ(a,b)  REAL_MOVNTQ(a,b)
+
+#if COMPILE_TEMPLATE_ALTIVEC
+#include "ppc/swscale_altivec_template.c"
+#endif
+
+#define YSCALEYUV2YV12X(x, offset, dest, width) \
+    __asm__ volatile(\
+        "xor                          %%"REG_a", %%"REG_a"  \n\t"\
+        "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
+        "movq                             %%mm3, %%mm4      \n\t"\
+        "lea                     " offset "(%0), %%"REG_d"  \n\t"\
+        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
+        ASMALIGN(4) /* FIXME Unroll? */\
+        "1:                                                 \n\t"\
+        "movq                      8(%%"REG_d"), %%mm0      \n\t" /* filterCoeff */\
+        "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm2      \n\t" /* srcData */\
+        "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm5      \n\t" /* srcData */\
+        "add                                $16, %%"REG_d"  \n\t"\
+        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
+        "test                         %%"REG_S", %%"REG_S"  \n\t"\
+        "pmulhw                           %%mm0, %%mm2      \n\t"\
+        "pmulhw                           %%mm0, %%mm5      \n\t"\
+        "paddw                            %%mm2, %%mm3      \n\t"\
+        "paddw                            %%mm5, %%mm4      \n\t"\
+        " jnz                                1b             \n\t"\
+        "psraw                               $3, %%mm3      \n\t"\
+        "psraw                               $3, %%mm4      \n\t"\
+        "packuswb                         %%mm4, %%mm3      \n\t"\
+        MOVNTQ(%%mm3, (%1, %%REGa))\
+        "add                                 $8, %%"REG_a"  \n\t"\
+        "cmp                                 %2, %%"REG_a"  \n\t"\
+        "movq             "VROUNDER_OFFSET"(%0), %%mm3      \n\t"\
+        "movq                             %%mm3, %%mm4      \n\t"\
+        "lea                     " offset "(%0), %%"REG_d"  \n\t"\
+        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
+        "jb                                  1b             \n\t"\
+        :: "r" (&c->redDither),\
+        "r" (dest), "g" (width)\
+        : "%"REG_a, "%"REG_d, "%"REG_S\
+    );
+
+#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
+    __asm__ volatile(\
+        "lea                     " offset "(%0), %%"REG_d"  \n\t"\
+        "xor                          %%"REG_a", %%"REG_a"  \n\t"\
+        "pxor                             %%mm4, %%mm4      \n\t"\
+        "pxor                             %%mm5, %%mm5      \n\t"\
+        "pxor                             %%mm6, %%mm6      \n\t"\
+        "pxor                             %%mm7, %%mm7      \n\t"\
+        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
+        ASMALIGN(4) \
+        "1:                                                 \n\t"\
+        "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm0      \n\t" /* srcData */\
+        "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm2      \n\t" /* srcData */\
+        "mov        "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"  \n\t"\
+        "movq   "  x "(%%"REG_S", %%"REG_a", 2), %%mm1      \n\t" /* srcData */\
+        "movq                             %%mm0, %%mm3      \n\t"\
+        "punpcklwd                        %%mm1, %%mm0      \n\t"\
+        "punpckhwd                        %%mm1, %%mm3      \n\t"\
+        "movq       "STR(APCK_COEF)"(%%"REG_d"), %%mm1      \n\t" /* filterCoeff */\
+        "pmaddwd                          %%mm1, %%mm0      \n\t"\
+        "pmaddwd                          %%mm1, %%mm3      \n\t"\
+        "paddd                            %%mm0, %%mm4      \n\t"\
+        "paddd                            %%mm3, %%mm5      \n\t"\
+        "movq 8+"  x "(%%"REG_S", %%"REG_a", 2), %%mm3      \n\t" /* srcData */\
+        "mov        "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"  \n\t"\
+        "add                  $"STR(APCK_SIZE)", %%"REG_d"  \n\t"\
+        "test                         %%"REG_S", %%"REG_S"  \n\t"\
+        "movq                             %%mm2, %%mm0      \n\t"\
+        "punpcklwd                        %%mm3, %%mm2      \n\t"\
+        "punpckhwd                        %%mm3, %%mm0      \n\t"\
+        "pmaddwd                          %%mm1, %%mm2      \n\t"\
+        "pmaddwd                          %%mm1, %%mm0      \n\t"\
+        "paddd                            %%mm2, %%mm6      \n\t"\
+        "paddd                            %%mm0, %%mm7      \n\t"\
+        " jnz                                1b             \n\t"\
+        "psrad                              $16, %%mm4      \n\t"\
+        "psrad                              $16, %%mm5      \n\t"\
+        "psrad                              $16, %%mm6      \n\t"\
+        "psrad                              $16, %%mm7      \n\t"\
+        "movq             "VROUNDER_OFFSET"(%0), %%mm0      \n\t"\
+        "packssdw                         %%mm5, %%mm4      \n\t"\
+        "packssdw                         %%mm7, %%mm6      \n\t"\
+        "paddw                            %%mm0, %%mm4      \n\t"\
+        "paddw                            %%mm0, %%mm6      \n\t"\
+        "psraw                               $3, %%mm4      \n\t"\
+        "psraw                               $3, %%mm6      \n\t"\
+        "packuswb                         %%mm6, %%mm4      \n\t"\
+        MOVNTQ(%%mm4, (%1, %%REGa))\
+        "add                                 $8, %%"REG_a"  \n\t"\
+        "cmp                                 %2, %%"REG_a"  \n\t"\
+        "lea                     " offset "(%0), %%"REG_d"  \n\t"\
+        "pxor                             %%mm4, %%mm4      \n\t"\
+        "pxor                             %%mm5, %%mm5      \n\t"\
+        "pxor                             %%mm6, %%mm6      \n\t"\
+        "pxor                             %%mm7, %%mm7      \n\t"\
+        "mov                        (%%"REG_d"), %%"REG_S"  \n\t"\
+        "jb                                  1b             \n\t"\
+        :: "r" (&c->redDither),\
+        "r" (dest), "g" (width)\
+        : "%"REG_a, "%"REG_d, "%"REG_S\
+    );
+
+#define YSCALEYUV2YV121 \
+    "mov %2, %%"REG_a"                    \n\t"\
+    ASMALIGN(4) /* FIXME Unroll? */\
+    "1:                                   \n\t"\
+    "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"\
+    "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"\
+    "psraw                 $7, %%mm0      \n\t"\
+    "psraw                 $7, %%mm1      \n\t"\
+    "packuswb           %%mm1, %%mm0      \n\t"\
+    MOVNTQ(%%mm0, (%1, %%REGa))\
+    "add                   $8, %%"REG_a"  \n\t"\
+    "jnc                   1b             \n\t"
+
+#define YSCALEYUV2YV121_ACCURATE \
+    "mov %2, %%"REG_a"                    \n\t"\
+    "pcmpeqw %%mm7, %%mm7                 \n\t"\
+    "psrlw                 $15, %%mm7     \n\t"\
+    "psllw                  $6, %%mm7     \n\t"\
+    ASMALIGN(4) /* FIXME Unroll? */\
+    "1:                                   \n\t"\
+    "movq  (%0, %%"REG_a", 2), %%mm0      \n\t"\
+    "movq 8(%0, %%"REG_a", 2), %%mm1      \n\t"\
+    "paddsw             %%mm7, %%mm0      \n\t"\
+    "paddsw             %%mm7, %%mm1      \n\t"\
+    "psraw                 $7, %%mm0      \n\t"\
+    "psraw                 $7, %%mm1      \n\t"\
+    "packuswb           %%mm1, %%mm0      \n\t"\
+    MOVNTQ(%%mm0, (%1, %%REGa))\
+    "add                   $8, %%"REG_a"  \n\t"\
+    "jnc                   1b             \n\t"
+
+/*
+    :: "m" (-lumFilterSize), "m" (-chrFilterSize),
+       "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
+       "r" (dest), "m" (dstW),
+       "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
+    : "%eax", "%ebx", "%ecx", "%edx", "%esi"
+*/
+#define YSCALEYUV2PACKEDX_UV \
+    __asm__ volatile(\
+        "xor                   %%"REG_a", %%"REG_a"     \n\t"\
+        ASMALIGN(4)\
+        "nop                                            \n\t"\
+        "1:                                             \n\t"\
+        "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
+        "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
+        "movq      "VROUNDER_OFFSET"(%0), %%mm3         \n\t"\
+        "movq                      %%mm3, %%mm4         \n\t"\
+        ASMALIGN(4)\
+        "2:                                             \n\t"\
+        "movq               8(%%"REG_d"), %%mm0         \n\t" /* filterCoeff */\
+        "movq     (%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* UsrcData */\
+        "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5         \n\t" /* VsrcData */\
+        "add                         $16, %%"REG_d"     \n\t"\
+        "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
+        "pmulhw                    %%mm0, %%mm2         \n\t"\
+        "pmulhw                    %%mm0, %%mm5         \n\t"\
+        "paddw                     %%mm2, %%mm3         \n\t"\
+        "paddw                     %%mm5, %%mm4         \n\t"\
+        "test                  %%"REG_S", %%"REG_S"     \n\t"\
+        " jnz                         2b                \n\t"\
+
+#define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
+    "lea                "offset"(%0), %%"REG_d"     \n\t"\
+    "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
+    "movq      "VROUNDER_OFFSET"(%0), "#dst1"       \n\t"\
+    "movq                    "#dst1", "#dst2"       \n\t"\
+    ASMALIGN(4)\
+    "2:                                             \n\t"\
+    "movq               8(%%"REG_d"), "#coeff"      \n\t" /* filterCoeff */\
+    "movq  (%%"REG_S", %%"REG_a", 2), "#src1"       \n\t" /* Y1srcData */\
+    "movq 8(%%"REG_S", %%"REG_a", 2), "#src2"       \n\t" /* Y2srcData */\
+    "add                         $16, %%"REG_d"            \n\t"\
+    "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
+    "pmulhw                 "#coeff", "#src1"       \n\t"\
+    "pmulhw                 "#coeff", "#src2"       \n\t"\
+    "paddw                   "#src1", "#dst1"       \n\t"\
+    "paddw                   "#src2", "#dst2"       \n\t"\
+    "test                  %%"REG_S", %%"REG_S"     \n\t"\
+    " jnz                         2b                \n\t"\
+
+#define YSCALEYUV2PACKEDX \
+    YSCALEYUV2PACKEDX_UV \
+    YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \
+
+#define YSCALEYUV2PACKEDX_END                     \
+        :: "r" (&c->redDither),                   \
+            "m" (dummy), "m" (dummy), "m" (dummy),\
+            "r" (dest), "m" (dstW)                \
+        : "%"REG_a, "%"REG_d, "%"REG_S            \
+    );
+
+#define YSCALEYUV2PACKEDX_ACCURATE_UV \
+    __asm__ volatile(\
+        "xor %%"REG_a", %%"REG_a"                       \n\t"\
+        ASMALIGN(4)\
+        "nop                                            \n\t"\
+        "1:                                             \n\t"\
+        "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"     \n\t"\
+        "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
+        "pxor                      %%mm4, %%mm4         \n\t"\
+        "pxor                      %%mm5, %%mm5         \n\t"\
+        "pxor                      %%mm6, %%mm6         \n\t"\
+        "pxor                      %%mm7, %%mm7         \n\t"\
+        ASMALIGN(4)\
+        "2:                                             \n\t"\
+        "movq     (%%"REG_S", %%"REG_a"), %%mm0         \n\t" /* UsrcData */\
+        "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2         \n\t" /* VsrcData */\
+        "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
+        "movq     (%%"REG_S", %%"REG_a"), %%mm1         \n\t" /* UsrcData */\
+        "movq                      %%mm0, %%mm3         \n\t"\
+        "punpcklwd                 %%mm1, %%mm0         \n\t"\
+        "punpckhwd                 %%mm1, %%mm3         \n\t"\
+        "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1         \n\t" /* filterCoeff */\
+        "pmaddwd                   %%mm1, %%mm0         \n\t"\
+        "pmaddwd                   %%mm1, %%mm3         \n\t"\
+        "paddd                     %%mm0, %%mm4         \n\t"\
+        "paddd                     %%mm3, %%mm5         \n\t"\
+        "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3         \n\t" /* VsrcData */\
+        "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
+        "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
+        "test                  %%"REG_S", %%"REG_S"     \n\t"\
+        "movq                      %%mm2, %%mm0         \n\t"\
+        "punpcklwd                 %%mm3, %%mm2         \n\t"\
+        "punpckhwd                 %%mm3, %%mm0         \n\t"\
+        "pmaddwd                   %%mm1, %%mm2         \n\t"\
+        "pmaddwd                   %%mm1, %%mm0         \n\t"\
+        "paddd                     %%mm2, %%mm6         \n\t"\
+        "paddd                     %%mm0, %%mm7         \n\t"\
+        " jnz                         2b                \n\t"\
+        "psrad                       $16, %%mm4         \n\t"\
+        "psrad                       $16, %%mm5         \n\t"\
+        "psrad                       $16, %%mm6         \n\t"\
+        "psrad                       $16, %%mm7         \n\t"\
+        "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
+        "packssdw                  %%mm5, %%mm4         \n\t"\
+        "packssdw                  %%mm7, %%mm6         \n\t"\
+        "paddw                     %%mm0, %%mm4         \n\t"\
+        "paddw                     %%mm0, %%mm6         \n\t"\
+        "movq                      %%mm4, "U_TEMP"(%0)  \n\t"\
+        "movq                      %%mm6, "V_TEMP"(%0)  \n\t"\
+
+#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
+    "lea                "offset"(%0), %%"REG_d"     \n\t"\
+    "mov                 (%%"REG_d"), %%"REG_S"     \n\t"\
+    "pxor                      %%mm1, %%mm1         \n\t"\
+    "pxor                      %%mm5, %%mm5         \n\t"\
+    "pxor                      %%mm7, %%mm7         \n\t"\
+    "pxor                      %%mm6, %%mm6         \n\t"\
+    ASMALIGN(4)\
+    "2:                                             \n\t"\
+    "movq  (%%"REG_S", %%"REG_a", 2), %%mm0         \n\t" /* Y1srcData */\
+    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2         \n\t" /* Y2srcData */\
+    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S"     \n\t"\
+    "movq  (%%"REG_S", %%"REG_a", 2), %%mm4         \n\t" /* Y1srcData */\
+    "movq                      %%mm0, %%mm3         \n\t"\
+    "punpcklwd                 %%mm4, %%mm0         \n\t"\
+    "punpckhwd                 %%mm4, %%mm3         \n\t"\
+    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4         \n\t" /* filterCoeff */\
+    "pmaddwd                   %%mm4, %%mm0         \n\t"\
+    "pmaddwd                   %%mm4, %%mm3         \n\t"\
+    "paddd                     %%mm0, %%mm1         \n\t"\
+    "paddd                     %%mm3, %%mm5         \n\t"\
+    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3         \n\t" /* Y2srcData */\
+    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S"     \n\t"\
+    "add           $"STR(APCK_SIZE)", %%"REG_d"     \n\t"\
+    "test                  %%"REG_S", %%"REG_S"     \n\t"\
+    "movq                      %%mm2, %%mm0         \n\t"\
+    "punpcklwd                 %%mm3, %%mm2         \n\t"\
+    "punpckhwd                 %%mm3, %%mm0         \n\t"\
+    "pmaddwd                   %%mm4, %%mm2         \n\t"\
+    "pmaddwd                   %%mm4, %%mm0         \n\t"\
+    "paddd                     %%mm2, %%mm7         \n\t"\
+    "paddd                     %%mm0, %%mm6         \n\t"\
+    " jnz                         2b                \n\t"\
+    "psrad                       $16, %%mm1         \n\t"\
+    "psrad                       $16, %%mm5         \n\t"\
+    "psrad                       $16, %%mm7         \n\t"\
+    "psrad                       $16, %%mm6         \n\t"\
+    "movq      "VROUNDER_OFFSET"(%0), %%mm0         \n\t"\
+    "packssdw                  %%mm5, %%mm1         \n\t"\
+    "packssdw                  %%mm6, %%mm7         \n\t"\
+    "paddw                     %%mm0, %%mm1         \n\t"\
+    "paddw                     %%mm0, %%mm7         \n\t"\
+    "movq               "U_TEMP"(%0), %%mm3         \n\t"\
+    "movq               "V_TEMP"(%0), %%mm4         \n\t"\
+
+#define YSCALEYUV2PACKEDX_ACCURATE \
+    YSCALEYUV2PACKEDX_ACCURATE_UV \
+    YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
+
+#define YSCALEYUV2RGBX \
+    "psubw  "U_OFFSET"(%0), %%mm3       \n\t" /* (U-128)8*/\
+    "psubw  "V_OFFSET"(%0), %%mm4       \n\t" /* (V-128)8*/\
+    "movq            %%mm3, %%mm2       \n\t" /* (U-128)8*/\
+    "movq            %%mm4, %%mm5       \n\t" /* (V-128)8*/\
+    "pmulhw "UG_COEFF"(%0), %%mm3       \n\t"\
+    "pmulhw "VG_COEFF"(%0), %%mm4       \n\t"\
+    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+    "pmulhw "UB_COEFF"(%0), %%mm2       \n\t"\
+    "pmulhw "VR_COEFF"(%0), %%mm5       \n\t"\
+    "psubw  "Y_OFFSET"(%0), %%mm1       \n\t" /* 8(Y-16)*/\
+    "psubw  "Y_OFFSET"(%0), %%mm7       \n\t" /* 8(Y-16)*/\
+    "pmulhw  "Y_COEFF"(%0), %%mm1       \n\t"\
+    "pmulhw  "Y_COEFF"(%0), %%mm7       \n\t"\
+    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+    "paddw           %%mm3, %%mm4       \n\t"\
+    "movq            %%mm2, %%mm0       \n\t"\
+    "movq            %%mm5, %%mm6       \n\t"\
+    "movq            %%mm4, %%mm3       \n\t"\
+    "punpcklwd       %%mm2, %%mm2       \n\t"\
+    "punpcklwd       %%mm5, %%mm5       \n\t"\
+    "punpcklwd       %%mm4, %%mm4       \n\t"\
+    "paddw           %%mm1, %%mm2       \n\t"\
+    "paddw           %%mm1, %%mm5       \n\t"\
+    "paddw           %%mm1, %%mm4       \n\t"\
+    "punpckhwd       %%mm0, %%mm0       \n\t"\
+    "punpckhwd       %%mm6, %%mm6       \n\t"\
+    "punpckhwd       %%mm3, %%mm3       \n\t"\
+    "paddw           %%mm7, %%mm0       \n\t"\
+    "paddw           %%mm7, %%mm6       \n\t"\
+    "paddw           %%mm7, %%mm3       \n\t"\
+    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+    "packuswb        %%mm0, %%mm2       \n\t"\
+    "packuswb        %%mm6, %%mm5       \n\t"\
+    "packuswb        %%mm3, %%mm4       \n\t"\
+
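+/* YSCALEYUV2RGBX is the fixed-point YUV->RGB step: with pmulhw supplying the
+ * (x * coeff) >> 16 multiplies it roughly evaluates, per pixel,
+ *     Y' = (Y - y_offset) * y_coeff
+ *     B  = Y' + (U - 128) * ub_coeff
+ *     G  = Y' + (U - 128) * ug_coeff + (V - 128) * vg_coeff
+ *     R  = Y' + (V - 128) * vr_coeff
+ * and the trailing packuswb instructions clamp each channel to 0..255. */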
+#define REAL_YSCALEYUV2PACKED(index, c) \
+    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0              \n\t"\
+    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1              \n\t"\
+    "psraw                $3, %%mm0                           \n\t"\
+    "psraw                $3, %%mm1                           \n\t"\
+    "movq              %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
+    "movq              %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
+    "xor            "#index", "#index"                        \n\t"\
+    ASMALIGN(4)\
+    "1:                                 \n\t"\
+    "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
+    "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
+    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
+    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
+    "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+    "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
+    "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+    "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+    "psraw                $7, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+    "psraw                $7, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+    "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+    "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+    "movq  (%0, "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
+    "movq  (%1, "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
+    "movq 8(%0, "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
+    "movq 8(%1, "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
+    "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
+    "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
+    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+    "psraw                $7, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+    "psraw                $7, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+    "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+    "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+
+#define YSCALEYUV2PACKED(index, c)  REAL_YSCALEYUV2PACKED(index, c)
+
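+/* REAL_YSCALEYUV2PACKED above and the REAL_YSCALEYUV2RGB_UV/_YA macros below
+ * share the same vertical bilinear blend in fixed point: with alpha taken
+ * from the (pre-shifted) filter value, each 16-bit sample is roughly
+ *     out = (buf1[i] >> s) + (((buf0[i] - buf1[i]) * alpha) >> 16);
+ * i.e. a lerp between the two source lines done with a single pmulhw per
+ * register (s is 7 for the packed path, 4 for the RGB path). */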
+#define REAL_YSCALEYUV2RGB_UV(index, c) \
+    "xor            "#index", "#index"  \n\t"\
+    ASMALIGN(4)\
+    "1:                                 \n\t"\
+    "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
+    "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
+    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
+    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
+    "psubw             %%mm3, %%mm2     \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+    "psubw             %%mm4, %%mm5     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0    \n\t"\
+    "pmulhw            %%mm0, %%mm2     \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+    "pmulhw            %%mm0, %%mm5     \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+    "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+    "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+    "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+    "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+    "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
+    "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
+    "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
+    "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
+    "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
+    "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
+    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+
+#define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
+    "movq  ("#b1", "#index", 2), %%mm0     \n\t" /*buf0[eax]*/\
+    "movq  ("#b2", "#index", 2), %%mm1     \n\t" /*buf1[eax]*/\
+    "movq 8("#b1", "#index", 2), %%mm6     \n\t" /*buf0[eax]*/\
+    "movq 8("#b2", "#index", 2), %%mm7     \n\t" /*buf1[eax]*/\
+    "psubw             %%mm1, %%mm0     \n\t" /* buf0[eax] - buf1[eax]*/\
+    "psubw             %%mm7, %%mm6     \n\t" /* buf0[eax] - buf1[eax]*/\
+    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6  \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+    "psraw                $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+    "psraw                $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+    "paddw             %%mm0, %%mm1     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+    "paddw             %%mm6, %%mm7     \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+
+#define REAL_YSCALEYUV2RGB_COEFF(c) \
+    "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
+    "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
+    "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
+    "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
+    "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
+    "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
+    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+    "paddw             %%mm3, %%mm4     \n\t"\
+    "movq              %%mm2, %%mm0     \n\t"\
+    "movq              %%mm5, %%mm6     \n\t"\
+    "movq              %%mm4, %%mm3     \n\t"\
+    "punpcklwd         %%mm2, %%mm2     \n\t"\
+    "punpcklwd         %%mm5, %%mm5     \n\t"\
+    "punpcklwd         %%mm4, %%mm4     \n\t"\
+    "paddw             %%mm1, %%mm2     \n\t"\
+    "paddw             %%mm1, %%mm5     \n\t"\
+    "paddw             %%mm1, %%mm4     \n\t"\
+    "punpckhwd         %%mm0, %%mm0     \n\t"\
+    "punpckhwd         %%mm6, %%mm6     \n\t"\
+    "punpckhwd         %%mm3, %%mm3     \n\t"\
+    "paddw             %%mm7, %%mm0     \n\t"\
+    "paddw             %%mm7, %%mm6     \n\t"\
+    "paddw             %%mm7, %%mm3     \n\t"\
+    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+    "packuswb          %%mm0, %%mm2     \n\t"\
+    "packuswb          %%mm6, %%mm5     \n\t"\
+    "packuswb          %%mm3, %%mm4     \n\t"\
+
+#define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
+
+#define YSCALEYUV2RGB(index, c) \
+    REAL_YSCALEYUV2RGB_UV(index, c) \
+    REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
+    REAL_YSCALEYUV2RGB_COEFF(c)
+
+#define REAL_YSCALEYUV2PACKED1(index, c) \
+    "xor            "#index", "#index"  \n\t"\
+    ASMALIGN(4)\
+    "1:                                 \n\t"\
+    "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
+    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4     \n\t" /* uvbuf0[eax+2048]*/\
+    "psraw                $7, %%mm3     \n\t" \
+    "psraw                $7, %%mm4     \n\t" \
+    "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
+    "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
+    "psraw                $7, %%mm1     \n\t" \
+    "psraw                $7, %%mm7     \n\t" \
+
+#define YSCALEYUV2PACKED1(index, c)  REAL_YSCALEYUV2PACKED1(index, c)
+
+#define REAL_YSCALEYUV2RGB1(index, c) \
+    "xor            "#index", "#index"  \n\t"\
+    ASMALIGN(4)\
+    "1:                                 \n\t"\
+    "movq     (%2, "#index"), %%mm3     \n\t" /* uvbuf0[eax]*/\
+    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4     \n\t" /* uvbuf0[eax+2048]*/\
+    "psraw                $4, %%mm3     \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+    "psraw                $4, %%mm4     \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+    "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
+    "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
+    "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
+    "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
+    "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
+    "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
+    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+    "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
+    "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
+    "psraw                $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+    "psraw                $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+    "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
+    "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
+    "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
+    "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
+    "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
+    "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
+    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+    "paddw             %%mm3, %%mm4     \n\t"\
+    "movq              %%mm2, %%mm0     \n\t"\
+    "movq              %%mm5, %%mm6     \n\t"\
+    "movq              %%mm4, %%mm3     \n\t"\
+    "punpcklwd         %%mm2, %%mm2     \n\t"\
+    "punpcklwd         %%mm5, %%mm5     \n\t"\
+    "punpcklwd         %%mm4, %%mm4     \n\t"\
+    "paddw             %%mm1, %%mm2     \n\t"\
+    "paddw             %%mm1, %%mm5     \n\t"\
+    "paddw             %%mm1, %%mm4     \n\t"\
+    "punpckhwd         %%mm0, %%mm0     \n\t"\
+    "punpckhwd         %%mm6, %%mm6     \n\t"\
+    "punpckhwd         %%mm3, %%mm3     \n\t"\
+    "paddw             %%mm7, %%mm0     \n\t"\
+    "paddw             %%mm7, %%mm6     \n\t"\
+    "paddw             %%mm7, %%mm3     \n\t"\
+    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+    "packuswb          %%mm0, %%mm2     \n\t"\
+    "packuswb          %%mm6, %%mm5     \n\t"\
+    "packuswb          %%mm3, %%mm4     \n\t"\
+
+#define YSCALEYUV2RGB1(index, c)  REAL_YSCALEYUV2RGB1(index, c)
+
+#define REAL_YSCALEYUV2PACKED1b(index, c) \
+    "xor "#index", "#index"             \n\t"\
+    ASMALIGN(4)\
+    "1:                                 \n\t"\
+    "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
+    "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
+    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
+    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
+    "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+    "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+    "psrlw                $8, %%mm3     \n\t" \
+    "psrlw                $8, %%mm4     \n\t" \
+    "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
+    "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
+    "psraw                $7, %%mm1     \n\t" \
+    "psraw                $7, %%mm7     \n\t"
+#define YSCALEYUV2PACKED1b(index, c)  REAL_YSCALEYUV2PACKED1b(index, c)
+
+// do vertical chrominance interpolation
+#define REAL_YSCALEYUV2RGB1b(index, c) \
+    "xor            "#index", "#index"  \n\t"\
+    ASMALIGN(4)\
+    "1:                                 \n\t"\
+    "movq     (%2, "#index"), %%mm2     \n\t" /* uvbuf0[eax]*/\
+    "movq     (%3, "#index"), %%mm3     \n\t" /* uvbuf1[eax]*/\
+    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5     \n\t" /* uvbuf0[eax+2048]*/\
+    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4     \n\t" /* uvbuf1[eax+2048]*/\
+    "paddw             %%mm2, %%mm3     \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+    "paddw             %%mm5, %%mm4     \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+    "psrlw                $5, %%mm3     \n\t" /*FIXME might overflow*/\
+    "psrlw                $5, %%mm4     \n\t" /*FIXME might overflow*/\
+    "psubw  "U_OFFSET"("#c"), %%mm3     \n\t" /* (U-128)8*/\
+    "psubw  "V_OFFSET"("#c"), %%mm4     \n\t" /* (V-128)8*/\
+    "movq              %%mm3, %%mm2     \n\t" /* (U-128)8*/\
+    "movq              %%mm4, %%mm5     \n\t" /* (V-128)8*/\
+    "pmulhw "UG_COEFF"("#c"), %%mm3     \n\t"\
+    "pmulhw "VG_COEFF"("#c"), %%mm4     \n\t"\
+    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+    "movq  (%0, "#index", 2), %%mm1     \n\t" /*buf0[eax]*/\
+    "movq 8(%0, "#index", 2), %%mm7     \n\t" /*buf0[eax]*/\
+    "psraw                $4, %%mm1     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+    "psraw                $4, %%mm7     \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+    "pmulhw "UB_COEFF"("#c"), %%mm2     \n\t"\
+    "pmulhw "VR_COEFF"("#c"), %%mm5     \n\t"\
+    "psubw  "Y_OFFSET"("#c"), %%mm1     \n\t" /* 8(Y-16)*/\
+    "psubw  "Y_OFFSET"("#c"), %%mm7     \n\t" /* 8(Y-16)*/\
+    "pmulhw  "Y_COEFF"("#c"), %%mm1     \n\t"\
+    "pmulhw  "Y_COEFF"("#c"), %%mm7     \n\t"\
+    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+    "paddw             %%mm3, %%mm4     \n\t"\
+    "movq              %%mm2, %%mm0     \n\t"\
+    "movq              %%mm5, %%mm6     \n\t"\
+    "movq              %%mm4, %%mm3     \n\t"\
+    "punpcklwd         %%mm2, %%mm2     \n\t"\
+    "punpcklwd         %%mm5, %%mm5     \n\t"\
+    "punpcklwd         %%mm4, %%mm4     \n\t"\
+    "paddw             %%mm1, %%mm2     \n\t"\
+    "paddw             %%mm1, %%mm5     \n\t"\
+    "paddw             %%mm1, %%mm4     \n\t"\
+    "punpckhwd         %%mm0, %%mm0     \n\t"\
+    "punpckhwd         %%mm6, %%mm6     \n\t"\
+    "punpckhwd         %%mm3, %%mm3     \n\t"\
+    "paddw             %%mm7, %%mm0     \n\t"\
+    "paddw             %%mm7, %%mm6     \n\t"\
+    "paddw             %%mm7, %%mm3     \n\t"\
+    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+    "packuswb          %%mm0, %%mm2     \n\t"\
+    "packuswb          %%mm6, %%mm5     \n\t"\
+    "packuswb          %%mm3, %%mm4     \n\t"\
+
+#define YSCALEYUV2RGB1b(index, c)  REAL_YSCALEYUV2RGB1b(index, c)
+
+#define REAL_YSCALEYUV2RGB1_ALPHA(index) \
+    "movq  (%1, "#index", 2), %%mm7     \n\t" /* abuf0[index  ]     */\
+    "movq 8(%1, "#index", 2), %%mm1     \n\t" /* abuf0[index+4]     */\
+    "psraw                $7, %%mm7     \n\t" /* abuf0[index  ] >>7 */\
+    "psraw                $7, %%mm1     \n\t" /* abuf0[index+4] >>7 */\
+    "packuswb          %%mm1, %%mm7     \n\t"
+#define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)
+
+#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
+    "movq       "#b", "#q2"     \n\t" /* B */\
+    "movq       "#r", "#t"      \n\t" /* R */\
+    "punpcklbw  "#g", "#b"      \n\t" /* GBGBGBGB 0 */\
+    "punpcklbw  "#a", "#r"      \n\t" /* ARARARAR 0 */\
+    "punpckhbw  "#g", "#q2"     \n\t" /* GBGBGBGB 2 */\
+    "punpckhbw  "#a", "#t"      \n\t" /* ARARARAR 2 */\
+    "movq       "#b", "#q0"     \n\t" /* GBGBGBGB 0 */\
+    "movq      "#q2", "#q3"     \n\t" /* GBGBGBGB 2 */\
+    "punpcklwd  "#r", "#q0"     \n\t" /* ARGBARGB 0 */\
+    "punpckhwd  "#r", "#b"      \n\t" /* ARGBARGB 1 */\
+    "punpcklwd  "#t", "#q2"     \n\t" /* ARGBARGB 2 */\
+    "punpckhwd  "#t", "#q3"     \n\t" /* ARGBARGB 3 */\
+\
+    MOVNTQ(   q0,   (dst, index, 4))\
+    MOVNTQ(    b,  8(dst, index, 4))\
+    MOVNTQ(   q2, 16(dst, index, 4))\
+    MOVNTQ(   q3, 24(dst, index, 4))\
+\
+    "add      $8, "#index"      \n\t"\
+    "cmp "#dstw", "#index"      \n\t"\
+    " jb      1b                \n\t"
+#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)  REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
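+
+/* WRITEBGR32 interleaves four registers holding packed B, G, R and A bytes
+ * into full 32-bit pixels: punpcklbw/punpckhbw pair B with G and R with A,
+ * punpcklwd/punpckhwd then merge those pairs into dwords, and four MOVNTQ
+ * stores write 8 pixels (32 bytes) per loop iteration in B,G,R,A byte order. */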
+
+#define REAL_WRITERGB16(dst, dstw, index) \
+    "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
+    "pand "MANGLE(bFC)", %%mm4  \n\t" /* G */\
+    "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
+    "psrlq           $3, %%mm2  \n\t"\
+\
+    "movq         %%mm2, %%mm1  \n\t"\
+    "movq         %%mm4, %%mm3  \n\t"\
+\
+    "punpcklbw    %%mm7, %%mm3  \n\t"\
+    "punpcklbw    %%mm5, %%mm2  \n\t"\
+    "punpckhbw    %%mm7, %%mm4  \n\t"\
+    "punpckhbw    %%mm5, %%mm1  \n\t"\
+\
+    "psllq           $3, %%mm3  \n\t"\
+    "psllq           $3, %%mm4  \n\t"\
+\
+    "por          %%mm3, %%mm2  \n\t"\
+    "por          %%mm4, %%mm1  \n\t"\
+\
+    MOVNTQ(%%mm2,  (dst, index, 2))\
+    MOVNTQ(%%mm1, 8(dst, index, 2))\
+\
+    "add             $8, "#index"   \n\t"\
+    "cmp        "#dstw", "#index"   \n\t"\
+    " jb             1b             \n\t"
+#define WRITERGB16(dst, dstw, index)  REAL_WRITERGB16(dst, dstw, index)
+
+#define REAL_WRITERGB15(dst, dstw, index) \
+    "pand "MANGLE(bF8)", %%mm2  \n\t" /* B */\
+    "pand "MANGLE(bF8)", %%mm4  \n\t" /* G */\
+    "pand "MANGLE(bF8)", %%mm5  \n\t" /* R */\
+    "psrlq           $3, %%mm2  \n\t"\
+    "psrlq           $1, %%mm5  \n\t"\
+\
+    "movq         %%mm2, %%mm1  \n\t"\
+    "movq         %%mm4, %%mm3  \n\t"\
+\
+    "punpcklbw    %%mm7, %%mm3  \n\t"\
+    "punpcklbw    %%mm5, %%mm2  \n\t"\
+    "punpckhbw    %%mm7, %%mm4  \n\t"\
+    "punpckhbw    %%mm5, %%mm1  \n\t"\
+\
+    "psllq           $2, %%mm3  \n\t"\
+    "psllq           $2, %%mm4  \n\t"\
+\
+    "por          %%mm3, %%mm2  \n\t"\
+    "por          %%mm4, %%mm1  \n\t"\
+\
+    MOVNTQ(%%mm2,  (dst, index, 2))\
+    MOVNTQ(%%mm1, 8(dst, index, 2))\
+\
+    "add             $8, "#index"   \n\t"\
+    "cmp        "#dstw", "#index"   \n\t"\
+    " jb             1b             \n\t"
+#define WRITERGB15(dst, dstw, index)  REAL_WRITERGB15(dst, dstw, index)
+
+#define WRITEBGR24OLD(dst, dstw, index) \
+    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+    "movq      %%mm2, %%mm1             \n\t" /* B */\
+    "movq      %%mm5, %%mm6             \n\t" /* R */\
+    "punpcklbw %%mm4, %%mm2             \n\t" /* GBGBGBGB 0 */\
+    "punpcklbw %%mm7, %%mm5             \n\t" /* 0R0R0R0R 0 */\
+    "punpckhbw %%mm4, %%mm1             \n\t" /* GBGBGBGB 2 */\
+    "punpckhbw %%mm7, %%mm6             \n\t" /* 0R0R0R0R 2 */\
+    "movq      %%mm2, %%mm0             \n\t" /* GBGBGBGB 0 */\
+    "movq      %%mm1, %%mm3             \n\t" /* GBGBGBGB 2 */\
+    "punpcklwd %%mm5, %%mm0             \n\t" /* 0RGB0RGB 0 */\
+    "punpckhwd %%mm5, %%mm2             \n\t" /* 0RGB0RGB 1 */\
+    "punpcklwd %%mm6, %%mm1             \n\t" /* 0RGB0RGB 2 */\
+    "punpckhwd %%mm6, %%mm3             \n\t" /* 0RGB0RGB 3 */\
+\
+    "movq      %%mm0, %%mm4             \n\t" /* 0RGB0RGB 0 */\
+    "psrlq        $8, %%mm0             \n\t" /* 00RGB0RG 0 */\
+    "pand "MANGLE(bm00000111)", %%mm4   \n\t" /* 00000RGB 0 */\
+    "pand "MANGLE(bm11111000)", %%mm0   \n\t" /* 00RGB000 0.5 */\
+    "por       %%mm4, %%mm0             \n\t" /* 00RGBRGB 0 */\
+    "movq      %%mm2, %%mm4             \n\t" /* 0RGB0RGB 1 */\
+    "psllq       $48, %%mm2             \n\t" /* GB000000 1 */\
+    "por       %%mm2, %%mm0             \n\t" /* GBRGBRGB 0 */\
+\
+    "movq      %%mm4, %%mm2             \n\t" /* 0RGB0RGB 1 */\
+    "psrld       $16, %%mm4             \n\t" /* 000R000R 1 */\
+    "psrlq       $24, %%mm2             \n\t" /* 0000RGB0 1.5 */\
+    "por       %%mm4, %%mm2             \n\t" /* 000RRGBR 1 */\
+    "pand "MANGLE(bm00001111)", %%mm2   \n\t" /* 0000RGBR 1 */\
+    "movq      %%mm1, %%mm4             \n\t" /* 0RGB0RGB 2 */\
+    "psrlq        $8, %%mm1             \n\t" /* 00RGB0RG 2 */\
+    "pand "MANGLE(bm00000111)", %%mm4   \n\t" /* 00000RGB 2 */\
+    "pand "MANGLE(bm11111000)", %%mm1   \n\t" /* 00RGB000 2.5 */\
+    "por       %%mm4, %%mm1             \n\t" /* 00RGBRGB 2 */\
+    "movq      %%mm1, %%mm4             \n\t" /* 00RGBRGB 2 */\
+    "psllq       $32, %%mm1             \n\t" /* BRGB0000 2 */\
+    "por       %%mm1, %%mm2             \n\t" /* BRGBRGBR 1 */\
+\
+    "psrlq       $32, %%mm4             \n\t" /* 000000RG 2.5 */\
+    "movq      %%mm3, %%mm5             \n\t" /* 0RGB0RGB 3 */\
+    "psrlq        $8, %%mm3             \n\t" /* 00RGB0RG 3 */\
+    "pand "MANGLE(bm00000111)", %%mm5   \n\t" /* 00000RGB 3 */\
+    "pand "MANGLE(bm11111000)", %%mm3   \n\t" /* 00RGB000 3.5 */\
+    "por       %%mm5, %%mm3             \n\t" /* 00RGBRGB 3 */\
+    "psllq       $16, %%mm3             \n\t" /* RGBRGB00 3 */\
+    "por       %%mm4, %%mm3             \n\t" /* RGBRGBRG 2.5 */\
+\
+    MOVNTQ(%%mm0,   (dst))\
+    MOVNTQ(%%mm2,  8(dst))\
+    MOVNTQ(%%mm3, 16(dst))\
+    "add         $24, "#dst"            \n\t"\
+\
+    "add          $8, "#index"          \n\t"\
+    "cmp     "#dstw", "#index"          \n\t"\
+    " jb          1b                    \n\t"
+
+#define WRITEBGR24MMX(dst, dstw, index) \
+    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+    "movq      %%mm2, %%mm1     \n\t" /* B */\
+    "movq      %%mm5, %%mm6     \n\t" /* R */\
+    "punpcklbw %%mm4, %%mm2     \n\t" /* GBGBGBGB 0 */\
+    "punpcklbw %%mm7, %%mm5     \n\t" /* 0R0R0R0R 0 */\
+    "punpckhbw %%mm4, %%mm1     \n\t" /* GBGBGBGB 2 */\
+    "punpckhbw %%mm7, %%mm6     \n\t" /* 0R0R0R0R 2 */\
+    "movq      %%mm2, %%mm0     \n\t" /* GBGBGBGB 0 */\
+    "movq      %%mm1, %%mm3     \n\t" /* GBGBGBGB 2 */\
+    "punpcklwd %%mm5, %%mm0     \n\t" /* 0RGB0RGB 0 */\
+    "punpckhwd %%mm5, %%mm2     \n\t" /* 0RGB0RGB 1 */\
+    "punpcklwd %%mm6, %%mm1     \n\t" /* 0RGB0RGB 2 */\
+    "punpckhwd %%mm6, %%mm3     \n\t" /* 0RGB0RGB 3 */\
+\
+    "movq      %%mm0, %%mm4     \n\t" /* 0RGB0RGB 0 */\
+    "movq      %%mm2, %%mm6     \n\t" /* 0RGB0RGB 1 */\
+    "movq      %%mm1, %%mm5     \n\t" /* 0RGB0RGB 2 */\
+    "movq      %%mm3, %%mm7     \n\t" /* 0RGB0RGB 3 */\
+\
+    "psllq       $40, %%mm0     \n\t" /* RGB00000 0 */\
+    "psllq       $40, %%mm2     \n\t" /* RGB00000 1 */\
+    "psllq       $40, %%mm1     \n\t" /* RGB00000 2 */\
+    "psllq       $40, %%mm3     \n\t" /* RGB00000 3 */\
+\
+    "punpckhdq %%mm4, %%mm0     \n\t" /* 0RGBRGB0 0 */\
+    "punpckhdq %%mm6, %%mm2     \n\t" /* 0RGBRGB0 1 */\
+    "punpckhdq %%mm5, %%mm1     \n\t" /* 0RGBRGB0 2 */\
+    "punpckhdq %%mm7, %%mm3     \n\t" /* 0RGBRGB0 3 */\
+\
+    "psrlq        $8, %%mm0     \n\t" /* 00RGBRGB 0 */\
+    "movq      %%mm2, %%mm6     \n\t" /* 0RGBRGB0 1 */\
+    "psllq       $40, %%mm2     \n\t" /* GB000000 1 */\
+    "por       %%mm2, %%mm0     \n\t" /* GBRGBRGB 0 */\
+    MOVNTQ(%%mm0, (dst))\
+\
+    "psrlq       $24, %%mm6     \n\t" /* 0000RGBR 1 */\
+    "movq      %%mm1, %%mm5     \n\t" /* 0RGBRGB0 2 */\
+    "psllq       $24, %%mm1     \n\t" /* BRGB0000 2 */\
+    "por       %%mm1, %%mm6     \n\t" /* BRGBRGBR 1 */\
+    MOVNTQ(%%mm6, 8(dst))\
+\
+    "psrlq       $40, %%mm5     \n\t" /* 000000RG 2 */\
+    "psllq        $8, %%mm3     \n\t" /* RGBRGB00 3 */\
+    "por       %%mm3, %%mm5     \n\t" /* RGBRGBRG 2 */\
+    MOVNTQ(%%mm5, 16(dst))\
+\
+    "add         $24, "#dst"    \n\t"\
+\
+    "add          $8, "#index"  \n\t"\
+    "cmp     "#dstw", "#index"  \n\t"\
+    " jb          1b            \n\t"
+
+#define WRITEBGR24MMX2(dst, dstw, index) \
+    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+    "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
+    "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
+    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
+    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
+    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
+\
+    "pand   %%mm0, %%mm1        \n\t" /*    B2        B1       B0 */\
+    "pand   %%mm0, %%mm3        \n\t" /*    G2        G1       G0 */\
+    "pand   %%mm7, %%mm6        \n\t" /*       R1        R0       */\
+\
+    "psllq     $8, %%mm3        \n\t" /* G2        G1       G0    */\
+    "por    %%mm1, %%mm6        \n\t"\
+    "por    %%mm3, %%mm6        \n\t"\
+    MOVNTQ(%%mm6, (dst))\
+\
+    "psrlq     $8, %%mm4        \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
+    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
+    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
+    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
+\
+    "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5       B4        B3    */\
+    "pand   %%mm7, %%mm3        \n\t" /*       G4        G3       */\
+    "pand   %%mm0, %%mm6        \n\t" /*    R4        R3       R2 */\
+\
+    "por    %%mm1, %%mm3        \n\t" /* B5    G4 B4     G3 B3    */\
+    "por    %%mm3, %%mm6        \n\t"\
+    MOVNTQ(%%mm6, 8(dst))\
+\
+    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6  B7 B6 B6 B7 */\
+    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
+    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
+\
+    "pand   %%mm7, %%mm1        \n\t" /*       B7        B6       */\
+    "pand   %%mm0, %%mm3        \n\t" /*    G7        G6       G5 */\
+    "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7       R6        R5    */\
+\
+    "por    %%mm1, %%mm3        \n\t"\
+    "por    %%mm3, %%mm6        \n\t"\
+    MOVNTQ(%%mm6, 16(dst))\
+\
+    "add      $24, "#dst"       \n\t"\
+\
+    "add       $8, "#index"     \n\t"\
+    "cmp  "#dstw", "#index"     \n\t"\
+    " jb       1b               \n\t"
+
+#if COMPILE_TEMPLATE_MMX2
+#undef WRITEBGR24
+#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX2(dst, dstw, index)
+#else
+#undef WRITEBGR24
+#define WRITEBGR24(dst, dstw, index)  WRITEBGR24MMX(dst, dstw, index)
+#endif
+
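+/* WRITEBGR24 packing: the plain MMX version first builds 0RGB dwords and then
+ * shifts/ORs them into three contiguous quadwords of tightly packed 24-bit
+ * pixels, while the MMX2 version uses pshufw plus the ff_M24A/B/C masks to
+ * scatter the B, G and R bytes directly into their final positions; both
+ * store 8 pixels (24 bytes) per loop iteration. */
+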
+#define REAL_WRITEYUY2(dst, dstw, index) \
+    "packuswb  %%mm3, %%mm3     \n\t"\
+    "packuswb  %%mm4, %%mm4     \n\t"\
+    "packuswb  %%mm7, %%mm1     \n\t"\
+    "punpcklbw %%mm4, %%mm3     \n\t"\
+    "movq      %%mm1, %%mm7     \n\t"\
+    "punpcklbw %%mm3, %%mm1     \n\t"\
+    "punpckhbw %%mm3, %%mm7     \n\t"\
+\
+    MOVNTQ(%%mm1, (dst, index, 2))\
+    MOVNTQ(%%mm7, 8(dst, index, 2))\
+\
+    "add          $8, "#index"  \n\t"\
+    "cmp     "#dstw", "#index"  \n\t"\
+    " jb          1b            \n\t"
+#define WRITEYUY2(dst, dstw, index)  REAL_WRITEYUY2(dst, dstw, index)
+
+
+static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                                    const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc,
+                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
+{
+#if COMPILE_TEMPLATE_MMX
+    if(!(c->flags & SWS_BITEXACT)) {
+        if (c->flags & SWS_ACCURATE_RND) {
+            if (uDest) {
+                YSCALEYUV2YV12X_ACCURATE(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
+                YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
+            }
+            if (CONFIG_SWSCALE_ALPHA && aDest) {
+                YSCALEYUV2YV12X_ACCURATE(   "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
+            }
+
+            YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
+        } else {
+            if (uDest) {
+                YSCALEYUV2YV12X(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
+                YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
+            }
+            if (CONFIG_SWSCALE_ALPHA && aDest) {
+                YSCALEYUV2YV12X(   "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
+            }
+
+            YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
+        }
+        return;
+    }
+#endif
+#if COMPILE_TEMPLATE_ALTIVEC
+    yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
+                          chrFilter, chrSrc, chrFilterSize,
+                          dest, uDest, vDest, dstW, chrDstW);
+#else //COMPILE_TEMPLATE_ALTIVEC
+    yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
+                chrFilter, chrSrc, chrFilterSize,
+                alpSrc, dest, uDest, vDest, aDest, dstW, chrDstW);
+#endif //!COMPILE_TEMPLATE_ALTIVEC
+}
+
+static inline void RENAME(yuv2nv12X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                                     const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                                     uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, enum PixelFormat dstFormat)
+{
+    yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
+                 chrFilter, chrSrc, chrFilterSize,
+                 dest, uDest, dstW, chrDstW, dstFormat);
+}
+
+static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
+                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
+{
+    int i;
+#if COMPILE_TEMPLATE_MMX
+    if(!(c->flags & SWS_BITEXACT)) {
+        long p= 4;
+        const uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
+        uint8_t *dst[4]= {aDest, dest, uDest, vDest};
+        x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
+
+        if (c->flags & SWS_ACCURATE_RND) {
+            while(p--) {
+                if (dst[p]) {
+                    __asm__ volatile(
+                        YSCALEYUV2YV121_ACCURATE
+                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
+                        "g" (-counter[p])
+                        : "%"REG_a
+                    );
+                }
+            }
+        } else {
+            while(p--) {
+                if (dst[p]) {
+                    __asm__ volatile(
+                        YSCALEYUV2YV121
+                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
+                        "g" (-counter[p])
+                        : "%"REG_a
+                    );
+                }
+            }
+        }
+        return;
+    }
+#endif
+    for (i=0; i<dstW; i++) {
+        int val= (lumSrc[i]+64)>>7;
+
+        if (val&256) {
+            if (val<0) val=0;
+            else       val=255;
+        }
+
+        dest[i]= val;
+    }
+
+    if (uDest)
+        for (i=0; i<chrDstW; i++) {
+            int u=(chrSrc[i       ]+64)>>7;
+            int v=(chrSrc[i + VOFW]+64)>>7;
+
+            if ((u|v)&256) {
+                if (u<0)        u=0;
+                else if (u>255) u=255;
+                if (v<0)        v=0;
+                else if (v>255) v=255;
+            }
+
+            uDest[i]= u;
+            vDest[i]= v;
+        }
+
+    if (CONFIG_SWSCALE_ALPHA && aDest)
+        for (i=0; i<dstW; i++) {
+            int val= (alpSrc[i]+64)>>7;
+            aDest[i]= av_clip_uint8(val);
+        }
+}
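+
+/* The C fallback in yuv2yuv1 converts the 16-bit horizontal-scaler output
+ * (which effectively carries 7 fractional bits) back to 8 bits with
+ * round-to-nearest, roughly
+ *     val = (src[i] + 64) >> 7;      // +64 adds 0.5 before the shift
+ * The (val & 256) test is a cheap range check that only fires when the result
+ * landed outside 0..255, in which case it is clamped to that range. */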
+
+
+/**
+ * vertical scale YV12 to RGB
+ */
+static inline void RENAME(yuv2packedX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
+                                       const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
+                                       const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
+{
+#if COMPILE_TEMPLATE_MMX
+    x86_reg dummy=0;
+    if(!(c->flags & SWS_BITEXACT)) {
+        if (c->flags & SWS_ACCURATE_RND) {
+            switch(c->dstFormat) {
+            case PIX_FMT_RGB32:
+                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
+                    YSCALEYUV2PACKEDX_ACCURATE
+                    YSCALEYUV2RGBX
+                    "movq                      %%mm2, "U_TEMP"(%0)  \n\t"
+                    "movq                      %%mm4, "V_TEMP"(%0)  \n\t"
+                    "movq                      %%mm5, "Y_TEMP"(%0)  \n\t"
+                    YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
+                    "movq               "Y_TEMP"(%0), %%mm5         \n\t"
+                    "psraw                        $3, %%mm1         \n\t"
+                    "psraw                        $3, %%mm7         \n\t"
+                    "packuswb                  %%mm7, %%mm1         \n\t"
+                    WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
+
+                    YSCALEYUV2PACKEDX_END
+                } else {
+                    YSCALEYUV2PACKEDX_ACCURATE
+                    YSCALEYUV2RGBX
+                    "pcmpeqd %%mm7, %%mm7 \n\t"
+                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+
+                    YSCALEYUV2PACKEDX_END
+                }
+                return;
+            case PIX_FMT_BGR24:
+                YSCALEYUV2PACKEDX_ACCURATE
+                YSCALEYUV2RGBX
+                "pxor %%mm7, %%mm7 \n\t"
+                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
+                "add %4, %%"REG_c"                        \n\t"
+                WRITEBGR24(%%REGc, %5, %%REGa)
+
+
+                :: "r" (&c->redDither),
+                "m" (dummy), "m" (dummy), "m" (dummy),
+                "r" (dest), "m" (dstW)
+                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
+                );
+                return;
+            case PIX_FMT_RGB555:
+                YSCALEYUV2PACKEDX_ACCURATE
+                YSCALEYUV2RGBX
+                "pxor %%mm7, %%mm7 \n\t"
+                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
+                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
+                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
+#endif
+
+                WRITERGB15(%4, %5, %%REGa)
+                YSCALEYUV2PACKEDX_END
+                return;
+            case PIX_FMT_RGB565:
+                YSCALEYUV2PACKEDX_ACCURATE
+                YSCALEYUV2RGBX
+                "pxor %%mm7, %%mm7 \n\t"
+                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
+                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
+                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
+#endif
+
+                WRITERGB16(%4, %5, %%REGa)
+                YSCALEYUV2PACKEDX_END
+                return;
+            case PIX_FMT_YUYV422:
+                YSCALEYUV2PACKEDX_ACCURATE
+                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+                "psraw $3, %%mm3    \n\t"
+                "psraw $3, %%mm4    \n\t"
+                "psraw $3, %%mm1    \n\t"
+                "psraw $3, %%mm7    \n\t"
+                WRITEYUY2(%4, %5, %%REGa)
+                YSCALEYUV2PACKEDX_END
+                return;
+            }
+        } else {
+            switch(c->dstFormat) {
+            case PIX_FMT_RGB32:
+                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
+                    YSCALEYUV2PACKEDX
+                    YSCALEYUV2RGBX
+                    YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
+                    "psraw                        $3, %%mm1         \n\t"
+                    "psraw                        $3, %%mm7         \n\t"
+                    "packuswb                  %%mm7, %%mm1         \n\t"
+                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
+                    YSCALEYUV2PACKEDX_END
+                } else {
+                    YSCALEYUV2PACKEDX
+                    YSCALEYUV2RGBX
+                    "pcmpeqd %%mm7, %%mm7 \n\t"
+                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+                    YSCALEYUV2PACKEDX_END
+                }
+                return;
+            case PIX_FMT_BGR24:
+                YSCALEYUV2PACKEDX
+                YSCALEYUV2RGBX
+                "pxor                    %%mm7, %%mm7       \n\t"
+                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"   \n\t" //FIXME optimize
+                "add                        %4, %%"REG_c"   \n\t"
+                WRITEBGR24(%%REGc, %5, %%REGa)
+
+                :: "r" (&c->redDither),
+                "m" (dummy), "m" (dummy), "m" (dummy),
+                "r" (dest),  "m" (dstW)
+                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
+                );
+                return;
+            case PIX_FMT_RGB555:
+                YSCALEYUV2PACKEDX
+                YSCALEYUV2RGBX
+                "pxor %%mm7, %%mm7 \n\t"
+                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
+                "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
+                "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
+#endif
+
+                WRITERGB15(%4, %5, %%REGa)
+                YSCALEYUV2PACKEDX_END
+                return;
+            case PIX_FMT_RGB565:
+                YSCALEYUV2PACKEDX
+                YSCALEYUV2RGBX
+                "pxor %%mm7, %%mm7 \n\t"
+                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                "paddusb "BLUE_DITHER"(%0), %%mm2  \n\t"
+                "paddusb "GREEN_DITHER"(%0), %%mm4  \n\t"
+                "paddusb "RED_DITHER"(%0), %%mm5  \n\t"
+#endif
+
+                WRITERGB16(%4, %5, %%REGa)
+                YSCALEYUV2PACKEDX_END
+                return;
+            case PIX_FMT_YUYV422:
+                YSCALEYUV2PACKEDX
+                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+                "psraw $3, %%mm3    \n\t"
+                "psraw $3, %%mm4    \n\t"
+                "psraw $3, %%mm1    \n\t"
+                "psraw $3, %%mm7    \n\t"
+                WRITEYUY2(%4, %5, %%REGa)
+                YSCALEYUV2PACKEDX_END
+                return;
+            }
+        }
+    }
+#endif /* COMPILE_TEMPLATE_MMX */
+#if COMPILE_TEMPLATE_ALTIVEC
+    /* The following list of supported dstFormat values should
+       match what's found in the body of ff_yuv2packedX_altivec() */
+    if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
+         (c->dstFormat==PIX_FMT_ABGR  || c->dstFormat==PIX_FMT_BGRA  ||
+          c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
+          c->dstFormat==PIX_FMT_RGBA  || c->dstFormat==PIX_FMT_ARGB))
+            ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,
+                                   chrFilter, chrSrc, chrFilterSize,
+                                   dest, dstW, dstY);
+    else
+#endif
+        yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
+                       chrFilter, chrSrc, chrFilterSize,
+                       alpSrc, dest, dstW, dstY);
+}
+
+/**
+ * vertical bilinear scale YV12 to RGB
+ */
+static inline void RENAME(yuv2packed2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
+                          const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
+{
+    int  yalpha1=4095- yalpha;
+    int uvalpha1=4095-uvalpha;
+    int i;
+
+#if COMPILE_TEMPLATE_MMX
+    if(!(c->flags & SWS_BITEXACT)) {
+        switch(c->dstFormat) {
+        //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
+        case PIX_FMT_RGB32:
+            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
+#if ARCH_X86_64
+                __asm__ volatile(
+                    YSCALEYUV2RGB(%%r8, %5)
+                    YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
+                    "psraw                  $3, %%mm1       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
+                    "psraw                  $3, %%mm7       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
+                    "packuswb            %%mm7, %%mm1       \n\t"
+                    WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
+                    "a" (&c->redDither)
+                    ,"r" (abuf0), "r" (abuf1)
+                    : "%r8"
+                );
+#else
+                *(const uint16_t **)(&c->u_temp)=abuf0;
+                *(const uint16_t **)(&c->v_temp)=abuf1;
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2RGB(%%REGBP, %5)
+                    "push                   %0              \n\t"
+                    "push                   %1              \n\t"
+                    "mov          "U_TEMP"(%5), %0          \n\t"
+                    "mov          "V_TEMP"(%5), %1          \n\t"
+                    YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
+                    "psraw                  $3, %%mm1       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
+                    "psraw                  $3, %%mm7       \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
+                    "packuswb            %%mm7, %%mm1       \n\t"
+                    "pop                    %1              \n\t"
+                    "pop                    %0              \n\t"
+                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+#endif
+            } else {
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2RGB(%%REGBP, %5)
+                    "pcmpeqd %%mm7, %%mm7                   \n\t"
+                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+            }
+            return;
+        case PIX_FMT_BGR24:
+            __asm__ volatile(
+                "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                "mov        %4, %%"REG_b"               \n\t"
+                "push %%"REG_BP"                        \n\t"
+                YSCALEYUV2RGB(%%REGBP, %5)
+                "pxor    %%mm7, %%mm7                   \n\t"
+                WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+                "pop %%"REG_BP"                         \n\t"
+                "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                "a" (&c->redDither)
+            );
+            return;
+        case PIX_FMT_RGB555:
+            __asm__ volatile(
+                "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                "mov        %4, %%"REG_b"               \n\t"
+                "push %%"REG_BP"                        \n\t"
+                YSCALEYUV2RGB(%%REGBP, %5)
+                "pxor    %%mm7, %%mm7                   \n\t"
+                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
+                "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
+                "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
+#endif
+
+                WRITERGB15(%%REGb, 8280(%5), %%REGBP)
+                "pop %%"REG_BP"                         \n\t"
+                "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                "a" (&c->redDither)
+            );
+            return;
+        case PIX_FMT_RGB565:
+            __asm__ volatile(
+                "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                "mov        %4, %%"REG_b"               \n\t"
+                "push %%"REG_BP"                        \n\t"
+                YSCALEYUV2RGB(%%REGBP, %5)
+                "pxor    %%mm7, %%mm7                   \n\t"
+                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
+                "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
+                "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
+#endif
+
+                WRITERGB16(%%REGb, 8280(%5), %%REGBP)
+                "pop %%"REG_BP"                         \n\t"
+                "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                "a" (&c->redDither)
+            );
+            return;
+        case PIX_FMT_YUYV422:
+            __asm__ volatile(
+                "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                "mov %4, %%"REG_b"                        \n\t"
+                "push %%"REG_BP"                        \n\t"
+                YSCALEYUV2PACKED(%%REGBP, %5)
+                WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+                "pop %%"REG_BP"                         \n\t"
+                "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                "a" (&c->redDither)
+            );
+            return;
+        default: break;
+        }
+    }
+#endif //COMPILE_TEMPLATE_MMX
+    YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C(void,0), YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
+}
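+
+/* yuv2packed2 blends two adjacent source lines; with yalpha1 = 4095 - yalpha
+ * the C reference path computes, roughly,
+ *     Y = (buf0[i]*yalpha1 + buf1[i]*yalpha) >> 19;
+ * (7 fractional bits from the horizontal scaler plus 12 from the blend
+ * weights), and the MMX paths above reach the same result through the
+ * YSCALEYUV2RGB/YSCALEYUV2PACKED macros. */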
+
+/**
+ * YV12 to RGB without scaling or interpolating
+ */
+static inline void RENAME(yuv2packed1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
+                          const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, enum PixelFormat dstFormat, int flags, int y)
+{
+    const int yalpha1=0;
+    int i;
+
+    const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
+    const int yalpha= 4096; //FIXME ...
+
+    if (flags&SWS_FULL_CHR_H_INT) {
+        c->yuv2packed2(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
+        return;
+    }
+
+#if COMPILE_TEMPLATE_MMX
+    if(!(flags & SWS_BITEXACT)) {
+        if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
+            switch(dstFormat) {
+            case PIX_FMT_RGB32:
+                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
+                    __asm__ volatile(
+                        "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                        "mov        %4, %%"REG_b"               \n\t"
+                        "push %%"REG_BP"                        \n\t"
+                        YSCALEYUV2RGB1(%%REGBP, %5)
+                        YSCALEYUV2RGB1_ALPHA(%%REGBP)
+                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+                        "pop %%"REG_BP"                         \n\t"
+                        "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                        :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                        "a" (&c->redDither)
+                    );
+                } else {
+                    __asm__ volatile(
+                        "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                        "mov        %4, %%"REG_b"               \n\t"
+                        "push %%"REG_BP"                        \n\t"
+                        YSCALEYUV2RGB1(%%REGBP, %5)
+                        "pcmpeqd %%mm7, %%mm7                   \n\t"
+                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+                        "pop %%"REG_BP"                         \n\t"
+                        "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                        :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                        "a" (&c->redDither)
+                    );
+                }
+                return;
+            case PIX_FMT_BGR24:
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2RGB1(%%REGBP, %5)
+                    "pxor    %%mm7, %%mm7                   \n\t"
+                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+                return;
+            case PIX_FMT_RGB555:
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2RGB1(%%REGBP, %5)
+                    "pxor    %%mm7, %%mm7                   \n\t"
+                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                    "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
+                    "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
+                    "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
+#endif
+                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+                return;
+            case PIX_FMT_RGB565:
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2RGB1(%%REGBP, %5)
+                    "pxor    %%mm7, %%mm7                   \n\t"
+                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                    "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
+                    "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
+                    "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
+#endif
+
+                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+                return;
+            case PIX_FMT_YUYV422:
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2PACKED1(%%REGBP, %5)
+                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+                return;
+            }
+        } else {
+            switch(dstFormat) {
+            case PIX_FMT_RGB32:
+                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
+                    __asm__ volatile(
+                        "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                        "mov        %4, %%"REG_b"               \n\t"
+                        "push %%"REG_BP"                        \n\t"
+                        YSCALEYUV2RGB1b(%%REGBP, %5)
+                        YSCALEYUV2RGB1_ALPHA(%%REGBP)
+                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+                        "pop %%"REG_BP"                         \n\t"
+                        "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                        :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                        "a" (&c->redDither)
+                    );
+                } else {
+                    __asm__ volatile(
+                        "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                        "mov        %4, %%"REG_b"               \n\t"
+                        "push %%"REG_BP"                        \n\t"
+                        YSCALEYUV2RGB1b(%%REGBP, %5)
+                        "pcmpeqd %%mm7, %%mm7                   \n\t"
+                        WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
+                        "pop %%"REG_BP"                         \n\t"
+                        "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                        :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                        "a" (&c->redDither)
+                    );
+                }
+                return;
+            case PIX_FMT_BGR24:
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2RGB1b(%%REGBP, %5)
+                    "pxor    %%mm7, %%mm7                   \n\t"
+                    WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+                return;
+            case PIX_FMT_RGB555:
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2RGB1b(%%REGBP, %5)
+                    "pxor    %%mm7, %%mm7                   \n\t"
+                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                    "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
+                    "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
+                    "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
+#endif
+                    WRITERGB15(%%REGb, 8280(%5), %%REGBP)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+                return;
+            case PIX_FMT_RGB565:
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2RGB1b(%%REGBP, %5)
+                    "pxor    %%mm7, %%mm7                   \n\t"
+                    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+#ifdef DITHER1XBPP
+                    "paddusb "BLUE_DITHER"(%5), %%mm2      \n\t"
+                    "paddusb "GREEN_DITHER"(%5), %%mm4      \n\t"
+                    "paddusb "RED_DITHER"(%5), %%mm5      \n\t"
+#endif
+
+                    WRITERGB16(%%REGb, 8280(%5), %%REGBP)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+                return;
+            case PIX_FMT_YUYV422:
+                __asm__ volatile(
+                    "mov %%"REG_b", "ESP_OFFSET"(%5)        \n\t"
+                    "mov        %4, %%"REG_b"               \n\t"
+                    "push %%"REG_BP"                        \n\t"
+                    YSCALEYUV2PACKED1b(%%REGBP, %5)
+                    WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+                    "pop %%"REG_BP"                         \n\t"
+                    "mov "ESP_OFFSET"(%5), %%"REG_b"        \n\t"
+
+                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+                    "a" (&c->redDither)
+                );
+                return;
+            }
+        }
+    }
+#endif /* COMPILE_TEMPLATE_MMX */
+    if (uvalpha < 2048) {
+        YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
+    } else {
+        YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
+    }
+}
+
+//FIXME yuy2* can read up to 7 samples too much
+
+static inline void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    __asm__ volatile(
+        "movq "MANGLE(bm01010101)", %%mm2           \n\t"
+        "mov                    %0, %%"REG_a"       \n\t"
+        "1:                                         \n\t"
+        "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
+        "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
+        "pand                %%mm2, %%mm0           \n\t"
+        "pand                %%mm2, %%mm1           \n\t"
+        "packuswb            %%mm1, %%mm0           \n\t"
+        "movq                %%mm0, (%2, %%"REG_a") \n\t"
+        "add                    $8, %%"REG_a"       \n\t"
+        " js                    1b                  \n\t"
+        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
+        : "%"REG_a
+    );
+#else
+    int i;
+    for (i=0; i<width; i++)
+        dst[i]= src[2*i];
+#endif
+}
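Editorial note (not part of the commit): the MMX path above assumes the constant bm01010101 is the byte mask 0x00FF00FF00FF00FF, so the pand keeps the even (luma) bytes of the packed YUYV data and packuswb squeezes them back together. A scalar sketch of the same idea, with a hypothetical helper name and little-endian loads assumed:

    #include <stdint.h>
    #include <string.h>

    static void yuy2_to_y_sketch(uint8_t *dst, const uint8_t *src, long width)
    {
        for (long i = 0; i < width; i += 4) {   /* width assumed a multiple of 4 */
            uint64_t v;
            memcpy(&v, src + 2*i, 8);           /* four (Y, chroma) byte pairs   */
            v &= 0x00FF00FF00FF00FFULL;         /* "pand": keep the Y bytes      */
            for (int j = 0; j < 4; j++)         /* "packuswb": close the gaps    */
                dst[i + j] = (uint8_t)(v >> (16*j));
        }
    }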
+
+static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    __asm__ volatile(
+        "movq "MANGLE(bm01010101)", %%mm4           \n\t"
+        "mov                    %0, %%"REG_a"       \n\t"
+        "1:                                         \n\t"
+        "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
+        "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
+        "psrlw                  $8, %%mm0           \n\t"
+        "psrlw                  $8, %%mm1           \n\t"
+        "packuswb            %%mm1, %%mm0           \n\t"
+        "movq                %%mm0, %%mm1           \n\t"
+        "psrlw                  $8, %%mm0           \n\t"
+        "pand                %%mm4, %%mm1           \n\t"
+        "packuswb            %%mm0, %%mm0           \n\t"
+        "packuswb            %%mm1, %%mm1           \n\t"
+        "movd                %%mm0, (%3, %%"REG_a") \n\t"
+        "movd                %%mm1, (%2, %%"REG_a") \n\t"
+        "add                    $4, %%"REG_a"       \n\t"
+        " js                    1b                  \n\t"
+        : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
+        : "%"REG_a
+    );
+#else
+    int i;
+    for (i=0; i<width; i++) {
+        dstU[i]= src1[4*i + 1];
+        dstV[i]= src1[4*i + 3];
+    }
+#endif
+    assert(src1 == src2);
+}
+
+static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    __asm__ volatile(
+        "mov                    %0, %%"REG_a"       \n\t"
+        "1:                                         \n\t"
+        "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
+        "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
+        "movq    (%2, %%"REG_a",2), %%mm2           \n\t"
+        "movq   8(%2, %%"REG_a",2), %%mm3           \n\t"
+        "psrlw                  $8, %%mm0           \n\t"
+        "psrlw                  $8, %%mm1           \n\t"
+        "psrlw                  $8, %%mm2           \n\t"
+        "psrlw                  $8, %%mm3           \n\t"
+        "packuswb            %%mm1, %%mm0           \n\t"
+        "packuswb            %%mm3, %%mm2           \n\t"
+        "movq                %%mm0, (%3, %%"REG_a") \n\t"
+        "movq                %%mm2, (%4, %%"REG_a") \n\t"
+        "add                    $8, %%"REG_a"       \n\t"
+        " js                    1b                  \n\t"
+        : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
+        : "%"REG_a
+    );
+#else
+    int i;
+    for (i=0; i<width; i++) {
+        dstU[i]= src1[2*i + 1];
+        dstV[i]= src2[2*i + 1];
+    }
+#endif
+}
+
+/* This is almost identical to the previous, and exists only because
+ * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
+static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    __asm__ volatile(
+        "mov                  %0, %%"REG_a"         \n\t"
+        "1:                                         \n\t"
+        "movq  (%1, %%"REG_a",2), %%mm0             \n\t"
+        "movq 8(%1, %%"REG_a",2), %%mm1             \n\t"
+        "psrlw                $8, %%mm0             \n\t"
+        "psrlw                $8, %%mm1             \n\t"
+        "packuswb          %%mm1, %%mm0             \n\t"
+        "movq              %%mm0, (%2, %%"REG_a")   \n\t"
+        "add                  $8, %%"REG_a"         \n\t"
+        " js                  1b                    \n\t"
+        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
+        : "%"REG_a
+    );
+#else
+    int i;
+    for (i=0; i<width; i++)
+        dst[i]= src[2*i+1];
+#endif
+}
+
+static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    __asm__ volatile(
+        "movq "MANGLE(bm01010101)", %%mm4           \n\t"
+        "mov                    %0, %%"REG_a"       \n\t"
+        "1:                                         \n\t"
+        "movq    (%1, %%"REG_a",4), %%mm0           \n\t"
+        "movq   8(%1, %%"REG_a",4), %%mm1           \n\t"
+        "pand                %%mm4, %%mm0           \n\t"
+        "pand                %%mm4, %%mm1           \n\t"
+        "packuswb            %%mm1, %%mm0           \n\t"
+        "movq                %%mm0, %%mm1           \n\t"
+        "psrlw                  $8, %%mm0           \n\t"
+        "pand                %%mm4, %%mm1           \n\t"
+        "packuswb            %%mm0, %%mm0           \n\t"
+        "packuswb            %%mm1, %%mm1           \n\t"
+        "movd                %%mm0, (%3, %%"REG_a") \n\t"
+        "movd                %%mm1, (%2, %%"REG_a") \n\t"
+        "add                    $4, %%"REG_a"       \n\t"
+        " js                    1b                  \n\t"
+        : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
+        : "%"REG_a
+    );
+#else
+    int i;
+    for (i=0; i<width; i++) {
+        dstU[i]= src1[4*i + 0];
+        dstV[i]= src1[4*i + 2];
+    }
+#endif
+    assert(src1 == src2);
+}
+
+static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    __asm__ volatile(
+        "movq "MANGLE(bm01010101)", %%mm4           \n\t"
+        "mov                    %0, %%"REG_a"       \n\t"
+        "1:                                         \n\t"
+        "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
+        "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
+        "movq    (%2, %%"REG_a",2), %%mm2           \n\t"
+        "movq   8(%2, %%"REG_a",2), %%mm3           \n\t"
+        "pand                %%mm4, %%mm0           \n\t"
+        "pand                %%mm4, %%mm1           \n\t"
+        "pand                %%mm4, %%mm2           \n\t"
+        "pand                %%mm4, %%mm3           \n\t"
+        "packuswb            %%mm1, %%mm0           \n\t"
+        "packuswb            %%mm3, %%mm2           \n\t"
+        "movq                %%mm0, (%3, %%"REG_a") \n\t"
+        "movq                %%mm2, (%4, %%"REG_a") \n\t"
+        "add                    $8, %%"REG_a"       \n\t"
+        " js                    1b                  \n\t"
+        : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
+        : "%"REG_a
+    );
+#else
+    int i;
+    for (i=0; i<width; i++) {
+        dstU[i]= src1[2*i];
+        dstV[i]= src2[2*i];
+    }
+#endif
+}
+
+static inline void RENAME(nvXXtoUV)(uint8_t *dst1, uint8_t *dst2,
+                                    const uint8_t *src, long width)
+{
+#if COMPILE_TEMPLATE_MMX
+    __asm__ volatile(
+        "movq "MANGLE(bm01010101)", %%mm4           \n\t"
+        "mov                    %0, %%"REG_a"       \n\t"
+        "1:                                         \n\t"
+        "movq    (%1, %%"REG_a",2), %%mm0           \n\t"
+        "movq   8(%1, %%"REG_a",2), %%mm1           \n\t"
+        "movq                %%mm0, %%mm2           \n\t"
+        "movq                %%mm1, %%mm3           \n\t"
+        "pand                %%mm4, %%mm0           \n\t"
+        "pand                %%mm4, %%mm1           \n\t"
+        "psrlw                  $8, %%mm2           \n\t"
+        "psrlw                  $8, %%mm3           \n\t"
+        "packuswb            %%mm1, %%mm0           \n\t"
+        "packuswb            %%mm3, %%mm2           \n\t"
+        "movq                %%mm0, (%2, %%"REG_a") \n\t"
+        "movq                %%mm2, (%3, %%"REG_a") \n\t"
+        "add                    $8, %%"REG_a"       \n\t"
+        " js                    1b                  \n\t"
+        : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst1+width), "r" (dst2+width)
+        : "%"REG_a
+    );
+#else
+    int i;
+    for (i = 0; i < width; i++) {
+        dst1[i] = src[2*i+0];
+        dst2[i] = src[2*i+1];
+    }
+#endif
+}
+
+static inline void RENAME(nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
+                                    const uint8_t *src1, const uint8_t *src2,
+                                    long width, uint32_t *unused)
+{
+    RENAME(nvXXtoUV)(dstU, dstV, src1, width);
+}
+
+static inline void RENAME(nv21ToUV)(uint8_t *dstU, uint8_t *dstV,
+                                    const uint8_t *src1, const uint8_t *src2,
+                                    long width, uint32_t *unused)
+{
+    RENAME(nvXXtoUV)(dstV, dstU, src1, width);
+}
+
+#if COMPILE_TEMPLATE_MMX
+static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, enum PixelFormat srcFormat)
+{
+
+    if(srcFormat == PIX_FMT_BGR24) {
+        __asm__ volatile(
+            "movq  "MANGLE(ff_bgr24toY1Coeff)", %%mm5       \n\t"
+            "movq  "MANGLE(ff_bgr24toY2Coeff)", %%mm6       \n\t"
+            :
+        );
+    } else {
+        __asm__ volatile(
+            "movq  "MANGLE(ff_rgb24toY1Coeff)", %%mm5       \n\t"
+            "movq  "MANGLE(ff_rgb24toY2Coeff)", %%mm6       \n\t"
+            :
+        );
+    }
+
+    __asm__ volatile(
+        "movq  "MANGLE(ff_bgr24toYOffset)", %%mm4   \n\t"
+        "mov                        %2, %%"REG_a"   \n\t"
+        "pxor                    %%mm7, %%mm7       \n\t"
+        "1:                                         \n\t"
+        PREFETCH"               64(%0)              \n\t"
+        "movd                     (%0), %%mm0       \n\t"
+        "movd                    2(%0), %%mm1       \n\t"
+        "movd                    6(%0), %%mm2       \n\t"
+        "movd                    8(%0), %%mm3       \n\t"
+        "add                       $12, %0          \n\t"
+        "punpcklbw               %%mm7, %%mm0       \n\t"
+        "punpcklbw               %%mm7, %%mm1       \n\t"
+        "punpcklbw               %%mm7, %%mm2       \n\t"
+        "punpcklbw               %%mm7, %%mm3       \n\t"
+        "pmaddwd                 %%mm5, %%mm0       \n\t"
+        "pmaddwd                 %%mm6, %%mm1       \n\t"
+        "pmaddwd                 %%mm5, %%mm2       \n\t"
+        "pmaddwd                 %%mm6, %%mm3       \n\t"
+        "paddd                   %%mm1, %%mm0       \n\t"
+        "paddd                   %%mm3, %%mm2       \n\t"
+        "paddd                   %%mm4, %%mm0       \n\t"
+        "paddd                   %%mm4, %%mm2       \n\t"
+        "psrad                     $15, %%mm0       \n\t"
+        "psrad                     $15, %%mm2       \n\t"
+        "packssdw                %%mm2, %%mm0       \n\t"
+        "packuswb                %%mm0, %%mm0       \n\t"
+        "movd                %%mm0, (%1, %%"REG_a") \n\t"
+        "add                        $4, %%"REG_a"   \n\t"
+        " js                        1b              \n\t"
+    : "+r" (src)
+    : "r" (dst+width), "g" ((x86_reg)-width)
+    : "%"REG_a
+    );
+}
+
+static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, long width, enum PixelFormat srcFormat)
+{
+    __asm__ volatile(
+        "movq                    24+%4, %%mm6       \n\t"
+        "mov                        %3, %%"REG_a"   \n\t"
+        "pxor                    %%mm7, %%mm7       \n\t"
+        "1:                                         \n\t"
+        PREFETCH"               64(%0)              \n\t"
+        "movd                     (%0), %%mm0       \n\t"
+        "movd                    2(%0), %%mm1       \n\t"
+        "punpcklbw               %%mm7, %%mm0       \n\t"
+        "punpcklbw               %%mm7, %%mm1       \n\t"
+        "movq                    %%mm0, %%mm2       \n\t"
+        "movq                    %%mm1, %%mm3       \n\t"
+        "pmaddwd                    %4, %%mm0       \n\t"
+        "pmaddwd                  8+%4, %%mm1       \n\t"
+        "pmaddwd                 16+%4, %%mm2       \n\t"
+        "pmaddwd                 %%mm6, %%mm3       \n\t"
+        "paddd                   %%mm1, %%mm0       \n\t"
+        "paddd                   %%mm3, %%mm2       \n\t"
+
+        "movd                    6(%0), %%mm1       \n\t"
+        "movd                    8(%0), %%mm3       \n\t"
+        "add                       $12, %0          \n\t"
+        "punpcklbw               %%mm7, %%mm1       \n\t"
+        "punpcklbw               %%mm7, %%mm3       \n\t"
+        "movq                    %%mm1, %%mm4       \n\t"
+        "movq                    %%mm3, %%mm5       \n\t"
+        "pmaddwd                    %4, %%mm1       \n\t"
+        "pmaddwd                  8+%4, %%mm3       \n\t"
+        "pmaddwd                 16+%4, %%mm4       \n\t"
+        "pmaddwd                 %%mm6, %%mm5       \n\t"
+        "paddd                   %%mm3, %%mm1       \n\t"
+        "paddd                   %%mm5, %%mm4       \n\t"
+
+        "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3       \n\t"
+        "paddd                   %%mm3, %%mm0       \n\t"
+        "paddd                   %%mm3, %%mm2       \n\t"
+        "paddd                   %%mm3, %%mm1       \n\t"
+        "paddd                   %%mm3, %%mm4       \n\t"
+        "psrad                     $15, %%mm0       \n\t"
+        "psrad                     $15, %%mm2       \n\t"
+        "psrad                     $15, %%mm1       \n\t"
+        "psrad                     $15, %%mm4       \n\t"
+        "packssdw                %%mm1, %%mm0       \n\t"
+        "packssdw                %%mm4, %%mm2       \n\t"
+        "packuswb                %%mm0, %%mm0       \n\t"
+        "packuswb                %%mm2, %%mm2       \n\t"
+        "movd                %%mm0, (%1, %%"REG_a") \n\t"
+        "movd                %%mm2, (%2, %%"REG_a") \n\t"
+        "add                        $4, %%"REG_a"   \n\t"
+        " js                        1b              \n\t"
+    : "+r" (src)
+    : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0])
+    : "%"REG_a
+    );
+}
+#endif
+
+static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
+#else
+    int i;
+    for (i=0; i<width; i++) {
+        int b= src[i*3+0];
+        int g= src[i*3+1];
+        int r= src[i*3+2];
+
+        dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+    }
+#endif /* COMPILE_TEMPLATE_MMX */
+}
+
+static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
+#else
+    int i;
+    for (i=0; i<width; i++) {
+        int b= src1[3*i + 0];
+        int g= src1[3*i + 1];
+        int r= src1[3*i + 2];
+
+        dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
+        dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
+    }
+#endif /* COMPILE_TEMPLATE_MMX */
+    assert(src1 == src2);
+}
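Editorial note (not part of the commit): the bias constants used above and in the Y path fold the YUV offset and a rounding term into a single addition. With S = RGB2YUV_SHIFT,

    33  << (S-1) == (16  << S) + (1 << (S-1))
    257 << (S-1) == (128 << S) + (1 << (S-1))

so after the final >> S the luma gets its +16 offset and the chroma is centred on 128, each rounded to nearest.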
+
+static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
+{
+    int i;
+    for (i=0; i<width; i++) {
+        int b= src1[6*i + 0] + src1[6*i + 3];
+        int g= src1[6*i + 1] + src1[6*i + 4];
+        int r= src1[6*i + 2] + src1[6*i + 5];
+
+        dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
+        dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
+    }
+    assert(src1 == src2);
+}
+
+static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
+#else
+    int i;
+    for (i=0; i<width; i++) {
+        int r= src[i*3+0];
+        int g= src[i*3+1];
+        int b= src[i*3+2];
+
+        dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+    }
+#endif
+}
+
+static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
+{
+#if COMPILE_TEMPLATE_MMX
+    assert(src1==src2);
+    RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
+#else
+    int i;
+    assert(src1==src2);
+    for (i=0; i<width; i++) {
+        int r= src1[3*i + 0];
+        int g= src1[3*i + 1];
+        int b= src1[3*i + 2];
+
+        dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
+        dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
+    }
+#endif
+}
+
+static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
+{
+    int i;
+    assert(src1==src2);
+    for (i=0; i<width; i++) {
+        int r= src1[6*i + 0] + src1[6*i + 3];
+        int g= src1[6*i + 1] + src1[6*i + 4];
+        int b= src1[6*i + 2] + src1[6*i + 5];
+
+        dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
+        dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
+    }
+}
+
+
+// bilinear / bicubic scaling
+static inline void RENAME(hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, int xInc,
+                                  const int16_t *filter, const int16_t *filterPos, long filterSize)
+{
+#if COMPILE_TEMPLATE_MMX
+    assert(filterSize % 4 == 0 && filterSize>0);
+    if (filterSize==4) { // Always true for upscaling, sometimes for down, too.
+        x86_reg counter= -2*dstW;
+        filter-= counter*2;
+        filterPos-= counter/2;
+        dst-= counter/2;
+        __asm__ volatile(
+#if defined(PIC)
+            "push            %%"REG_b"              \n\t"
+#endif
+            "pxor                %%mm7, %%mm7       \n\t"
+            "push           %%"REG_BP"              \n\t" // we use 7 regs here ...
+            "mov             %%"REG_a", %%"REG_BP"  \n\t"
+            ASMALIGN(4)
+            "1:                                     \n\t"
+            "movzwl   (%2, %%"REG_BP"), %%eax       \n\t"
+            "movzwl  2(%2, %%"REG_BP"), %%ebx       \n\t"
+            "movq  (%1, %%"REG_BP", 4), %%mm1       \n\t"
+            "movq 8(%1, %%"REG_BP", 4), %%mm3       \n\t"
+            "movd      (%3, %%"REG_a"), %%mm0       \n\t"
+            "movd      (%3, %%"REG_b"), %%mm2       \n\t"
+            "punpcklbw           %%mm7, %%mm0       \n\t"
+            "punpcklbw           %%mm7, %%mm2       \n\t"
+            "pmaddwd             %%mm1, %%mm0       \n\t"
+            "pmaddwd             %%mm2, %%mm3       \n\t"
+            "movq                %%mm0, %%mm4       \n\t"
+            "punpckldq           %%mm3, %%mm0       \n\t"
+            "punpckhdq           %%mm3, %%mm4       \n\t"
+            "paddd               %%mm4, %%mm0       \n\t"
+            "psrad                  $7, %%mm0       \n\t"
+            "packssdw            %%mm0, %%mm0       \n\t"
+            "movd                %%mm0, (%4, %%"REG_BP")    \n\t"
+            "add                    $4, %%"REG_BP"  \n\t"
+            " jnc                   1b              \n\t"
+
+            "pop            %%"REG_BP"              \n\t"
+#if defined(PIC)
+            "pop             %%"REG_b"              \n\t"
+#endif
+            : "+a" (counter)
+            : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
+#if !defined(PIC)
+            : "%"REG_b
+#endif
+        );
+    } else if (filterSize==8) {
+        x86_reg counter= -2*dstW;
+        filter-= counter*4;
+        filterPos-= counter/2;
+        dst-= counter/2;
+        __asm__ volatile(
+#if defined(PIC)
+            "push             %%"REG_b"             \n\t"
+#endif
+            "pxor                 %%mm7, %%mm7      \n\t"
+            "push            %%"REG_BP"             \n\t" // we use 7 regs here ...
+            "mov              %%"REG_a", %%"REG_BP" \n\t"
+            ASMALIGN(4)
+            "1:                                     \n\t"
+            "movzwl    (%2, %%"REG_BP"), %%eax      \n\t"
+            "movzwl   2(%2, %%"REG_BP"), %%ebx      \n\t"
+            "movq   (%1, %%"REG_BP", 8), %%mm1      \n\t"
+            "movq 16(%1, %%"REG_BP", 8), %%mm3      \n\t"
+            "movd       (%3, %%"REG_a"), %%mm0      \n\t"
+            "movd       (%3, %%"REG_b"), %%mm2      \n\t"
+            "punpcklbw            %%mm7, %%mm0      \n\t"
+            "punpcklbw            %%mm7, %%mm2      \n\t"
+            "pmaddwd              %%mm1, %%mm0      \n\t"
+            "pmaddwd              %%mm2, %%mm3      \n\t"
+
+            "movq  8(%1, %%"REG_BP", 8), %%mm1      \n\t"
+            "movq 24(%1, %%"REG_BP", 8), %%mm5      \n\t"
+            "movd      4(%3, %%"REG_a"), %%mm4      \n\t"
+            "movd      4(%3, %%"REG_b"), %%mm2      \n\t"
+            "punpcklbw            %%mm7, %%mm4      \n\t"
+            "punpcklbw            %%mm7, %%mm2      \n\t"
+            "pmaddwd              %%mm1, %%mm4      \n\t"
+            "pmaddwd              %%mm2, %%mm5      \n\t"
+            "paddd                %%mm4, %%mm0      \n\t"
+            "paddd                %%mm5, %%mm3      \n\t"
+            "movq                 %%mm0, %%mm4      \n\t"
+            "punpckldq            %%mm3, %%mm0      \n\t"
+            "punpckhdq            %%mm3, %%mm4      \n\t"
+            "paddd                %%mm4, %%mm0      \n\t"
+            "psrad                   $7, %%mm0      \n\t"
+            "packssdw             %%mm0, %%mm0      \n\t"
+            "movd                 %%mm0, (%4, %%"REG_BP")   \n\t"
+            "add                     $4, %%"REG_BP" \n\t"
+            " jnc                    1b             \n\t"
+
+            "pop             %%"REG_BP"             \n\t"
+#if defined(PIC)
+            "pop              %%"REG_b"             \n\t"
+#endif
+            : "+a" (counter)
+            : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
+#if !defined(PIC)
+            : "%"REG_b
+#endif
+        );
+    } else {
+        const uint8_t *offset = src+filterSize;
+        x86_reg counter= -2*dstW;
+        //filter-= counter*filterSize/2;
+        filterPos-= counter/2;
+        dst-= counter/2;
+        __asm__ volatile(
+            "pxor                  %%mm7, %%mm7     \n\t"
+            ASMALIGN(4)
+            "1:                                     \n\t"
+            "mov                      %2, %%"REG_c" \n\t"
+            "movzwl      (%%"REG_c", %0), %%eax     \n\t"
+            "movzwl     2(%%"REG_c", %0), %%edx     \n\t"
+            "mov                      %5, %%"REG_c" \n\t"
+            "pxor                  %%mm4, %%mm4     \n\t"
+            "pxor                  %%mm5, %%mm5     \n\t"
+            "2:                                     \n\t"
+            "movq                   (%1), %%mm1     \n\t"
+            "movq               (%1, %6), %%mm3     \n\t"
+            "movd (%%"REG_c", %%"REG_a"), %%mm0     \n\t"
+            "movd (%%"REG_c", %%"REG_d"), %%mm2     \n\t"
+            "punpcklbw             %%mm7, %%mm0     \n\t"
+            "punpcklbw             %%mm7, %%mm2     \n\t"
+            "pmaddwd               %%mm1, %%mm0     \n\t"
+            "pmaddwd               %%mm2, %%mm3     \n\t"
+            "paddd                 %%mm3, %%mm5     \n\t"
+            "paddd                 %%mm0, %%mm4     \n\t"
+            "add                      $8, %1        \n\t"
+            "add                      $4, %%"REG_c" \n\t"
+            "cmp                      %4, %%"REG_c" \n\t"
+            " jb                      2b            \n\t"
+            "add                      %6, %1        \n\t"
+            "movq                  %%mm4, %%mm0     \n\t"
+            "punpckldq             %%mm5, %%mm4     \n\t"
+            "punpckhdq             %%mm5, %%mm0     \n\t"
+            "paddd                 %%mm0, %%mm4     \n\t"
+            "psrad                    $7, %%mm4     \n\t"
+            "packssdw              %%mm4, %%mm4     \n\t"
+            "mov                      %3, %%"REG_a" \n\t"
+            "movd                  %%mm4, (%%"REG_a", %0)   \n\t"
+            "add                      $4, %0        \n\t"
+            " jnc                     1b            \n\t"
+
+            : "+r" (counter), "+r" (filter)
+            : "m" (filterPos), "m" (dst), "m"(offset),
+            "m" (src), "r" ((x86_reg)filterSize*2)
+            : "%"REG_a, "%"REG_c, "%"REG_d
+        );
+    }
+#else
+#if COMPILE_TEMPLATE_ALTIVEC
+    hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
+#else
+    int i;
+    for (i=0; i<dstW; i++) {
+        int j;
+        int srcPos= filterPos[i];
+        int val=0;
+        //printf("filterPos: %d\n", filterPos[i]);
+        for (j=0; j<filterSize; j++) {
+            //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
+            val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+        }
+        //filter += hFilterSize;
+        dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
+        //dst[i] = val>>7;
+    }
+#endif /* COMPILE_TEMPLATE_ALTIVEC */
+#endif /* COMPILE_TEMPLATE_MMX */
+}
+
+//FIXME all pal and rgb srcFormats could do this conversion as well
+//FIXME all scalers more complex than bilinear could do half of this transform
+static void RENAME(chrRangeToJpeg)(uint16_t *dst, int width)
+{
+    int i;
+    for (i = 0; i < width; i++) {
+        dst[i     ] = (FFMIN(dst[i     ],30775)*4663 - 9289992)>>12; //-264
+        dst[i+VOFW] = (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
+    }
+}
+static void RENAME(chrRangeFromJpeg)(uint16_t *dst, int width)
+{
+    int i;
+    for (i = 0; i < width; i++) {
+        dst[i     ] = (dst[i     ]*1799 + 4081085)>>11; //1469
+        dst[i+VOFW] = (dst[i+VOFW]*1799 + 4081085)>>11; //1469
+    }
+}
+static void RENAME(lumRangeToJpeg)(uint16_t *dst, int width)
+{
+    int i;
+    for (i = 0; i < width; i++)
+        dst[i] = (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
+}
+static void RENAME(lumRangeFromJpeg)(uint16_t *dst, int width)
+{
+    int i;
+    for (i = 0; i < width; i++)
+        dst[i] = (dst[i]*14071 + 33561947)>>14;
+}
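Editorial note (not part of the commit): the *RangeToJpeg/FromJpeg constants can be sanity-checked by hand if one assumes the intermediate samples are carried as pixel_value << 7, i.e. limited-range luma spans 16<<7 = 2048 .. 235<<7 = 30080. A hypothetical standalone check of the lumRangeToJpeg pair:

    #include <assert.h>

    static void check_lum_to_jpeg(void)
    {
        assert(((2048  * 19077 - 39057361) >> 14) == 0);      /* 16  -> 0      */
        assert(((30080 * 19077 - 39057361) >> 14) == 32640);  /* 235 -> 255<<7 */
    }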
+
+#define FAST_BILINEAR_X86 \
+    "subl    %%edi, %%esi    \n\t" /*  src[xx+1] - src[xx] */                   \
+    "imull   %%ecx, %%esi    \n\t" /* (src[xx+1] - src[xx])*xalpha */           \
+    "shll      $16, %%edi    \n\t"                                              \
+    "addl    %%edi, %%esi    \n\t" /* src[xx+1]*xalpha + src[xx]*(1-xalpha) */  \
+    "mov        %1, %%"REG_D"\n\t"                                              \
+    "shrl       $9, %%esi    \n\t"                                              \
+
+static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
+                                        long dstWidth, const uint8_t *src, int srcW,
+                                        int xInc)
+{
+#if ARCH_X86
+#if COMPILE_TEMPLATE_MMX2
+    int32_t *filterPos = c->hLumFilterPos;
+    int16_t *filter    = c->hLumFilter;
+    int     canMMX2BeUsed  = c->canMMX2BeUsed;
+    void    *mmx2FilterCode= c->lumMmx2FilterCode;
+    int i;
+#if defined(PIC)
+    DECLARE_ALIGNED(8, uint64_t, ebxsave);
+#endif
+    if (canMMX2BeUsed) {
+        __asm__ volatile(
+#if defined(PIC)
+            "mov               %%"REG_b", %5        \n\t"
+#endif
+            "pxor                  %%mm7, %%mm7     \n\t"
+            "mov                      %0, %%"REG_c" \n\t"
+            "mov                      %1, %%"REG_D" \n\t"
+            "mov                      %2, %%"REG_d" \n\t"
+            "mov                      %3, %%"REG_b" \n\t"
+            "xor               %%"REG_a", %%"REG_a" \n\t" // i
+            PREFETCH"        (%%"REG_c")            \n\t"
+            PREFETCH"      32(%%"REG_c")            \n\t"
+            PREFETCH"      64(%%"REG_c")            \n\t"
+
+#if ARCH_X86_64
+
+#define CALL_MMX2_FILTER_CODE \
+            "movl            (%%"REG_b"), %%esi     \n\t"\
+            "call                    *%4            \n\t"\
+            "movl (%%"REG_b", %%"REG_a"), %%esi     \n\t"\
+            "add               %%"REG_S", %%"REG_c" \n\t"\
+            "add               %%"REG_a", %%"REG_D" \n\t"\
+            "xor               %%"REG_a", %%"REG_a" \n\t"\
+
+#else
+
+#define CALL_MMX2_FILTER_CODE \
+            "movl (%%"REG_b"), %%esi        \n\t"\
+            "call         *%4                       \n\t"\
+            "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
+            "add               %%"REG_a", %%"REG_D" \n\t"\
+            "xor               %%"REG_a", %%"REG_a" \n\t"\
+
+#endif /* ARCH_X86_64 */
+
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+
+#if defined(PIC)
+            "mov                      %5, %%"REG_b" \n\t"
+#endif
+            :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
+            "m" (mmx2FilterCode)
+#if defined(PIC)
+            ,"m" (ebxsave)
+#endif
+            : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+#if !defined(PIC)
+            ,"%"REG_b
+#endif
+        );
+        for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
+    } else {
+#endif /* COMPILE_TEMPLATE_MMX2 */
+    x86_reg xInc_shr16 = xInc >> 16;
+    uint16_t xInc_mask = xInc & 0xffff;
+    //NO MMX just normal asm ...
+    __asm__ volatile(
+        "xor %%"REG_a", %%"REG_a"            \n\t" // i
+        "xor %%"REG_d", %%"REG_d"            \n\t" // xx
+        "xorl    %%ecx, %%ecx                \n\t" // xalpha
+        ASMALIGN(4)
+        "1:                                  \n\t"
+        "movzbl    (%0, %%"REG_d"), %%edi    \n\t" //src[xx]
+        "movzbl   1(%0, %%"REG_d"), %%esi    \n\t" //src[xx+1]
+        FAST_BILINEAR_X86
+        "movw     %%si, (%%"REG_D", %%"REG_a", 2)   \n\t"
+        "addw       %4, %%cx                 \n\t" //xalpha += xInc&0xFFFF
+        "adc        %3, %%"REG_d"            \n\t" //xx+= xInc>>16 + carry
+
+        "movzbl    (%0, %%"REG_d"), %%edi    \n\t" //src[xx]
+        "movzbl   1(%0, %%"REG_d"), %%esi    \n\t" //src[xx+1]
+        FAST_BILINEAR_X86
+        "movw     %%si, 2(%%"REG_D", %%"REG_a", 2)  \n\t"
+        "addw       %4, %%cx                 \n\t" //xalpha += xInc&0xFFFF
+        "adc        %3, %%"REG_d"            \n\t" //xx+= xInc>>16 + carry
+
+
+        "add        $2, %%"REG_a"            \n\t"
+        "cmp        %2, %%"REG_a"            \n\t"
+        " jb        1b                       \n\t"
+
+
+        :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
+        : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
+    );
+#if COMPILE_TEMPLATE_MMX2
+    } //if MMX2 can't be used
+#endif
+#else
+    int i;
+    unsigned int xpos=0;
+    for (i=0;i<dstWidth;i++) {
+        register unsigned int xx=xpos>>16;
+        register unsigned int xalpha=(xpos&0xFFFF)>>9;
+        dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
+        xpos+=xInc;
+    }
+#endif /* ARCH_X86 */
+}
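Editorial note (not part of the commit): the non-MMX2 x86 loop above keeps the 16.16 fixed-point source position split across two registers; the addw/adc pair adds the fractional part of xInc and folds the carry out of that addition into the integer index. A scalar sketch of the stepping, using a hypothetical helper name and the same 7-bit blend weight as FAST_BILINEAR_X86:

    #include <stdint.h>

    static void hyscale_fast_sketch(int16_t *dst, const uint8_t *src,
                                    long dstWidth, int xInc)
    {
        unsigned xx = 0, frac = 0;                 /* integer / fractional position */
        for (long i = 0; i < dstWidth; i++) {
            int w = frac >> 9;                     /* 7-bit blend weight            */
            dst[i] = (src[xx] << 7) + (src[xx + 1] - src[xx]) * w;
            frac += xInc & 0xFFFF;                 /* "addw": add low 16 bits       */
            xx   += (xInc >> 16) + (frac >> 16);   /* "adc": integer step + carry   */
            frac &= 0xFFFF;
        }
    }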
+
+// *** horizontal scale Y line to temp buffer
+static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src, int srcW, int xInc,
+                                   const int16_t *hLumFilter,
+                                   const int16_t *hLumFilterPos, int hLumFilterSize,
+                                   uint8_t *formatConvBuffer,
+                                   uint32_t *pal, int isAlpha)
+{
+    void (*toYV12)(uint8_t *, const uint8_t *, long, uint32_t *) = isAlpha ? c->alpToYV12 : c->lumToYV12;
+    void (*convertRange)(uint16_t *, int) = isAlpha ? NULL : c->lumConvertRange;
+
+    src += isAlpha ? c->alpSrcOffset : c->lumSrcOffset;
+
+    if (toYV12) {
+        toYV12(formatConvBuffer, src, srcW, pal);
+        src= formatConvBuffer;
+    }
+
+    if (!c->hyscale_fast) {
+        c->hScale(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
+    } else { // fast bilinear upscale / crap downscale
+        c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc);
+    }
+
+    if (convertRange)
+        convertRange(dst, dstWidth);
+}
+
+static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
+                                        long dstWidth, const uint8_t *src1,
+                                        const uint8_t *src2, int srcW, int xInc)
+{
+#if ARCH_X86
+#if COMPILE_TEMPLATE_MMX2
+    int32_t *filterPos = c->hChrFilterPos;
+    int16_t *filter    = c->hChrFilter;
+    int     canMMX2BeUsed  = c->canMMX2BeUsed;
+    void    *mmx2FilterCode= c->chrMmx2FilterCode;
+    int i;
+#if defined(PIC)
+    DECLARE_ALIGNED(8, uint64_t, ebxsave);
+#endif
+    if (canMMX2BeUsed) {
+        __asm__ volatile(
+#if defined(PIC)
+            "mov          %%"REG_b", %6         \n\t"
+#endif
+            "pxor             %%mm7, %%mm7      \n\t"
+            "mov                 %0, %%"REG_c"  \n\t"
+            "mov                 %1, %%"REG_D"  \n\t"
+            "mov                 %2, %%"REG_d"  \n\t"
+            "mov                 %3, %%"REG_b"  \n\t"
+            "xor          %%"REG_a", %%"REG_a"  \n\t" // i
+            PREFETCH"   (%%"REG_c")             \n\t"
+            PREFETCH" 32(%%"REG_c")             \n\t"
+            PREFETCH" 64(%%"REG_c")             \n\t"
+
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            "xor          %%"REG_a", %%"REG_a"  \n\t" // i
+            "mov                 %5, %%"REG_c"  \n\t" // src
+            "mov                 %1, %%"REG_D"  \n\t" // buf1
+            "add              $"AV_STRINGIFY(VOF)", %%"REG_D"  \n\t"
+            PREFETCH"   (%%"REG_c")             \n\t"
+            PREFETCH" 32(%%"REG_c")             \n\t"
+            PREFETCH" 64(%%"REG_c")             \n\t"
+
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+            CALL_MMX2_FILTER_CODE
+
+#if defined(PIC)
+            "mov %6, %%"REG_b"    \n\t"
+#endif
+            :: "m" (src1), "m" (dst), "m" (filter), "m" (filterPos),
+            "m" (mmx2FilterCode), "m" (src2)
+#if defined(PIC)
+            ,"m" (ebxsave)
+#endif
+            : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+#if !defined(PIC)
+            ,"%"REG_b
+#endif
+        );
+        for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
+            //printf("%d %d %d\n", dstWidth, i, srcW);
+            dst[i] = src1[srcW-1]*128;
+            dst[i+VOFW] = src2[srcW-1]*128;
+        }
+    } else {
+#endif /* COMPILE_TEMPLATE_MMX2 */
+        x86_reg xInc_shr16 = (x86_reg) (xInc >> 16);
+        uint16_t xInc_mask = xInc & 0xffff;
+        __asm__ volatile(
+            "xor %%"REG_a", %%"REG_a"               \n\t" // i
+            "xor %%"REG_d", %%"REG_d"               \n\t" // xx
+            "xorl    %%ecx, %%ecx                   \n\t" // xalpha
+            ASMALIGN(4)
+            "1:                                     \n\t"
+            "mov        %0, %%"REG_S"               \n\t"
+            "movzbl  (%%"REG_S", %%"REG_d"), %%edi  \n\t" //src[xx]
+            "movzbl 1(%%"REG_S", %%"REG_d"), %%esi  \n\t" //src[xx+1]
+            FAST_BILINEAR_X86
+            "movw     %%si, (%%"REG_D", %%"REG_a", 2)   \n\t"
+
+            "movzbl    (%5, %%"REG_d"), %%edi       \n\t" //src[xx]
+            "movzbl   1(%5, %%"REG_d"), %%esi       \n\t" //src[xx+1]
+            FAST_BILINEAR_X86
+            "movw     %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2)   \n\t"
+
+            "addw       %4, %%cx                    \n\t" //xalpha += xInc&0xFFFF
+            "adc        %3, %%"REG_d"               \n\t" //xx+= xInc>>16 + carry
+            "add        $1, %%"REG_a"               \n\t"
+            "cmp        %2, %%"REG_a"               \n\t"
+            " jb        1b                          \n\t"
+
+/* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
+which is needed to support GCC 4.0. */
+#if ARCH_X86_64 && AV_GCC_VERSION_AT_LEAST(3,4)
+            :: "m" (src1), "m" (dst), "g" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
+#else
+            :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
+#endif
+            "r" (src2)
+            : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
+        );
+#if COMPILE_TEMPLATE_MMX2
+    } //if MMX2 can't be used
+#endif
+#else
+    int i;
+    unsigned int xpos=0;
+    for (i=0;i<dstWidth;i++) {
+        register unsigned int xx=xpos>>16;
+        register unsigned int xalpha=(xpos&0xFFFF)>>9;
+        dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
+        dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
+        /* slower
+        dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
+        dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
+        */
+        xpos+=xInc;
+    }
+#endif /* ARCH_X86 */
+}
+
+inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src1, const uint8_t *src2,
+                                   int srcW, int xInc, const int16_t *hChrFilter,
+                                   const int16_t *hChrFilterPos, int hChrFilterSize,
+                                   uint8_t *formatConvBuffer,
+                                   uint32_t *pal)
+{
+
+    src1 += c->chrSrcOffset;
+    src2 += c->chrSrcOffset;
+
+    if (c->chrToYV12) {
+        c->chrToYV12(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
+        src1= formatConvBuffer;
+        src2= formatConvBuffer+VOFW;
+    }
+
+    if (!c->hcscale_fast) {
+        c->hScale(dst     , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
+        c->hScale(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
+    } else { // fast bilinear upscale / crap downscale
+        c->hcscale_fast(c, dst, dstWidth, src1, src2, srcW, xInc);
+    }
+
+    if (c->chrConvertRange)
+        c->chrConvertRange(dst, dstWidth);
+}
+
+#define DEBUG_SWSCALE_BUFFERS 0
+#define DEBUG_BUFFERS(...) if (DEBUG_SWSCALE_BUFFERS) av_log(c, AV_LOG_DEBUG, __VA_ARGS__)
+
+static int RENAME(swScale)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                           int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    /* load a few things into local vars to make the code more readable and faster */
+    const int srcW= c->srcW;
+    const int dstW= c->dstW;
+    const int dstH= c->dstH;
+    const int chrDstW= c->chrDstW;
+    const int chrSrcW= c->chrSrcW;
+    const int lumXInc= c->lumXInc;
+    const int chrXInc= c->chrXInc;
+    const enum PixelFormat dstFormat= c->dstFormat;
+    const int flags= c->flags;
+    int16_t *vLumFilterPos= c->vLumFilterPos;
+    int16_t *vChrFilterPos= c->vChrFilterPos;
+    int16_t *hLumFilterPos= c->hLumFilterPos;
+    int16_t *hChrFilterPos= c->hChrFilterPos;
+    int16_t *vLumFilter= c->vLumFilter;
+    int16_t *vChrFilter= c->vChrFilter;
+    int16_t *hLumFilter= c->hLumFilter;
+    int16_t *hChrFilter= c->hChrFilter;
+    int32_t *lumMmxFilter= c->lumMmxFilter;
+    int32_t *chrMmxFilter= c->chrMmxFilter;
+    int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
+    const int vLumFilterSize= c->vLumFilterSize;
+    const int vChrFilterSize= c->vChrFilterSize;
+    const int hLumFilterSize= c->hLumFilterSize;
+    const int hChrFilterSize= c->hChrFilterSize;
+    int16_t **lumPixBuf= c->lumPixBuf;
+    int16_t **chrPixBuf= c->chrPixBuf;
+    int16_t **alpPixBuf= c->alpPixBuf;
+    const int vLumBufSize= c->vLumBufSize;
+    const int vChrBufSize= c->vChrBufSize;
+    uint8_t *formatConvBuffer= c->formatConvBuffer;
+    const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
+    const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
+    int lastDstY;
+    uint32_t *pal=c->pal_yuv;
+
+    /* vars which will change and which we need to store back in the context */
+    int dstY= c->dstY;
+    int lumBufIndex= c->lumBufIndex;
+    int chrBufIndex= c->chrBufIndex;
+    int lastInLumBuf= c->lastInLumBuf;
+    int lastInChrBuf= c->lastInChrBuf;
+
+    if (isPacked(c->srcFormat)) {
+        src[0]=
+        src[1]=
+        src[2]=
+        src[3]= src[0];
+        srcStride[0]=
+        srcStride[1]=
+        srcStride[2]=
+        srcStride[3]= srcStride[0];
+    }
+    srcStride[1]<<= c->vChrDrop;
+    srcStride[2]<<= c->vChrDrop;
+
+    DEBUG_BUFFERS("swScale() %p[%d] %p[%d] %p[%d] %p[%d] -> %p[%d] %p[%d] %p[%d] %p[%d]\n",
+                  src[0], srcStride[0], src[1], srcStride[1], src[2], srcStride[2], src[3], srcStride[3],
+                  dst[0], dstStride[0], dst[1], dstStride[1], dst[2], dstStride[2], dst[3], dstStride[3]);
+    DEBUG_BUFFERS("srcSliceY: %d srcSliceH: %d dstY: %d dstH: %d\n",
+                   srcSliceY,    srcSliceH,    dstY,    dstH);
+    DEBUG_BUFFERS("vLumFilterSize: %d vLumBufSize: %d vChrFilterSize: %d vChrBufSize: %d\n",
+                   vLumFilterSize,    vLumBufSize,    vChrFilterSize,    vChrBufSize);
+
+    if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0 || dstStride[3]%8 != 0) {
+        static int warnedAlready=0; //FIXME move this into the context perhaps
+        if (flags & SWS_PRINT_INFO && !warnedAlready) {
+            av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
+                   "         ->cannot do aligned memory accesses anymore\n");
+            warnedAlready=1;
+        }
+    }
+
+    /* Note the user might start scaling the picture in the middle so this
+       will not get executed. This is not really intended but works
+       currently, so people might do it. */
+    if (srcSliceY ==0) {
+        lumBufIndex=-1;
+        chrBufIndex=-1;
+        dstY=0;
+        lastInLumBuf= -1;
+        lastInChrBuf= -1;
+    }
+
+    lastDstY= dstY;
+
+    for (;dstY < dstH; dstY++) {
+        unsigned char *dest =dst[0]+dstStride[0]*dstY;
+        const int chrDstY= dstY>>c->chrDstVSubSample;
+        unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
+        unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
+        unsigned char *aDest=(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3]+dstStride[3]*dstY : NULL;
+
+        const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
+        const int firstLumSrcY2= vLumFilterPos[FFMIN(dstY | ((1<<c->chrDstVSubSample) - 1), dstH-1)];
+        const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
+        int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
+        int lastLumSrcY2=firstLumSrcY2+ vLumFilterSize -1; // Last line needed as input
+        int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
+        int enough_lines;
+
+        //handle holes (FAST_BILINEAR & weird filters)
+        if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
+        if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
+        assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
+        assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
+
+        DEBUG_BUFFERS("dstY: %d\n", dstY);
+        DEBUG_BUFFERS("\tfirstLumSrcY: %d lastLumSrcY: %d lastInLumBuf: %d\n",
+                         firstLumSrcY,    lastLumSrcY,    lastInLumBuf);
+        DEBUG_BUFFERS("\tfirstChrSrcY: %d lastChrSrcY: %d lastInChrBuf: %d\n",
+                         firstChrSrcY,    lastChrSrcY,    lastInChrBuf);
+
+        // Do we have enough lines in this slice to output the dstY line?
+        enough_lines = lastLumSrcY2 < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample);
+
+        if (!enough_lines) {
+            lastLumSrcY = srcSliceY + srcSliceH - 1;
+            lastChrSrcY = chrSrcSliceY + chrSrcSliceH - 1;
+            DEBUG_BUFFERS("buffering slice: lastLumSrcY %d lastChrSrcY %d\n",
+                                            lastLumSrcY, lastChrSrcY);
+        }
+
+        //Do horizontal scaling
+        while(lastInLumBuf < lastLumSrcY) {
+            const uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
+            const uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
+            lumBufIndex++;
+            assert(lumBufIndex < 2*vLumBufSize);
+            assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
+            assert(lastInLumBuf + 1 - srcSliceY >= 0);
+            RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
+                            hLumFilter, hLumFilterPos, hLumFilterSize,
+                            formatConvBuffer,
+                            pal, 0);
+            if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
+                RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
+                                hLumFilter, hLumFilterPos, hLumFilterSize,
+                                formatConvBuffer,
+                                pal, 1);
+            lastInLumBuf++;
+            DEBUG_BUFFERS("\t\tlumBufIndex %d: lastInLumBuf: %d\n",
+                               lumBufIndex,    lastInLumBuf);
+        }
+        while(lastInChrBuf < lastChrSrcY) {
+            const uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
+            const uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
+            chrBufIndex++;
+            assert(chrBufIndex < 2*vChrBufSize);
+            assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
+            assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
+            //FIXME replace parameters through context struct (some at least)
+
+            if (c->needs_hcscale)
+                RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
+                                hChrFilter, hChrFilterPos, hChrFilterSize,
+                                formatConvBuffer,
+                                pal);
+            lastInChrBuf++;
+            DEBUG_BUFFERS("\t\tchrBufIndex %d: lastInChrBuf: %d\n",
+                               chrBufIndex,    lastInChrBuf);
+        }
+        //wrap buf index around to stay inside the ring buffer
+        if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
+        if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
+        if (!enough_lines)
+            break; //we can't output a dstY line so let's try with the next slice
+
+#if COMPILE_TEMPLATE_MMX
+        c->blueDither= ff_dither8[dstY&1];
+        if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
+            c->greenDither= ff_dither8[dstY&1];
+        else
+            c->greenDither= ff_dither4[dstY&1];
+        c->redDither= ff_dither8[(dstY+1)&1];
+#endif
+        if (dstY < dstH-2) {
+            const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
+            const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
+            const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
+#if COMPILE_TEMPLATE_MMX
+            int i;
+            if (flags & SWS_ACCURATE_RND) {
+                int s= APCK_SIZE / 8;
+                for (i=0; i<vLumFilterSize; i+=2) {
+                    *(const void**)&lumMmxFilter[s*i              ]= lumSrcPtr[i  ];
+                    *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4  ]= lumSrcPtr[i+(vLumFilterSize>1)];
+                              lumMmxFilter[s*i+APCK_COEF/4  ]=
+                              lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i    ]
+                        + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
+                    if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
+                        *(const void**)&alpMmxFilter[s*i              ]= alpSrcPtr[i  ];
+                        *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4  ]= alpSrcPtr[i+(vLumFilterSize>1)];
+                                  alpMmxFilter[s*i+APCK_COEF/4  ]=
+                                  alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4  ];
+                    }
+                }
+                for (i=0; i<vChrFilterSize; i+=2) {
+                    *(const void**)&chrMmxFilter[s*i              ]= chrSrcPtr[i  ];
+                    *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4  ]= chrSrcPtr[i+(vChrFilterSize>1)];
+                              chrMmxFilter[s*i+APCK_COEF/4  ]=
+                              chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i    ]
+                        + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
+                }
+            } else {
+                for (i=0; i<vLumFilterSize; i++) {
+                    lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
+                    lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
+                    lumMmxFilter[4*i+2]=
+                    lumMmxFilter[4*i+3]=
+                        ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
+                    if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
+                        alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
+                        alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
+                        alpMmxFilter[4*i+2]=
+                        alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
+                    }
+                }
+                for (i=0; i<vChrFilterSize; i++) {
+                    chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
+                    chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
+                    chrMmxFilter[4*i+2]=
+                    chrMmxFilter[4*i+3]=
+                        ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
+                }
+            }
+#endif
+            if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) {
+                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+                if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
+                c->yuv2nv12X(c,
+                             vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
+                             vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                             dest, uDest, dstW, chrDstW, dstFormat);
+            } else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12 like
+                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+                if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
+                if (is16BPS(dstFormat)) {
+                    yuv2yuvX16inC(
+                                  vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
+                                  vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                  alpSrcPtr, (uint16_t *) dest, (uint16_t *) uDest, (uint16_t *) vDest, (uint16_t *) aDest, dstW, chrDstW,
+                                  dstFormat);
+                } else if (vLumFilterSize == 1 && vChrFilterSize == 1) { // unscaled YV12
+                    const int16_t *lumBuf = lumSrcPtr[0];
+                    const int16_t *chrBuf= chrSrcPtr[0];
+                    const int16_t *alpBuf= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpSrcPtr[0] : NULL;
+                    c->yuv2yuv1(c, lumBuf, chrBuf, alpBuf, dest, uDest, vDest, aDest, dstW, chrDstW);
+                } else { //General YV12
+                    c->yuv2yuvX(c,
+                                vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
+                                vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
+                }
+            } else {
+                assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
+                assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
+                if (vLumFilterSize == 1 && vChrFilterSize == 2) { //unscaled RGB
+                    int chrAlpha= vChrFilter[2*dstY+1];
+                    if(flags & SWS_FULL_CHR_H_INT) {
+                        yuv2rgbXinC_full(c, //FIXME write a packed1_full function
+                                         vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+                                         vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                         alpSrcPtr, dest, dstW, dstY);
+                    } else {
+                        c->yuv2packed1(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
+                                       alpPixBuf ? *alpSrcPtr : NULL,
+                                       dest, dstW, chrAlpha, dstFormat, flags, dstY);
+                    }
+                } else if (vLumFilterSize == 2 && vChrFilterSize == 2) { //bilinear upscale RGB
+                    int lumAlpha= vLumFilter[2*dstY+1];
+                    int chrAlpha= vChrFilter[2*dstY+1];
+                    lumMmxFilter[2]=
+                    lumMmxFilter[3]= vLumFilter[2*dstY   ]*0x10001;
+                    chrMmxFilter[2]=
+                    chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
+                    if(flags & SWS_FULL_CHR_H_INT) {
+                        yuv2rgbXinC_full(c, //FIXME write a packed2_full function
+                                         vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+                                         vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                         alpSrcPtr, dest, dstW, dstY);
+                    } else {
+                        c->yuv2packed2(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
+                                       alpPixBuf ? *alpSrcPtr : NULL, alpPixBuf ? *(alpSrcPtr+1) : NULL,
+                                       dest, dstW, lumAlpha, chrAlpha, dstY);
+                    }
+                } else { //general RGB
+                    if(flags & SWS_FULL_CHR_H_INT) {
+                        yuv2rgbXinC_full(c,
+                                         vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+                                         vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                         alpSrcPtr, dest, dstW, dstY);
+                    } else {
+                        c->yuv2packedX(c,
+                                       vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+                                       vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                       alpSrcPtr, dest, dstW, dstY);
+                    }
+                }
+            }
+        } else { // hmm looks like we can't use MMX here without overwriting this array's tail
+            const int16_t **lumSrcPtr= (const int16_t **)lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
+            const int16_t **chrSrcPtr= (const int16_t **)chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
+            const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
+            if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21) {
+                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+                if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
+                yuv2nv12XinC(
+                             vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
+                             vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                             dest, uDest, dstW, chrDstW, dstFormat);
+            } else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) { //YV12
+                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+                if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
+                if (is16BPS(dstFormat)) {
+                    yuv2yuvX16inC(
+                                  vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
+                                  vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                  alpSrcPtr, (uint16_t *) dest, (uint16_t *) uDest, (uint16_t *) vDest, (uint16_t *) aDest, dstW, chrDstW,
+                                  dstFormat);
+                } else {
+                    yuv2yuvXinC(
+                                vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
+                                vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
+                }
+            } else {
+                assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
+                assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
+                if(flags & SWS_FULL_CHR_H_INT) {
+                    yuv2rgbXinC_full(c,
+                                     vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+                                     vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                     alpSrcPtr, dest, dstW, dstY);
+                } else {
+                    yuv2packedXinC(c,
+                                   vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+                                   vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+                                   alpSrcPtr, dest, dstW, dstY);
+                }
+            }
+        }
+    }
+
+    if ((dstFormat == PIX_FMT_YUVA420P) && !alpPixBuf)
+        fillPlane(dst[3], dstStride[3], dstW, dstY-lastDstY, lastDstY, 255);
+
+#if COMPILE_TEMPLATE_MMX
+    if (flags & SWS_CPU_CAPS_MMX2 )  __asm__ volatile("sfence":::"memory");
+    /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+    if (flags & SWS_CPU_CAPS_3DNOW)  __asm__ volatile("femms" :::"memory");
+    else                             __asm__ volatile("emms"  :::"memory");
+#endif
+    /* store changed local vars back in the context */
+    c->dstY= dstY;
+    c->lumBufIndex= lumBufIndex;
+    c->chrBufIndex= chrBufIndex;
+    c->lastInLumBuf= lastInLumBuf;
+    c->lastInChrBuf= lastInChrBuf;
+
+    return dstY - lastDstY;
+}
+
+static void RENAME(sws_init_swScale)(SwsContext *c)
+{
+    enum PixelFormat srcFormat = c->srcFormat;
+
+    c->yuv2nv12X    = RENAME(yuv2nv12X   );
+    c->yuv2yuv1     = RENAME(yuv2yuv1    );
+    c->yuv2yuvX     = RENAME(yuv2yuvX    );
+    c->yuv2packed1  = RENAME(yuv2packed1 );
+    c->yuv2packed2  = RENAME(yuv2packed2 );
+    c->yuv2packedX  = RENAME(yuv2packedX );
+
+    c->hScale       = RENAME(hScale      );
+
+#if COMPILE_TEMPLATE_MMX
+    // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
+    if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
+#else
+    if (c->flags & SWS_FAST_BILINEAR)
+#endif
+    {
+        c->hyscale_fast = RENAME(hyscale_fast);
+        c->hcscale_fast = RENAME(hcscale_fast);
+    }
+
+    c->chrToYV12 = NULL;
+    switch(srcFormat) {
+        case PIX_FMT_YUYV422  : c->chrToYV12 = RENAME(yuy2ToUV); break;
+        case PIX_FMT_UYVY422  : c->chrToYV12 = RENAME(uyvyToUV); break;
+        case PIX_FMT_NV12     : c->chrToYV12 = RENAME(nv12ToUV); break;
+        case PIX_FMT_NV21     : c->chrToYV12 = RENAME(nv21ToUV); break;
+        case PIX_FMT_RGB8     :
+        case PIX_FMT_BGR8     :
+        case PIX_FMT_PAL8     :
+        case PIX_FMT_BGR4_BYTE:
+        case PIX_FMT_RGB4_BYTE: c->chrToYV12 = palToUV; break;
+        case PIX_FMT_YUV420P16BE:
+        case PIX_FMT_YUV422P16BE:
+        case PIX_FMT_YUV444P16BE: c->chrToYV12 = RENAME(BEToUV); break;
+        case PIX_FMT_YUV420P16LE:
+        case PIX_FMT_YUV422P16LE:
+        case PIX_FMT_YUV444P16LE: c->chrToYV12 = RENAME(LEToUV); break;
+    }
+    if (c->chrSrcHSubSample) {
+        switch(srcFormat) {
+        case PIX_FMT_RGB48BE:
+        case PIX_FMT_RGB48LE: c->chrToYV12 = rgb48ToUV_half; break;
+        case PIX_FMT_RGB32  :
+        case PIX_FMT_RGB32_1: c->chrToYV12 = bgr32ToUV_half; break;
+        case PIX_FMT_BGR24  : c->chrToYV12 = RENAME(bgr24ToUV_half); break;
+        case PIX_FMT_BGR565 : c->chrToYV12 = bgr16ToUV_half; break;
+        case PIX_FMT_BGR555 : c->chrToYV12 = bgr15ToUV_half; break;
+        case PIX_FMT_BGR32  :
+        case PIX_FMT_BGR32_1: c->chrToYV12 = rgb32ToUV_half; break;
+        case PIX_FMT_RGB24  : c->chrToYV12 = RENAME(rgb24ToUV_half); break;
+        case PIX_FMT_RGB565 : c->chrToYV12 = rgb16ToUV_half; break;
+        case PIX_FMT_RGB555 : c->chrToYV12 = rgb15ToUV_half; break;
+        }
+    } else {
+        switch(srcFormat) {
+        case PIX_FMT_RGB48BE:
+        case PIX_FMT_RGB48LE: c->chrToYV12 = rgb48ToUV; break;
+        case PIX_FMT_RGB32  :
+        case PIX_FMT_RGB32_1: c->chrToYV12 = bgr32ToUV; break;
+        case PIX_FMT_BGR24  : c->chrToYV12 = RENAME(bgr24ToUV); break;
+        case PIX_FMT_BGR565 : c->chrToYV12 = bgr16ToUV; break;
+        case PIX_FMT_BGR555 : c->chrToYV12 = bgr15ToUV; break;
+        case PIX_FMT_BGR32  :
+        case PIX_FMT_BGR32_1: c->chrToYV12 = rgb32ToUV; break;
+        case PIX_FMT_RGB24  : c->chrToYV12 = RENAME(rgb24ToUV); break;
+        case PIX_FMT_RGB565 : c->chrToYV12 = rgb16ToUV; break;
+        case PIX_FMT_RGB555 : c->chrToYV12 = rgb15ToUV; break;
+        }
+    }
+
+    c->lumToYV12 = NULL;
+    c->alpToYV12 = NULL;
+    switch (srcFormat) {
+    case PIX_FMT_YUYV422  :
+    case PIX_FMT_YUV420P16BE:
+    case PIX_FMT_YUV422P16BE:
+    case PIX_FMT_YUV444P16BE:
+    case PIX_FMT_GRAY16BE : c->lumToYV12 = RENAME(yuy2ToY); break;
+    case PIX_FMT_UYVY422  :
+    case PIX_FMT_YUV420P16LE:
+    case PIX_FMT_YUV422P16LE:
+    case PIX_FMT_YUV444P16LE:
+    case PIX_FMT_GRAY16LE : c->lumToYV12 = RENAME(uyvyToY); break;
+    case PIX_FMT_BGR24    : c->lumToYV12 = RENAME(bgr24ToY); break;
+    case PIX_FMT_BGR565   : c->lumToYV12 = bgr16ToY; break;
+    case PIX_FMT_BGR555   : c->lumToYV12 = bgr15ToY; break;
+    case PIX_FMT_RGB24    : c->lumToYV12 = RENAME(rgb24ToY); break;
+    case PIX_FMT_RGB565   : c->lumToYV12 = rgb16ToY; break;
+    case PIX_FMT_RGB555   : c->lumToYV12 = rgb15ToY; break;
+    case PIX_FMT_RGB8     :
+    case PIX_FMT_BGR8     :
+    case PIX_FMT_PAL8     :
+    case PIX_FMT_BGR4_BYTE:
+    case PIX_FMT_RGB4_BYTE: c->lumToYV12 = palToY; break;
+    case PIX_FMT_MONOBLACK: c->lumToYV12 = monoblack2Y; break;
+    case PIX_FMT_MONOWHITE: c->lumToYV12 = monowhite2Y; break;
+    case PIX_FMT_RGB32  :
+    case PIX_FMT_RGB32_1: c->lumToYV12 = bgr32ToY; break;
+    case PIX_FMT_BGR32  :
+    case PIX_FMT_BGR32_1: c->lumToYV12 = rgb32ToY; break;
+    case PIX_FMT_RGB48BE:
+    case PIX_FMT_RGB48LE: c->lumToYV12 = rgb48ToY; break;
+    }
+    if (c->alpPixBuf) {
+        switch (srcFormat) {
+        case PIX_FMT_RGB32  :
+        case PIX_FMT_RGB32_1:
+        case PIX_FMT_BGR32  :
+        case PIX_FMT_BGR32_1: c->alpToYV12 = abgrToA; break;
+        }
+    }
+
+    switch (srcFormat) {
+    case PIX_FMT_RGB32  :
+    case PIX_FMT_BGR32  :
+        c->alpSrcOffset = 3;
+        break;
+    case PIX_FMT_RGB32_1:
+    case PIX_FMT_BGR32_1:
+        c->lumSrcOffset = ALT32_CORR;
+        c->chrSrcOffset = ALT32_CORR;
+        break;
+    case PIX_FMT_RGB48LE:
+        c->lumSrcOffset = 1;
+        c->chrSrcOffset = 1;
+        c->alpSrcOffset = 1;
+        break;
+    }
+
+    if (c->srcRange != c->dstRange && !isAnyRGB(c->dstFormat)) {
+        if (c->srcRange) {
+            c->lumConvertRange = RENAME(lumRangeFromJpeg);
+            c->chrConvertRange = RENAME(chrRangeFromJpeg);
+        } else {
+            c->lumConvertRange = RENAME(lumRangeToJpeg);
+            c->chrConvertRange = RENAME(chrRangeToJpeg);
+        }
+    }
+
+    if (!(isGray(srcFormat) || isGray(c->dstFormat) ||
+          srcFormat == PIX_FMT_MONOBLACK || srcFormat == PIX_FMT_MONOWHITE))
+        c->needs_hcscale = 1;
+}
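
The vertical dispatch above (c->yuv2yuvX, yuv2yuvXinC and friends) ultimately reduces to a
per-pixel weighted sum over the buffered source lines. A minimal sketch of that inner loop,
with hypothetical names, assuming 8-bit output and the 19-bit normalization used by the C
reference paths:

    static void example_vert_filter(const int16_t *filter, const int16_t **src,
                                    int filterSize, uint8_t *dst, int dstW)
    {
        int i, j;
        for (i = 0; i < dstW; i++) {
            int val = 1 << 18;                    // rounding bias for the >>19 below
            for (j = 0; j < filterSize; j++)
                val += src[j][i] * filter[j];     // weighted sum over filterSize lines
            val >>= 19;
            dst[i] = val < 0 ? 0 : val > 255 ? 255 : val;  // clip to 8 bit
        }
    }

The MMX/MMX2 kernels selected in sws_init_swScale() perform the same arithmetic with packed
16-bit multiplies; the lumMmxFilter/chrMmxFilter arrays filled earlier just repack the source
pointers and replicated coefficients into the layout those kernels read.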

Added: branches/0.6/libswscale/utils.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/utils.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,1591 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer <michaelni at gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define _SVID_SOURCE //needed for MAP_ANONYMOUS
+#include <inttypes.h>
+#include <string.h>
+#include <math.h>
+#include <stdio.h>
+#include "config.h"
+#include <assert.h>
+#if HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#endif
+#if HAVE_VIRTUALALLOC
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#endif
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "rgb2rgb.h"
+#include "libavutil/intreadwrite.h"
+#include "libavutil/x86_cpu.h"
+#include "libavutil/avutil.h"
+#include "libavutil/bswap.h"
+#include "libavutil/pixdesc.h"
+
+unsigned swscale_version(void)
+{
+    return LIBSWSCALE_VERSION_INT;
+}
+
+const char *swscale_configuration(void)
+{
+    return FFMPEG_CONFIGURATION;
+}
+
+const char *swscale_license(void)
+{
+#define LICENSE_PREFIX "libswscale license: "
+    return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
+}
+
+#define RET 0xC3 //near return opcode for x86
+
+#define isSupportedIn(x)    (       \
+           (x)==PIX_FMT_YUV420P     \
+        || (x)==PIX_FMT_YUVA420P    \
+        || (x)==PIX_FMT_YUYV422     \
+        || (x)==PIX_FMT_UYVY422     \
+        || (x)==PIX_FMT_RGB48BE     \
+        || (x)==PIX_FMT_RGB48LE     \
+        || (x)==PIX_FMT_RGB32       \
+        || (x)==PIX_FMT_RGB32_1     \
+        || (x)==PIX_FMT_BGR24       \
+        || (x)==PIX_FMT_BGR565      \
+        || (x)==PIX_FMT_BGR555      \
+        || (x)==PIX_FMT_BGR32       \
+        || (x)==PIX_FMT_BGR32_1     \
+        || (x)==PIX_FMT_RGB24       \
+        || (x)==PIX_FMT_RGB565      \
+        || (x)==PIX_FMT_RGB555      \
+        || (x)==PIX_FMT_GRAY8       \
+        || (x)==PIX_FMT_YUV410P     \
+        || (x)==PIX_FMT_YUV440P     \
+        || (x)==PIX_FMT_NV12        \
+        || (x)==PIX_FMT_NV21        \
+        || (x)==PIX_FMT_GRAY16BE    \
+        || (x)==PIX_FMT_GRAY16LE    \
+        || (x)==PIX_FMT_YUV444P     \
+        || (x)==PIX_FMT_YUV422P     \
+        || (x)==PIX_FMT_YUV411P     \
+        || (x)==PIX_FMT_YUVJ420P    \
+        || (x)==PIX_FMT_YUVJ422P    \
+        || (x)==PIX_FMT_YUVJ440P    \
+        || (x)==PIX_FMT_YUVJ444P    \
+        || (x)==PIX_FMT_PAL8        \
+        || (x)==PIX_FMT_BGR8        \
+        || (x)==PIX_FMT_RGB8        \
+        || (x)==PIX_FMT_BGR4_BYTE   \
+        || (x)==PIX_FMT_RGB4_BYTE   \
+        || (x)==PIX_FMT_YUV440P     \
+        || (x)==PIX_FMT_MONOWHITE   \
+        || (x)==PIX_FMT_MONOBLACK   \
+        || (x)==PIX_FMT_YUV420P16LE   \
+        || (x)==PIX_FMT_YUV422P16LE   \
+        || (x)==PIX_FMT_YUV444P16LE   \
+        || (x)==PIX_FMT_YUV420P16BE   \
+        || (x)==PIX_FMT_YUV422P16BE   \
+        || (x)==PIX_FMT_YUV444P16BE   \
+    )
+
+int sws_isSupportedInput(enum PixelFormat pix_fmt)
+{
+    return isSupportedIn(pix_fmt);
+}
+
+#define isSupportedOut(x)   (       \
+           (x)==PIX_FMT_YUV420P     \
+        || (x)==PIX_FMT_YUVA420P    \
+        || (x)==PIX_FMT_YUYV422     \
+        || (x)==PIX_FMT_UYVY422     \
+        || (x)==PIX_FMT_YUV444P     \
+        || (x)==PIX_FMT_YUV422P     \
+        || (x)==PIX_FMT_YUV411P     \
+        || (x)==PIX_FMT_YUVJ420P    \
+        || (x)==PIX_FMT_YUVJ422P    \
+        || (x)==PIX_FMT_YUVJ440P    \
+        || (x)==PIX_FMT_YUVJ444P    \
+        || isAnyRGB(x)              \
+        || (x)==PIX_FMT_NV12        \
+        || (x)==PIX_FMT_NV21        \
+        || (x)==PIX_FMT_GRAY16BE    \
+        || (x)==PIX_FMT_GRAY16LE    \
+        || (x)==PIX_FMT_GRAY8       \
+        || (x)==PIX_FMT_YUV410P     \
+        || (x)==PIX_FMT_YUV440P     \
+        || (x)==PIX_FMT_YUV420P16LE   \
+        || (x)==PIX_FMT_YUV422P16LE   \
+        || (x)==PIX_FMT_YUV444P16LE   \
+        || (x)==PIX_FMT_YUV420P16BE   \
+        || (x)==PIX_FMT_YUV422P16BE   \
+        || (x)==PIX_FMT_YUV444P16BE   \
+    )
+
+int sws_isSupportedOutput(enum PixelFormat pix_fmt)
+{
+    return isSupportedOut(pix_fmt);
+}
+
+extern const int32_t ff_yuv2rgb_coeffs[8][4];
+
+const char *sws_format_name(enum PixelFormat format)
+{
+    if ((unsigned)format < PIX_FMT_NB && av_pix_fmt_descriptors[format].name)
+        return av_pix_fmt_descriptors[format].name;
+    else
+        return "Unknown format";
+}
+
+static double getSplineCoeff(double a, double b, double c, double d, double dist)
+{
+//    printf("%f %f %f %f %f\n", a,b,c,d,dist);
+    if (dist<=1.0) return ((d*dist + c)*dist + b)*dist +a;
+    else           return getSplineCoeff(        0.0,
+                                          b+ 2.0*c + 3.0*d,
+                                                 c + 3.0*d,
+                                         -b- 3.0*c - 6.0*d,
+                                         dist-1.0);
+}
+
+static int initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc,
+                      int srcW, int dstW, int filterAlign, int one, int flags,
+                      SwsVector *srcFilter, SwsVector *dstFilter, double param[2])
+{
+    int i;
+    int filterSize;
+    int filter2Size;
+    int minFilterSize;
+    int64_t *filter=NULL;
+    int64_t *filter2=NULL;
+    const int64_t fone= 1LL<<54;
+    int ret= -1;
+#if ARCH_X86
+    if (flags & SWS_CPU_CAPS_MMX)
+        __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
+#endif
+
+    // NOTE: the +1 is for the MMX scaler which reads over the end
+    FF_ALLOC_OR_GOTO(NULL, *filterPos, (dstW+1)*sizeof(int16_t), fail);
+
+    if (FFABS(xInc - 0x10000) <10) { // unscaled
+        int i;
+        filterSize= 1;
+        FF_ALLOCZ_OR_GOTO(NULL, filter, dstW*sizeof(*filter)*filterSize, fail);
+
+        for (i=0; i<dstW; i++) {
+            filter[i*filterSize]= fone;
+            (*filterPos)[i]=i;
+        }
+
+    } else if (flags&SWS_POINT) { // lame looking point sampling mode
+        int i;
+        int xDstInSrc;
+        filterSize= 1;
+        FF_ALLOC_OR_GOTO(NULL, filter, dstW*sizeof(*filter)*filterSize, fail);
+
+        xDstInSrc= xInc/2 - 0x8000;
+        for (i=0; i<dstW; i++) {
+            int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
+
+            (*filterPos)[i]= xx;
+            filter[i]= fone;
+            xDstInSrc+= xInc;
+        }
+    } else if ((xInc <= (1<<16) && (flags&SWS_AREA)) || (flags&SWS_FAST_BILINEAR)) { // bilinear upscale
+        int i;
+        int xDstInSrc;
+        filterSize= 2;
+        FF_ALLOC_OR_GOTO(NULL, filter, dstW*sizeof(*filter)*filterSize, fail);
+
+        xDstInSrc= xInc/2 - 0x8000;
+        for (i=0; i<dstW; i++) {
+            int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
+            int j;
+
+            (*filterPos)[i]= xx;
+            //bilinear upscale / linear interpolate / area averaging
+            for (j=0; j<filterSize; j++) {
+                int64_t coeff= fone - FFABS((xx<<16) - xDstInSrc)*(fone>>16);
+                if (coeff<0) coeff=0;
+                filter[i*filterSize + j]= coeff;
+                xx++;
+            }
+            xDstInSrc+= xInc;
+        }
+    } else {
+        int xDstInSrc;
+        int sizeFactor;
+
+        if      (flags&SWS_BICUBIC)      sizeFactor=  4;
+        else if (flags&SWS_X)            sizeFactor=  8;
+        else if (flags&SWS_AREA)         sizeFactor=  1; //downscale only, for upscale it is bilinear
+        else if (flags&SWS_GAUSS)        sizeFactor=  8;   // infinite ;)
+        else if (flags&SWS_LANCZOS)      sizeFactor= param[0] != SWS_PARAM_DEFAULT ? ceil(2*param[0]) : 6;
+        else if (flags&SWS_SINC)         sizeFactor= 20; // infinite ;)
+        else if (flags&SWS_SPLINE)       sizeFactor= 20;  // infinite ;)
+        else if (flags&SWS_BILINEAR)     sizeFactor=  2;
+        else {
+            sizeFactor= 0; //GCC warning killer
+            assert(0);
+        }
+
+        if (xInc <= 1<<16)      filterSize= 1 + sizeFactor; // upscale
+        else                    filterSize= 1 + (sizeFactor*srcW + dstW - 1)/ dstW;
+
+        if (filterSize > srcW-2) filterSize=srcW-2;
+
+        FF_ALLOC_OR_GOTO(NULL, filter, dstW*sizeof(*filter)*filterSize, fail);
+
+        xDstInSrc= xInc - 0x10000;
+        for (i=0; i<dstW; i++) {
+            int xx= (xDstInSrc - ((filterSize-2)<<16)) / (1<<17);
+            int j;
+            (*filterPos)[i]= xx;
+            for (j=0; j<filterSize; j++) {
+                int64_t d= ((int64_t)FFABS((xx<<17) - xDstInSrc))<<13;
+                double floatd;
+                int64_t coeff;
+
+                if (xInc > 1<<16)
+                    d= d*dstW/srcW;
+                floatd= d * (1.0/(1<<30));
+
+                if (flags & SWS_BICUBIC) {
+                    int64_t B= (param[0] != SWS_PARAM_DEFAULT ? param[0] :   0) * (1<<24);
+                    int64_t C= (param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6) * (1<<24);
+                    int64_t dd = ( d*d)>>30;
+                    int64_t ddd= (dd*d)>>30;
+
+                    if      (d < 1LL<<30)
+                        coeff = (12*(1<<24)-9*B-6*C)*ddd + (-18*(1<<24)+12*B+6*C)*dd + (6*(1<<24)-2*B)*(1<<30);
+                    else if (d < 1LL<<31)
+                        coeff = (-B-6*C)*ddd + (6*B+30*C)*dd + (-12*B-48*C)*d + (8*B+24*C)*(1<<30);
+                    else
+                        coeff=0.0;
+                    coeff *= fone>>(30+24);
+                }
+/*                else if (flags & SWS_X) {
+                    double p= param ? param*0.01 : 0.3;
+                    coeff = d ? sin(d*PI)/(d*PI) : 1.0;
+                    coeff*= pow(2.0, - p*d*d);
+                }*/
+                else if (flags & SWS_X) {
+                    double A= param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
+                    double c;
+
+                    if (floatd<1.0)
+                        c = cos(floatd*M_PI);
+                    else
+                        c=-1.0;
+                    if (c<0.0)      c= -pow(-c, A);
+                    else            c=  pow( c, A);
+                    coeff= (c*0.5 + 0.5)*fone;
+                } else if (flags & SWS_AREA) {
+                    int64_t d2= d - (1<<29);
+                    if      (d2*xInc < -(1LL<<(29+16))) coeff= 1.0 * (1LL<<(30+16));
+                    else if (d2*xInc <  (1LL<<(29+16))) coeff= -d2*xInc + (1LL<<(29+16));
+                    else coeff=0.0;
+                    coeff *= fone>>(30+16);
+                } else if (flags & SWS_GAUSS) {
+                    double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
+                    coeff = (pow(2.0, - p*floatd*floatd))*fone;
+                } else if (flags & SWS_SINC) {
+                    coeff = (d ? sin(floatd*M_PI)/(floatd*M_PI) : 1.0)*fone;
+                } else if (flags & SWS_LANCZOS) {
+                    double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
+                    coeff = (d ? sin(floatd*M_PI)*sin(floatd*M_PI/p)/(floatd*floatd*M_PI*M_PI/p) : 1.0)*fone;
+                    if (floatd>p) coeff=0;
+                } else if (flags & SWS_BILINEAR) {
+                    coeff= (1<<30) - d;
+                    if (coeff<0) coeff=0;
+                    coeff *= fone >> 30;
+                } else if (flags & SWS_SPLINE) {
+                    double p=-2.196152422706632;
+                    coeff = getSplineCoeff(1.0, 0.0, p, -p-1.0, floatd) * fone;
+                } else {
+                    coeff= 0.0; //GCC warning killer
+                    assert(0);
+                }
+
+                filter[i*filterSize + j]= coeff;
+                xx++;
+            }
+            xDstInSrc+= 2*xInc;
+        }
+    }
+
+    /* apply src & dst Filter to filter -> filter2
+       av_free(filter);
+    */
+    assert(filterSize>0);
+    filter2Size= filterSize;
+    if (srcFilter) filter2Size+= srcFilter->length - 1;
+    if (dstFilter) filter2Size+= dstFilter->length - 1;
+    assert(filter2Size>0);
+    FF_ALLOCZ_OR_GOTO(NULL, filter2, filter2Size*dstW*sizeof(*filter2), fail);
+
+    for (i=0; i<dstW; i++) {
+        int j, k;
+
+        if(srcFilter) {
+            for (k=0; k<srcFilter->length; k++) {
+                for (j=0; j<filterSize; j++)
+                    filter2[i*filter2Size + k + j] += srcFilter->coeff[k]*filter[i*filterSize + j];
+            }
+        } else {
+            for (j=0; j<filterSize; j++)
+                filter2[i*filter2Size + j]= filter[i*filterSize + j];
+        }
+        //FIXME dstFilter
+
+        (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2;
+    }
+    av_freep(&filter);
+
+    /* try to reduce the filter-size (step1 find size and shift left) */
+    // Assume it is near normalized (*0.5 or *2.0 is OK but * 0.001 is not).
+    minFilterSize= 0;
+    for (i=dstW-1; i>=0; i--) {
+        int min= filter2Size;
+        int j;
+        int64_t cutOff=0.0;
+
+        /* get rid of near zero elements on the left by shifting left */
+        for (j=0; j<filter2Size; j++) {
+            int k;
+            cutOff += FFABS(filter2[i*filter2Size]);
+
+            if (cutOff > SWS_MAX_REDUCE_CUTOFF*fone) break;
+
+            /* preserve monotonicity because the core can't handle the filter otherwise */
+            if (i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;
+
+            // move filter coefficients left
+            for (k=1; k<filter2Size; k++)
+                filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k];
+            filter2[i*filter2Size + k - 1]= 0;
+            (*filterPos)[i]++;
+        }
+
+        cutOff=0;
+        /* count near zeros on the right */
+        for (j=filter2Size-1; j>0; j--) {
+            cutOff += FFABS(filter2[i*filter2Size + j]);
+
+            if (cutOff > SWS_MAX_REDUCE_CUTOFF*fone) break;
+            min--;
+        }
+
+        if (min>minFilterSize) minFilterSize= min;
+    }
+
+    if (flags & SWS_CPU_CAPS_ALTIVEC) {
+        // we can handle the special case 4,
+        // so we don't want to go to the full 8
+        if (minFilterSize < 5)
+            filterAlign = 4;
+
+        // We really don't want to waste our time
+        // doing useless computation, so fall back on
+        // the scalar C code for very small filters.
+        // Vectorizing is worth it only if you have a
+        // decent-sized vector.
+        if (minFilterSize < 3)
+            filterAlign = 1;
+    }
+
+    if (flags & SWS_CPU_CAPS_MMX) {
+        // special case for unscaled vertical filtering
+        if (minFilterSize == 1 && filterAlign == 2)
+            filterAlign= 1;
+    }
+
+    assert(minFilterSize > 0);
+    filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
+    assert(filterSize > 0);
+    filter= av_malloc(filterSize*dstW*sizeof(*filter));
+    if (filterSize >= MAX_FILTER_SIZE*16/((flags&SWS_ACCURATE_RND) ? APCK_SIZE : 16) || !filter)
+        goto fail;
+    *outFilterSize= filterSize;
+
+    if (flags&SWS_PRINT_INFO)
+        av_log(NULL, AV_LOG_VERBOSE, "SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize);
+    /* try to reduce the filter-size (step2 reduce it) */
+    for (i=0; i<dstW; i++) {
+        int j;
+
+        for (j=0; j<filterSize; j++) {
+            if (j>=filter2Size) filter[i*filterSize + j]= 0;
+            else               filter[i*filterSize + j]= filter2[i*filter2Size + j];
+            if((flags & SWS_BITEXACT) && j>=minFilterSize)
+                filter[i*filterSize + j]= 0;
+        }
+    }
+
+    //FIXME try to align filterPos if possible
+
+    //fix borders
+    for (i=0; i<dstW; i++) {
+        int j;
+        if ((*filterPos)[i] < 0) {
+            // move filter coefficients left to compensate for filterPos
+            for (j=1; j<filterSize; j++) {
+                int left= FFMAX(j + (*filterPos)[i], 0);
+                filter[i*filterSize + left] += filter[i*filterSize + j];
+                filter[i*filterSize + j]=0;
+            }
+            (*filterPos)[i]= 0;
+        }
+
+        if ((*filterPos)[i] + filterSize > srcW) {
+            int shift= (*filterPos)[i] + filterSize - srcW;
+            // move filter coefficients right to compensate for filterPos
+            for (j=filterSize-2; j>=0; j--) {
+                int right= FFMIN(j + shift, filterSize-1);
+                filter[i*filterSize +right] += filter[i*filterSize +j];
+                filter[i*filterSize +j]=0;
+            }
+            (*filterPos)[i]= srcW - filterSize;
+        }
+    }
+
+    // Note the +1 is for the MMX scaler which reads over the end
+    /* align at 16 for AltiVec (needed by hScale_altivec_real) */
+    FF_ALLOCZ_OR_GOTO(NULL, *outFilter, *outFilterSize*(dstW+1)*sizeof(int16_t), fail);
+
+    /* normalize & store in outFilter */
+    for (i=0; i<dstW; i++) {
+        int j;
+        int64_t error=0;
+        int64_t sum=0;
+
+        for (j=0; j<filterSize; j++) {
+            sum+= filter[i*filterSize + j];
+        }
+        sum= (sum + one/2)/ one;
+        for (j=0; j<*outFilterSize; j++) {
+            int64_t v= filter[i*filterSize + j] + error;
+            int intV= ROUNDED_DIV(v, sum);
+            (*outFilter)[i*(*outFilterSize) + j]= intV;
+            error= v - intV*sum;
+        }
+    }
+
+    (*filterPos)[dstW]= (*filterPos)[dstW-1]; // the MMX scaler will read over the end
+    for (i=0; i<*outFilterSize; i++) {
+        int j= dstW*(*outFilterSize);
+        (*outFilter)[j + i]= (*outFilter)[j + i - (*outFilterSize)];
+    }
+
+    ret=0;
+fail:
+    av_free(filter);
+    av_free(filter2);
+    return ret;
+}
+
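The filter/filterPos tables that initFilter() fills in are later consumed by the horizontal
scalers. A rough sketch of that consumption, with hypothetical names, assuming the >>7
normalization and 15-bit clamp of the C hScale() path in swscale_template.c:

    static void example_hscale(int16_t *dst, int dstW, const uint8_t *src,
                               const int16_t *filter, const int16_t *filterPos,
                               int filterSize)
    {
        int i, j;
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            int val    = 0;
            for (j = 0; j < filterSize; j++)
                val += (int)src[srcPos + j] * filter[filterSize*i + j];
            dst[i] = FFMIN(val >> 7, (1 << 15) - 1);  // keep headroom for the vertical pass
        }
    }
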
+#if ARCH_X86 && (HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT)
+static int initMMX2HScaler(int dstW, int xInc, uint8_t *filterCode, int16_t *filter, int32_t *filterPos, int numSplits)
+{
+    uint8_t *fragmentA;
+    x86_reg imm8OfPShufW1A;
+    x86_reg imm8OfPShufW2A;
+    x86_reg fragmentLengthA;
+    uint8_t *fragmentB;
+    x86_reg imm8OfPShufW1B;
+    x86_reg imm8OfPShufW2B;
+    x86_reg fragmentLengthB;
+    int fragmentPos;
+
+    int xpos, i;
+
+    // create an optimized horizontal scaling routine
+    /* This scaler is made of runtime-generated MMX2 code using specially
+     * tuned pshufw instructions. For every four output pixels, if four
+     * input pixels are enough for the fast bilinear scaling, then a chunk
+     * of fragmentB is used. If five input pixels are needed, then a chunk
+     * of fragmentA is used.
+     */
+
+    //code fragment
+
+    __asm__ volatile(
+        "jmp                         9f                 \n\t"
+    // Begin
+        "0:                                             \n\t"
+        "movq    (%%"REG_d", %%"REG_a"), %%mm3          \n\t"
+        "movd    (%%"REG_c", %%"REG_S"), %%mm0          \n\t"
+        "movd   1(%%"REG_c", %%"REG_S"), %%mm1          \n\t"
+        "punpcklbw                %%mm7, %%mm1          \n\t"
+        "punpcklbw                %%mm7, %%mm0          \n\t"
+        "pshufw                   $0xFF, %%mm1, %%mm1   \n\t"
+        "1:                                             \n\t"
+        "pshufw                   $0xFF, %%mm0, %%mm0   \n\t"
+        "2:                                             \n\t"
+        "psubw                    %%mm1, %%mm0          \n\t"
+        "movl   8(%%"REG_b", %%"REG_a"), %%esi          \n\t"
+        "pmullw                   %%mm3, %%mm0          \n\t"
+        "psllw                       $7, %%mm1          \n\t"
+        "paddw                    %%mm1, %%mm0          \n\t"
+
+        "movq                     %%mm0, (%%"REG_D", %%"REG_a") \n\t"
+
+        "add                         $8, %%"REG_a"      \n\t"
+    // End
+        "9:                                             \n\t"
+//        "int $3                                         \n\t"
+        "lea                 " LOCAL_MANGLE(0b) ", %0   \n\t"
+        "lea                 " LOCAL_MANGLE(1b) ", %1   \n\t"
+        "lea                 " LOCAL_MANGLE(2b) ", %2   \n\t"
+        "dec                         %1                 \n\t"
+        "dec                         %2                 \n\t"
+        "sub                         %0, %1             \n\t"
+        "sub                         %0, %2             \n\t"
+        "lea                 " LOCAL_MANGLE(9b) ", %3   \n\t"
+        "sub                         %0, %3             \n\t"
+
+
+        :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
+        "=r" (fragmentLengthA)
+    );
+
+    __asm__ volatile(
+        "jmp                         9f                 \n\t"
+    // Begin
+        "0:                                             \n\t"
+        "movq    (%%"REG_d", %%"REG_a"), %%mm3          \n\t"
+        "movd    (%%"REG_c", %%"REG_S"), %%mm0          \n\t"
+        "punpcklbw                %%mm7, %%mm0          \n\t"
+        "pshufw                   $0xFF, %%mm0, %%mm1   \n\t"
+        "1:                                             \n\t"
+        "pshufw                   $0xFF, %%mm0, %%mm0   \n\t"
+        "2:                                             \n\t"
+        "psubw                    %%mm1, %%mm0          \n\t"
+        "movl   8(%%"REG_b", %%"REG_a"), %%esi          \n\t"
+        "pmullw                   %%mm3, %%mm0          \n\t"
+        "psllw                       $7, %%mm1          \n\t"
+        "paddw                    %%mm1, %%mm0          \n\t"
+
+        "movq                     %%mm0, (%%"REG_D", %%"REG_a") \n\t"
+
+        "add                         $8, %%"REG_a"      \n\t"
+    // End
+        "9:                                             \n\t"
+//        "int                       $3                   \n\t"
+        "lea                 " LOCAL_MANGLE(0b) ", %0   \n\t"
+        "lea                 " LOCAL_MANGLE(1b) ", %1   \n\t"
+        "lea                 " LOCAL_MANGLE(2b) ", %2   \n\t"
+        "dec                         %1                 \n\t"
+        "dec                         %2                 \n\t"
+        "sub                         %0, %1             \n\t"
+        "sub                         %0, %2             \n\t"
+        "lea                 " LOCAL_MANGLE(9b) ", %3   \n\t"
+        "sub                         %0, %3             \n\t"
+
+
+        :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
+        "=r" (fragmentLengthB)
+    );
+
+    xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers
+    fragmentPos=0;
+
+    for (i=0; i<dstW/numSplits; i++) {
+        int xx=xpos>>16;
+
+        if ((i&3) == 0) {
+            int a=0;
+            int b=((xpos+xInc)>>16) - xx;
+            int c=((xpos+xInc*2)>>16) - xx;
+            int d=((xpos+xInc*3)>>16) - xx;
+            int inc                = (d+1<4);
+            uint8_t *fragment      = (d+1<4) ? fragmentB       : fragmentA;
+            x86_reg imm8OfPShufW1  = (d+1<4) ? imm8OfPShufW1B  : imm8OfPShufW1A;
+            x86_reg imm8OfPShufW2  = (d+1<4) ? imm8OfPShufW2B  : imm8OfPShufW2A;
+            x86_reg fragmentLength = (d+1<4) ? fragmentLengthB : fragmentLengthA;
+            int maxShift= 3-(d+inc);
+            int shift=0;
+
+            if (filterCode) {
+                filter[i  ] = (( xpos         & 0xFFFF) ^ 0xFFFF)>>9;
+                filter[i+1] = (((xpos+xInc  ) & 0xFFFF) ^ 0xFFFF)>>9;
+                filter[i+2] = (((xpos+xInc*2) & 0xFFFF) ^ 0xFFFF)>>9;
+                filter[i+3] = (((xpos+xInc*3) & 0xFFFF) ^ 0xFFFF)>>9;
+                filterPos[i/2]= xx;
+
+                memcpy(filterCode + fragmentPos, fragment, fragmentLength);
+
+                filterCode[fragmentPos + imm8OfPShufW1]=
+                    (a+inc) | ((b+inc)<<2) | ((c+inc)<<4) | ((d+inc)<<6);
+                filterCode[fragmentPos + imm8OfPShufW2]=
+                    a | (b<<2) | (c<<4) | (d<<6);
+
+                if (i+4-inc>=dstW) shift=maxShift; //avoid overread
+                else if ((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //Align
+
+                if (shift && i>=shift) {
+                    filterCode[fragmentPos + imm8OfPShufW1]+= 0x55*shift;
+                    filterCode[fragmentPos + imm8OfPShufW2]+= 0x55*shift;
+                    filterPos[i/2]-=shift;
+                }
+            }
+
+            fragmentPos+= fragmentLength;
+
+            if (filterCode)
+                filterCode[fragmentPos]= RET;
+        }
+        xpos+=xInc;
+    }
+    if (filterCode)
+        filterPos[((i/2)+1)&(~1)]= xpos>>16; // needed to jump to the next part
+
+    return fragmentPos + 1;
+}
+#endif /* ARCH_X86 && (HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT) */
+
+static void getSubSampleFactors(int *h, int *v, enum PixelFormat format)
+{
+    *h = av_pix_fmt_descriptors[format].log2_chroma_w;
+    *v = av_pix_fmt_descriptors[format].log2_chroma_h;
+}
+
+static uint16_t roundToInt16(int64_t f)
+{
+    int r= (f + (1<<15))>>16;
+         if (r<-0x7FFF) return 0x8000;
+    else if (r> 0x7FFF) return 0x7FFF;
+    else                return r;
+}
+
+int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation)
+{
+    int64_t crv =  inv_table[0];
+    int64_t cbu =  inv_table[1];
+    int64_t cgu = -inv_table[2];
+    int64_t cgv = -inv_table[3];
+    int64_t cy  = 1<<16;
+    int64_t oy  = 0;
+
+    memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4);
+    memcpy(c->dstColorspaceTable,     table, sizeof(int)*4);
+
+    c->brightness= brightness;
+    c->contrast  = contrast;
+    c->saturation= saturation;
+    c->srcRange  = srcRange;
+    c->dstRange  = dstRange;
+    if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
+
+    c->uOffset=   0x0400040004000400LL;
+    c->vOffset=   0x0400040004000400LL;
+
+    if (!srcRange) {
+        cy= (cy*255) / 219;
+        oy= 16<<16;
+    } else {
+        crv= (crv*224) / 255;
+        cbu= (cbu*224) / 255;
+        cgu= (cgu*224) / 255;
+        cgv= (cgv*224) / 255;
+    }
+
+    cy = (cy *contrast             )>>16;
+    crv= (crv*contrast * saturation)>>32;
+    cbu= (cbu*contrast * saturation)>>32;
+    cgu= (cgu*contrast * saturation)>>32;
+    cgv= (cgv*contrast * saturation)>>32;
+
+    oy -= 256*brightness;
+
+    c->yCoeff=    roundToInt16(cy *8192) * 0x0001000100010001ULL;
+    c->vrCoeff=   roundToInt16(crv*8192) * 0x0001000100010001ULL;
+    c->ubCoeff=   roundToInt16(cbu*8192) * 0x0001000100010001ULL;
+    c->vgCoeff=   roundToInt16(cgv*8192) * 0x0001000100010001ULL;
+    c->ugCoeff=   roundToInt16(cgu*8192) * 0x0001000100010001ULL;
+    c->yOffset=   roundToInt16(oy *   8) * 0x0001000100010001ULL;
+
+    c->yuv2rgb_y_coeff  = (int16_t)roundToInt16(cy <<13);
+    c->yuv2rgb_y_offset = (int16_t)roundToInt16(oy << 9);
+    c->yuv2rgb_v2r_coeff= (int16_t)roundToInt16(crv<<13);
+    c->yuv2rgb_v2g_coeff= (int16_t)roundToInt16(cgv<<13);
+    c->yuv2rgb_u2g_coeff= (int16_t)roundToInt16(cgu<<13);
+    c->yuv2rgb_u2b_coeff= (int16_t)roundToInt16(cbu<<13);
+
+    ff_yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation);
+    //FIXME factorize
+
+#if HAVE_ALTIVEC
+    if (c->flags & SWS_CPU_CAPS_ALTIVEC)
+        ff_yuv2rgb_init_tables_altivec(c, inv_table, brightness, contrast, saturation);
+#endif
+    return 0;
+}
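
For comparison, sws_getContext() further down initializes every context with exactly this call,
passing ff_yuv2rgb_coeffs[SWS_CS_DEFAULT] for both tables and the neutral values 0 / 1<<16 / 1<<16.
A minimal re-tuning sketch for an already created context (ctx is a hypothetical variable),
switching the input to full/JPEG range while keeping everything else neutral:

    if (sws_setColorspaceDetails(ctx, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], 1 /* srcRange: JPEG */,
                                 ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], 0 /* dstRange */,
                                 0 /* brightness */, 1 << 16 /* contrast */,
                                 1 << 16 /* saturation */) < 0)
        av_log(ctx, AV_LOG_WARNING, "colorspace details ignored for YUV/gray destinations\n");

The early return above makes the call fail for YUV or gray destinations, so the warning branch
covers exactly that case.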
+
+int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation)
+{
+    if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
+
+    *inv_table = c->srcColorspaceTable;
+    *table     = c->dstColorspaceTable;
+    *srcRange  = c->srcRange;
+    *dstRange  = c->dstRange;
+    *brightness= c->brightness;
+    *contrast  = c->contrast;
+    *saturation= c->saturation;
+
+    return 0;
+}
+
+static int handle_jpeg(enum PixelFormat *format)
+{
+    switch (*format) {
+    case PIX_FMT_YUVJ420P:
+        *format = PIX_FMT_YUV420P;
+        return 1;
+    case PIX_FMT_YUVJ422P:
+        *format = PIX_FMT_YUV422P;
+        return 1;
+    case PIX_FMT_YUVJ444P:
+        *format = PIX_FMT_YUV444P;
+        return 1;
+    case PIX_FMT_YUVJ440P:
+        *format = PIX_FMT_YUV440P;
+        return 1;
+    default:
+        return 0;
+    }
+}
+
+SwsContext *sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat,
+                           int dstW, int dstH, enum PixelFormat dstFormat, int flags,
+                           SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
+{
+    SwsContext *c;
+    int i;
+    int usesVFilter, usesHFilter;
+    int unscaled;
+    int srcRange, dstRange;
+    SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
+#if ARCH_X86
+    if (flags & SWS_CPU_CAPS_MMX)
+        __asm__ volatile("emms\n\t"::: "memory");
+#endif
+
+#if !CONFIG_RUNTIME_CPUDETECT //ensure that the flags match the compiled variant if cpudetect is off
+    flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC|SWS_CPU_CAPS_BFIN);
+    flags |= ff_hardcodedcpuflags();
+#endif /* CONFIG_RUNTIME_CPUDETECT */
+    if (!rgb15to16) sws_rgb2rgb_init(flags);
+
+    unscaled = (srcW == dstW && srcH == dstH);
+
+    srcRange = handle_jpeg(&srcFormat);
+    dstRange = handle_jpeg(&dstFormat);
+
+    if (!isSupportedIn(srcFormat)) {
+        av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as input pixel format\n", sws_format_name(srcFormat));
+        return NULL;
+    }
+    if (!isSupportedOut(dstFormat)) {
+        av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as output pixel format\n", sws_format_name(dstFormat));
+        return NULL;
+    }
+
+    i= flags & ( SWS_POINT
+                |SWS_AREA
+                |SWS_BILINEAR
+                |SWS_FAST_BILINEAR
+                |SWS_BICUBIC
+                |SWS_X
+                |SWS_GAUSS
+                |SWS_LANCZOS
+                |SWS_SINC
+                |SWS_SPLINE
+                |SWS_BICUBLIN);
+    if(!i || (i & (i-1))) {
+        av_log(NULL, AV_LOG_ERROR, "swScaler: Exactly one scaler algorithm must be chosen\n");
+        return NULL;
+    }
+
+    /* sanity check */
+    if (srcW<4 || srcH<1 || dstW<8 || dstH<1) { //FIXME check if these are enough and try to lower them after fixing the relevant parts of the code
+        av_log(NULL, AV_LOG_ERROR, "swScaler: %dx%d -> %dx%d is invalid scaling dimension\n",
+               srcW, srcH, dstW, dstH);
+        return NULL;
+    }
+    if(srcW > VOFW || dstW > VOFW) {
+        av_log(NULL, AV_LOG_ERROR, "swScaler: Compile-time maximum width is "AV_STRINGIFY(VOFW)" change VOF/VOFW and recompile\n");
+        return NULL;
+    }
+
+    if (!dstFilter) dstFilter= &dummyFilter;
+    if (!srcFilter) srcFilter= &dummyFilter;
+
+    FF_ALLOCZ_OR_GOTO(NULL, c, sizeof(SwsContext), fail);
+
+    c->av_class = &sws_context_class;
+    c->srcW= srcW;
+    c->srcH= srcH;
+    c->dstW= dstW;
+    c->dstH= dstH;
+    c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW;
+    c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH;
+    c->flags= flags;
+    c->dstFormat= dstFormat;
+    c->srcFormat= srcFormat;
+    c->dstFormatBpp = av_get_bits_per_pixel(&av_pix_fmt_descriptors[dstFormat]);
+    c->srcFormatBpp = av_get_bits_per_pixel(&av_pix_fmt_descriptors[srcFormat]);
+    c->vRounder= 4* 0x0001000100010001ULL;
+
+    usesVFilter = (srcFilter->lumV && srcFilter->lumV->length>1) ||
+                  (srcFilter->chrV && srcFilter->chrV->length>1) ||
+                  (dstFilter->lumV && dstFilter->lumV->length>1) ||
+                  (dstFilter->chrV && dstFilter->chrV->length>1);
+    usesHFilter = (srcFilter->lumH && srcFilter->lumH->length>1) ||
+                  (srcFilter->chrH && srcFilter->chrH->length>1) ||
+                  (dstFilter->lumH && dstFilter->lumH->length>1) ||
+                  (dstFilter->chrH && dstFilter->chrH->length>1);
+
+    getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
+    getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
+
+    // reuse chroma for 2 pixels RGB/BGR unless user wants full chroma interpolation
+    if (isAnyRGB(dstFormat) && !(flags&SWS_FULL_CHR_H_INT)) c->chrDstHSubSample=1;
+
+    // drop some chroma lines if the user wants it
+    c->vChrDrop= (flags&SWS_SRC_V_CHR_DROP_MASK)>>SWS_SRC_V_CHR_DROP_SHIFT;
+    c->chrSrcVSubSample+= c->vChrDrop;
+
+    // drop every other pixel for chroma calculation unless user wants full chroma
+    if (isAnyRGB(srcFormat) && !(flags&SWS_FULL_CHR_H_INP)
+      && srcFormat!=PIX_FMT_RGB8      && srcFormat!=PIX_FMT_BGR8
+      && srcFormat!=PIX_FMT_RGB4      && srcFormat!=PIX_FMT_BGR4
+      && srcFormat!=PIX_FMT_RGB4_BYTE && srcFormat!=PIX_FMT_BGR4_BYTE
+      && ((dstW>>c->chrDstHSubSample) <= (srcW>>1) || (flags&(SWS_FAST_BILINEAR|SWS_POINT))))
+        c->chrSrcHSubSample=1;
+
+    if (param) {
+        c->param[0] = param[0];
+        c->param[1] = param[1];
+    } else {
+        c->param[0] =
+        c->param[1] = SWS_PARAM_DEFAULT;
+    }
+
+    // Note the -((-x)>>y) is so that we always round toward +inf.
+    c->chrSrcW= -((-srcW) >> c->chrSrcHSubSample);
+    c->chrSrcH= -((-srcH) >> c->chrSrcVSubSample);
+    c->chrDstW= -((-dstW) >> c->chrDstHSubSample);
+    c->chrDstH= -((-dstH) >> c->chrDstVSubSample);
+
+    sws_setColorspaceDetails(c, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT], srcRange, ff_yuv2rgb_coeffs[SWS_CS_DEFAULT] /* FIXME*/, dstRange, 0, 1<<16, 1<<16);
+
+    /* unscaled special cases */
+    if (unscaled && !usesHFilter && !usesVFilter && (srcRange == dstRange || isAnyRGB(dstFormat))) {
+        ff_get_unscaled_swscale(c);
+
+        if (c->swScale) {
+            if (flags&SWS_PRINT_INFO)
+                av_log(c, AV_LOG_INFO, "using unscaled %s -> %s special converter\n",
+                       sws_format_name(srcFormat), sws_format_name(dstFormat));
+            return c;
+        }
+    }
+
+    if (flags & SWS_CPU_CAPS_MMX2) {
+        c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
+        if (!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR)) {
+            if (flags&SWS_PRINT_INFO)
+                av_log(c, AV_LOG_INFO, "output width is not a multiple of 32 -> no MMX2 scaler\n");
+        }
+        if (usesHFilter) c->canMMX2BeUsed=0;
+    }
+    else
+        c->canMMX2BeUsed=0;
+
+    c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW;
+    c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH;
+
+    // Match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst,
+    // but only for the FAST_BILINEAR mode; otherwise do correct scaling.
+    // n-2 is the last chrominance sample available.
+    // This is not perfect, but no one should notice the difference; the more correct variant
+    // would be like the vertical one, but that would require some special code for the
+    // first and last pixel.
+    if (flags&SWS_FAST_BILINEAR) {
+        if (c->canMMX2BeUsed) {
+            c->lumXInc+= 20;
+            c->chrXInc+= 20;
+        }
+        //we don't use the x86 asm scaler if MMX is available
+        else if (flags & SWS_CPU_CAPS_MMX) {
+            c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
+            c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
+        }
+    }
+
+    /* precalculate horizontal scaler filter coefficients */
+    {
+#if ARCH_X86 && (HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT)
+// can't downscale !!!
+        if (c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR)) {
+            c->lumMmx2FilterCodeSize = initMMX2HScaler(      dstW, c->lumXInc, NULL, NULL, NULL, 8);
+            c->chrMmx2FilterCodeSize = initMMX2HScaler(c->chrDstW, c->chrXInc, NULL, NULL, NULL, 4);
+
+#ifdef MAP_ANONYMOUS
+            c->lumMmx2FilterCode = mmap(NULL, c->lumMmx2FilterCodeSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+            c->chrMmx2FilterCode = mmap(NULL, c->chrMmx2FilterCodeSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+#elif HAVE_VIRTUALALLOC
+            c->lumMmx2FilterCode = VirtualAlloc(NULL, c->lumMmx2FilterCodeSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+            c->chrMmx2FilterCode = VirtualAlloc(NULL, c->chrMmx2FilterCodeSize, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+#else
+            c->lumMmx2FilterCode = av_malloc(c->lumMmx2FilterCodeSize);
+            c->chrMmx2FilterCode = av_malloc(c->chrMmx2FilterCodeSize);
+#endif
+
+            if (!c->lumMmx2FilterCode || !c->chrMmx2FilterCode)
+                goto fail;
+            FF_ALLOCZ_OR_GOTO(c, c->hLumFilter   , (dstW        /8+8)*sizeof(int16_t), fail);
+            FF_ALLOCZ_OR_GOTO(c, c->hChrFilter   , (c->chrDstW  /4+8)*sizeof(int16_t), fail);
+            FF_ALLOCZ_OR_GOTO(c, c->hLumFilterPos, (dstW      /2/8+8)*sizeof(int32_t), fail);
+            FF_ALLOCZ_OR_GOTO(c, c->hChrFilterPos, (c->chrDstW/2/4+8)*sizeof(int32_t), fail);
+
+            initMMX2HScaler(      dstW, c->lumXInc, c->lumMmx2FilterCode, c->hLumFilter, c->hLumFilterPos, 8);
+            initMMX2HScaler(c->chrDstW, c->chrXInc, c->chrMmx2FilterCode, c->hChrFilter, c->hChrFilterPos, 4);
+
+#ifdef MAP_ANONYMOUS
+            mprotect(c->lumMmx2FilterCode, c->lumMmx2FilterCodeSize, PROT_EXEC | PROT_READ);
+            mprotect(c->chrMmx2FilterCode, c->chrMmx2FilterCodeSize, PROT_EXEC | PROT_READ);
+#endif
+        } else
+#endif /* ARCH_X86 && (HAVE_MMX2 || CONFIG_RUNTIME_CPUDETECT) */
+        {
+            const int filterAlign=
+                (flags & SWS_CPU_CAPS_MMX) ? 4 :
+                (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+                1;
+
+            if (initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
+                           srcW      ,       dstW, filterAlign, 1<<14,
+                           (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC)  : flags,
+                           srcFilter->lumH, dstFilter->lumH, c->param) < 0)
+                goto fail;
+            if (initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
+                           c->chrSrcW, c->chrDstW, filterAlign, 1<<14,
+                           (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+                           srcFilter->chrH, dstFilter->chrH, c->param) < 0)
+                goto fail;
+        }
+    } // initialize horizontal stuff
+
+    /* precalculate vertical scaler filter coefficients */
+    {
+        const int filterAlign=
+            (flags & SWS_CPU_CAPS_MMX) && (flags & SWS_ACCURATE_RND) ? 2 :
+            (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+            1;
+
+        if (initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
+                       srcH      ,        dstH, filterAlign, (1<<12),
+                       (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC)  : flags,
+                       srcFilter->lumV, dstFilter->lumV, c->param) < 0)
+            goto fail;
+        if (initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
+                       c->chrSrcH, c->chrDstH, filterAlign, (1<<12),
+                       (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+                       srcFilter->chrV, dstFilter->chrV, c->param) < 0)
+            goto fail;
+
+#if HAVE_ALTIVEC
+        FF_ALLOC_OR_GOTO(c, c->vYCoeffsBank, sizeof (vector signed short)*c->vLumFilterSize*c->dstH, fail);
+        FF_ALLOC_OR_GOTO(c, c->vCCoeffsBank, sizeof (vector signed short)*c->vChrFilterSize*c->chrDstH, fail);
+
+        for (i=0;i<c->vLumFilterSize*c->dstH;i++) {
+            int j;
+            short *p = (short *)&c->vYCoeffsBank[i];
+            for (j=0;j<8;j++)
+                p[j] = c->vLumFilter[i];
+        }
+
+        for (i=0;i<c->vChrFilterSize*c->chrDstH;i++) {
+            int j;
+            short *p = (short *)&c->vCCoeffsBank[i];
+            for (j=0;j<8;j++)
+                p[j] = c->vChrFilter[i];
+        }
+#endif
+    }
+
+    // calculate buffer sizes so that they won't run out while handling these damn slices
+    c->vLumBufSize= c->vLumFilterSize;
+    c->vChrBufSize= c->vChrFilterSize;
+    for (i=0; i<dstH; i++) {
+        int chrI= i*c->chrDstH / dstH;
+        int nextSlice= FFMAX(c->vLumFilterPos[i   ] + c->vLumFilterSize - 1,
+                           ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
+
+        nextSlice>>= c->chrSrcVSubSample;
+        nextSlice<<= c->chrSrcVSubSample;
+        if (c->vLumFilterPos[i   ] + c->vLumBufSize < nextSlice)
+            c->vLumBufSize= nextSlice - c->vLumFilterPos[i];
+        if (c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample))
+            c->vChrBufSize= (nextSlice>>c->chrSrcVSubSample) - c->vChrFilterPos[chrI];
+    }
+
+    // allocate pixbufs (we use dynamic allocation because otherwise we would need to
+    // allocate several megabytes to handle all possible cases)
+    FF_ALLOC_OR_GOTO(c, c->lumPixBuf, c->vLumBufSize*2*sizeof(int16_t*), fail);
+    FF_ALLOC_OR_GOTO(c, c->chrPixBuf, c->vChrBufSize*2*sizeof(int16_t*), fail);
+    if (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat))
+        FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf, c->vLumBufSize*2*sizeof(int16_t*), fail);
+    //Note we need at least one pixel more at the end because of the MMX code (just in case someone wants to replace the 4000/8000)
+    /* align at 16 bytes for AltiVec */
+    for (i=0; i<c->vLumBufSize; i++) {
+        FF_ALLOCZ_OR_GOTO(c, c->lumPixBuf[i+c->vLumBufSize], VOF+1, fail);
+        c->lumPixBuf[i] = c->lumPixBuf[i+c->vLumBufSize];
+    }
+    for (i=0; i<c->vChrBufSize; i++) {
+        FF_ALLOC_OR_GOTO(c, c->chrPixBuf[i+c->vChrBufSize], (VOF+1)*2, fail);
+        c->chrPixBuf[i] = c->chrPixBuf[i+c->vChrBufSize];
+    }
+    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf)
+        for (i=0; i<c->vLumBufSize; i++) {
+            FF_ALLOCZ_OR_GOTO(c, c->alpPixBuf[i+c->vLumBufSize], VOF+1, fail);
+            c->alpPixBuf[i] = c->alpPixBuf[i+c->vLumBufSize];
+        }
+
+    //try to avoid drawing green stuff between the right end and the stride end
+    for (i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, (VOF+1)*2);
+
+    assert(2*VOFW == VOF);
+
+    assert(c->chrDstH <= dstH);
+
+    if (flags&SWS_PRINT_INFO) {
+        if (flags&SWS_FAST_BILINEAR)
+            av_log(c, AV_LOG_INFO, "FAST_BILINEAR scaler, ");
+        else if (flags&SWS_BILINEAR)
+            av_log(c, AV_LOG_INFO, "BILINEAR scaler, ");
+        else if (flags&SWS_BICUBIC)
+            av_log(c, AV_LOG_INFO, "BICUBIC scaler, ");
+        else if (flags&SWS_X)
+            av_log(c, AV_LOG_INFO, "Experimental scaler, ");
+        else if (flags&SWS_POINT)
+            av_log(c, AV_LOG_INFO, "Nearest Neighbor / POINT scaler, ");
+        else if (flags&SWS_AREA)
+            av_log(c, AV_LOG_INFO, "Area Averaging scaler, ");
+        else if (flags&SWS_BICUBLIN)
+            av_log(c, AV_LOG_INFO, "luma BICUBIC / chroma BILINEAR scaler, ");
+        else if (flags&SWS_GAUSS)
+            av_log(c, AV_LOG_INFO, "Gaussian scaler, ");
+        else if (flags&SWS_SINC)
+            av_log(c, AV_LOG_INFO, "Sinc scaler, ");
+        else if (flags&SWS_LANCZOS)
+            av_log(c, AV_LOG_INFO, "Lanczos scaler, ");
+        else if (flags&SWS_SPLINE)
+            av_log(c, AV_LOG_INFO, "Bicubic spline scaler, ");
+        else
+            av_log(c, AV_LOG_INFO, "ehh flags invalid?! ");
+
+        av_log(c, AV_LOG_INFO, "from %s to %s%s ",
+               sws_format_name(srcFormat),
+#ifdef DITHER1XBPP
+               dstFormat == PIX_FMT_BGR555 || dstFormat == PIX_FMT_BGR565 ||
+               dstFormat == PIX_FMT_RGB444BE || dstFormat == PIX_FMT_RGB444LE ||
+               dstFormat == PIX_FMT_BGR444BE || dstFormat == PIX_FMT_BGR444LE ? "dithered " : "",
+#else
+               "",
+#endif
+               sws_format_name(dstFormat));
+
+        if (flags & SWS_CPU_CAPS_MMX2)
+            av_log(c, AV_LOG_INFO, "using MMX2\n");
+        else if (flags & SWS_CPU_CAPS_3DNOW)
+            av_log(c, AV_LOG_INFO, "using 3DNOW\n");
+        else if (flags & SWS_CPU_CAPS_MMX)
+            av_log(c, AV_LOG_INFO, "using MMX\n");
+        else if (flags & SWS_CPU_CAPS_ALTIVEC)
+            av_log(c, AV_LOG_INFO, "using AltiVec\n");
+        else
+            av_log(c, AV_LOG_INFO, "using C\n");
+
+        if (flags & SWS_CPU_CAPS_MMX) {
+            if (c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR))
+                av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
+            else {
+                if (c->hLumFilterSize==4)
+                    av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal luminance scaling\n");
+                else if (c->hLumFilterSize==8)
+                    av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal luminance scaling\n");
+                else
+                    av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal luminance scaling\n");
+
+                if (c->hChrFilterSize==4)
+                    av_log(c, AV_LOG_VERBOSE, "using 4-tap MMX scaler for horizontal chrominance scaling\n");
+                else if (c->hChrFilterSize==8)
+                    av_log(c, AV_LOG_VERBOSE, "using 8-tap MMX scaler for horizontal chrominance scaling\n");
+                else
+                    av_log(c, AV_LOG_VERBOSE, "using n-tap MMX scaler for horizontal chrominance scaling\n");
+            }
+        } else {
+#if ARCH_X86
+            av_log(c, AV_LOG_VERBOSE, "using x86 asm scaler for horizontal scaling\n");
+#else
+            if (flags & SWS_FAST_BILINEAR)
+                av_log(c, AV_LOG_VERBOSE, "using FAST_BILINEAR C scaler for horizontal scaling\n");
+            else
+                av_log(c, AV_LOG_VERBOSE, "using C scaler for horizontal scaling\n");
+#endif
+        }
+        if (isPlanarYUV(dstFormat)) {
+            if (c->vLumFilterSize==1)
+                av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+            else
+                av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+        } else {
+            if (c->vLumFilterSize==1 && c->vChrFilterSize==2)
+                av_log(c, AV_LOG_VERBOSE, "using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
+                       "      2-tap scaler for vertical chrominance scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+            else if (c->vLumFilterSize==2 && c->vChrFilterSize==2)
+                av_log(c, AV_LOG_VERBOSE, "using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+            else
+                av_log(c, AV_LOG_VERBOSE, "using n-tap %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+        }
+
+        if (dstFormat==PIX_FMT_BGR24)
+            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR24 converter\n",
+                   (flags & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"));
+        else if (dstFormat==PIX_FMT_RGB32)
+            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR32 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+        else if (dstFormat==PIX_FMT_BGR565)
+            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR16 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+        else if (dstFormat==PIX_FMT_BGR555)
+            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR15 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+        else if (dstFormat == PIX_FMT_RGB444BE || dstFormat == PIX_FMT_RGB444LE ||
+                 dstFormat == PIX_FMT_BGR444BE || dstFormat == PIX_FMT_BGR444LE)
+            av_log(c, AV_LOG_VERBOSE, "using %s YV12->BGR12 converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+
+        av_log(c, AV_LOG_VERBOSE, "%dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
+        av_log(c, AV_LOG_DEBUG, "lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
+               c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
+        av_log(c, AV_LOG_DEBUG, "chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
+               c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc);
+    }
+
+    c->swScale= ff_getSwsFunc(c);
+    return c;
+
+fail:
+    sws_freeContext(c);
+    return NULL;
+}
+
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+                                float lumaSharpen, float chromaSharpen,
+                                float chromaHShift, float chromaVShift,
+                                int verbose)
+{
+    SwsFilter *filter= av_malloc(sizeof(SwsFilter));
+    if (!filter)
+        return NULL;
+
+    if (lumaGBlur!=0.0) {
+        filter->lumH= sws_getGaussianVec(lumaGBlur, 3.0);
+        filter->lumV= sws_getGaussianVec(lumaGBlur, 3.0);
+    } else {
+        filter->lumH= sws_getIdentityVec();
+        filter->lumV= sws_getIdentityVec();
+    }
+
+    if (chromaGBlur!=0.0) {
+        filter->chrH= sws_getGaussianVec(chromaGBlur, 3.0);
+        filter->chrV= sws_getGaussianVec(chromaGBlur, 3.0);
+    } else {
+        filter->chrH= sws_getIdentityVec();
+        filter->chrV= sws_getIdentityVec();
+    }
+
+    if (chromaSharpen!=0.0) {
+        SwsVector *id= sws_getIdentityVec();
+        sws_scaleVec(filter->chrH, -chromaSharpen);
+        sws_scaleVec(filter->chrV, -chromaSharpen);
+        sws_addVec(filter->chrH, id);
+        sws_addVec(filter->chrV, id);
+        sws_freeVec(id);
+    }
+
+    if (lumaSharpen!=0.0) {
+        SwsVector *id= sws_getIdentityVec();
+        sws_scaleVec(filter->lumH, -lumaSharpen);
+        sws_scaleVec(filter->lumV, -lumaSharpen);
+        sws_addVec(filter->lumH, id);
+        sws_addVec(filter->lumV, id);
+        sws_freeVec(id);
+    }
+
+    if (chromaHShift != 0.0)
+        sws_shiftVec(filter->chrH, (int)(chromaHShift+0.5));
+
+    if (chromaVShift != 0.0)
+        sws_shiftVec(filter->chrV, (int)(chromaVShift+0.5));
+
+    sws_normalizeVec(filter->chrH, 1.0);
+    sws_normalizeVec(filter->chrV, 1.0);
+    sws_normalizeVec(filter->lumH, 1.0);
+    sws_normalizeVec(filter->lumV, 1.0);
+
+    if (verbose) sws_printVec2(filter->chrH, NULL, AV_LOG_DEBUG);
+    if (verbose) sws_printVec2(filter->lumH, NULL, AV_LOG_DEBUG);
+
+    return filter;
+}
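A minimal usage sketch for the constructor above (illustrative only, not part of this commit; the blur value, pixel formats and the srcW/srcH/dstW/dstH variables are assumed to come from the caller). The returned SwsFilter is passed as srcFilter when creating a scaling context and can be freed once the context exists:

    SwsFilter *filt = sws_getDefaultFilter(0.5, 0.0,  /* luma / chroma Gaussian blur */
                                           0.0, 0.0,  /* luma / chroma sharpen */
                                           0.0, 0.0,  /* chroma H / V shift */
                                           0);        /* verbose */
    struct SwsContext *ctx = NULL;
    if (filt) {
        ctx = sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
                             dstW, dstH, PIX_FMT_RGB24,
                             SWS_BICUBIC, filt, NULL, NULL);
        sws_freeFilter(filt);
    }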
+
+SwsVector *sws_allocVec(int length)
+{
+    SwsVector *vec = av_malloc(sizeof(SwsVector));
+    if (!vec)
+        return NULL;
+    vec->length = length;
+    vec->coeff  = av_malloc(sizeof(double) * length);
+    if (!vec->coeff)
+        av_freep(&vec);
+    return vec;
+}
+
+SwsVector *sws_getGaussianVec(double variance, double quality)
+{
+    const int length= (int)(variance*quality + 0.5) | 1;
+    int i;
+    double middle= (length-1)*0.5;
+    SwsVector *vec= sws_allocVec(length);
+
+    if (!vec)
+        return NULL;
+
+    for (i=0; i<length; i++) {
+        double dist= i-middle;
+        vec->coeff[i]= exp(-dist*dist/(2*variance*variance)) / sqrt(2*variance*M_PI);
+    }
+
+    sws_normalizeVec(vec, 1.0);
+
+    return vec;
+}
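As a quick illustration of the length formula above (example values only): variance 1.0 with quality 3.0 gives (int)(1.0*3.0 + 0.5) | 1 = 3 taps, which sws_getGaussianVec() normalizes so they sum to 1.0 (roughly 0.27, 0.45, 0.27):

    SwsVector *g = sws_getGaussianVec(1.0, 3.0);
    if (g) {
        sws_printVec2(g, NULL, AV_LOG_DEBUG); /* dump the normalized taps */
        sws_freeVec(g);
    }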
+
+SwsVector *sws_getConstVec(double c, int length)
+{
+    int i;
+    SwsVector *vec= sws_allocVec(length);
+
+    if (!vec)
+        return NULL;
+
+    for (i=0; i<length; i++)
+        vec->coeff[i]= c;
+
+    return vec;
+}
+
+SwsVector *sws_getIdentityVec(void)
+{
+    return sws_getConstVec(1.0, 1);
+}
+
+static double sws_dcVec(SwsVector *a)
+{
+    int i;
+    double sum=0;
+
+    for (i=0; i<a->length; i++)
+        sum+= a->coeff[i];
+
+    return sum;
+}
+
+void sws_scaleVec(SwsVector *a, double scalar)
+{
+    int i;
+
+    for (i=0; i<a->length; i++)
+        a->coeff[i]*= scalar;
+}
+
+void sws_normalizeVec(SwsVector *a, double height)
+{
+    sws_scaleVec(a, height/sws_dcVec(a));
+}
+
+static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b)
+{
+    int length= a->length + b->length - 1;
+    int i, j;
+    SwsVector *vec= sws_getConstVec(0.0, length);
+
+    if (!vec)
+        return NULL;
+
+    for (i=0; i<a->length; i++) {
+        for (j=0; j<b->length; j++) {
+            vec->coeff[i+j]+= a->coeff[i]*b->coeff[j];
+        }
+    }
+
+    return vec;
+}
+
+static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b)
+{
+    int length= FFMAX(a->length, b->length);
+    int i;
+    SwsVector *vec= sws_getConstVec(0.0, length);
+
+    if (!vec)
+        return NULL;
+
+    for (i=0; i<a->length; i++) vec->coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
+    for (i=0; i<b->length; i++) vec->coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i];
+
+    return vec;
+}
+
+static SwsVector *sws_diffVec(SwsVector *a, SwsVector *b)
+{
+    int length= FFMAX(a->length, b->length);
+    int i;
+    SwsVector *vec= sws_getConstVec(0.0, length);
+
+    if (!vec)
+        return NULL;
+
+    for (i=0; i<a->length; i++) vec->coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
+    for (i=0; i<b->length; i++) vec->coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i];
+
+    return vec;
+}
+
+/* shift the vector to the left, or to the right if "shift" is negative */
+static SwsVector *sws_getShiftedVec(SwsVector *a, int shift)
+{
+    int length= a->length + FFABS(shift)*2;
+    int i;
+    SwsVector *vec= sws_getConstVec(0.0, length);
+
+    if (!vec)
+        return NULL;
+
+    for (i=0; i<a->length; i++) {
+        vec->coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i];
+    }
+
+    return vec;
+}
+
+void sws_shiftVec(SwsVector *a, int shift)
+{
+    SwsVector *shifted= sws_getShiftedVec(a, shift);
+    av_free(a->coeff);
+    a->coeff= shifted->coeff;
+    a->length= shifted->length;
+    av_free(shifted);
+}
+
+void sws_addVec(SwsVector *a, SwsVector *b)
+{
+    SwsVector *sum= sws_sumVec(a, b);
+    av_free(a->coeff);
+    a->coeff= sum->coeff;
+    a->length= sum->length;
+    av_free(sum);
+}
+
+void sws_subVec(SwsVector *a, SwsVector *b)
+{
+    SwsVector *diff= sws_diffVec(a, b);
+    av_free(a->coeff);
+    a->coeff= diff->coeff;
+    a->length= diff->length;
+    av_free(diff);
+}
+
+void sws_convVec(SwsVector *a, SwsVector *b)
+{
+    SwsVector *conv= sws_getConvVec(a, b);
+    av_free(a->coeff);
+    a->coeff= conv->coeff;
+    a->length= conv->length;
+    av_free(conv);
+}
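All of these in-place helpers follow the same pattern: build a new vector, steal its coefficients, free the wrapper. A short sketch of combining them (illustrative only, mirroring the sharpening logic in sws_getDefaultFilter() above):

    SwsVector *blur = sws_getGaussianVec(0.8, 3.0);
    SwsVector *id   = sws_getIdentityVec();
    if (blur && id) {
        sws_scaleVec(blur, -0.5);     /* -0.5 * blur */
        sws_addVec(blur, id);         /* identity - 0.5 * blur */
        sws_normalizeVec(blur, 1.0);  /* make the taps sum to 1.0 */
    }
    sws_freeVec(id);
    /* ... use blur as e.g. filter->lumH, then sws_freeVec(blur) */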
+
+SwsVector *sws_cloneVec(SwsVector *a)
+{
+    int i;
+    SwsVector *vec= sws_allocVec(a->length);
+
+    if (!vec)
+        return NULL;
+
+    for (i=0; i<a->length; i++) vec->coeff[i]= a->coeff[i];
+
+    return vec;
+}
+
+void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level)
+{
+    int i;
+    double max=0;
+    double min=0;
+    double range;
+
+    for (i=0; i<a->length; i++)
+        if (a->coeff[i]>max) max= a->coeff[i];
+
+    for (i=0; i<a->length; i++)
+        if (a->coeff[i]<min) min= a->coeff[i];
+
+    range= max - min;
+
+    for (i=0; i<a->length; i++) {
+        int x= (int)((a->coeff[i]-min)*60.0/range +0.5);
+        av_log(log_ctx, log_level, "%1.3f ", a->coeff[i]);
+        for (;x>0; x--) av_log(log_ctx, log_level, " ");
+        av_log(log_ctx, log_level, "|\n");
+    }
+}
+
+#if LIBSWSCALE_VERSION_MAJOR < 1
+void sws_printVec(SwsVector *a)
+{
+    sws_printVec2(a, NULL, AV_LOG_DEBUG);
+}
+#endif
+
+void sws_freeVec(SwsVector *a)
+{
+    if (!a) return;
+    av_freep(&a->coeff);
+    a->length=0;
+    av_free(a);
+}
+
+void sws_freeFilter(SwsFilter *filter)
+{
+    if (!filter) return;
+
+    if (filter->lumH) sws_freeVec(filter->lumH);
+    if (filter->lumV) sws_freeVec(filter->lumV);
+    if (filter->chrH) sws_freeVec(filter->chrH);
+    if (filter->chrV) sws_freeVec(filter->chrV);
+    av_free(filter);
+}
+
+void sws_freeContext(SwsContext *c)
+{
+    int i;
+    if (!c) return;
+
+    if (c->lumPixBuf) {
+        for (i=0; i<c->vLumBufSize; i++)
+            av_freep(&c->lumPixBuf[i]);
+        av_freep(&c->lumPixBuf);
+    }
+
+    if (c->chrPixBuf) {
+        for (i=0; i<c->vChrBufSize; i++)
+            av_freep(&c->chrPixBuf[i]);
+        av_freep(&c->chrPixBuf);
+    }
+
+    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
+        for (i=0; i<c->vLumBufSize; i++)
+            av_freep(&c->alpPixBuf[i]);
+        av_freep(&c->alpPixBuf);
+    }
+
+    av_freep(&c->vLumFilter);
+    av_freep(&c->vChrFilter);
+    av_freep(&c->hLumFilter);
+    av_freep(&c->hChrFilter);
+#if HAVE_ALTIVEC
+    av_freep(&c->vYCoeffsBank);
+    av_freep(&c->vCCoeffsBank);
+#endif
+
+    av_freep(&c->vLumFilterPos);
+    av_freep(&c->vChrFilterPos);
+    av_freep(&c->hLumFilterPos);
+    av_freep(&c->hChrFilterPos);
+
+#if ARCH_X86
+#ifdef MAP_ANONYMOUS
+    if (c->lumMmx2FilterCode) munmap(c->lumMmx2FilterCode, c->lumMmx2FilterCodeSize);
+    if (c->chrMmx2FilterCode) munmap(c->chrMmx2FilterCode, c->chrMmx2FilterCodeSize);
+#elif HAVE_VIRTUALALLOC
+    if (c->lumMmx2FilterCode) VirtualFree(c->lumMmx2FilterCode, 0, MEM_RELEASE);
+    if (c->chrMmx2FilterCode) VirtualFree(c->chrMmx2FilterCode, 0, MEM_RELEASE);
+#else
+    av_free(c->lumMmx2FilterCode);
+    av_free(c->chrMmx2FilterCode);
+#endif
+    c->lumMmx2FilterCode=NULL;
+    c->chrMmx2FilterCode=NULL;
+#endif /* ARCH_X86 */
+
+    av_freep(&c->yuvTable);
+
+    av_free(c);
+}
+
+struct SwsContext *sws_getCachedContext(struct SwsContext *context,
+                                        int srcW, int srcH, enum PixelFormat srcFormat,
+                                        int dstW, int dstH, enum PixelFormat dstFormat, int flags,
+                                        SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
+{
+    static const double default_param[2] = {SWS_PARAM_DEFAULT, SWS_PARAM_DEFAULT};
+
+    if (!param)
+        param = default_param;
+
+    if (context &&
+        (context->srcW      != srcW      ||
+         context->srcH      != srcH      ||
+         context->srcFormat != srcFormat ||
+         context->dstW      != dstW      ||
+         context->dstH      != dstH      ||
+         context->dstFormat != dstFormat ||
+         context->flags     != flags     ||
+         context->param[0]  != param[0]  ||
+         context->param[1]  != param[1])) {
+        sws_freeContext(context);
+        context = NULL;
+    }
+
+    if (!context) {
+        return sws_getContext(srcW, srcH, srcFormat,
+                              dstW, dstH, dstFormat, flags,
+                              srcFilter, dstFilter, param);
+    }
+    return context;
+}
+
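A usage sketch for sws_getCachedContext() above (hypothetical variable names, not from this commit): call it once per frame and it only rebuilds the context when the geometry, formats, flags or params change:

    static struct SwsContext *sws = NULL;

    sws = sws_getCachedContext(sws, frame_w, frame_h, PIX_FMT_YUV420P,
                               out_w, out_h, PIX_FMT_RGB32,
                               SWS_BILINEAR, NULL, NULL, NULL);
    if (sws)
        sws_scale(sws, src_data, src_stride, 0, frame_h,
                  dst_data, dst_stride);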

Added: branches/0.6/libswscale/x86/yuv2rgb_mmx.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/x86/yuv2rgb_mmx.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,109 @@
+/*
+ * software YUV to RGB converter
+ *
+ * Copyright (C) 2009 Konstantin Shishkov
+ *
+ * MMX/MMX2 template stuff (needed for fast movntq support),
+ * 1,4,8bpp support and context / deglobalize stuff
+ * by Michael Niedermayer (michaelni at gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include "config.h"
+#include "libswscale/rgb2rgb.h"
+#include "libswscale/swscale.h"
+#include "libswscale/swscale_internal.h"
+#include "libavutil/x86_cpu.h"
+
+#define DITHER1XBPP // only for MMX
+
+/* hope these constant values are cache line aligned */
+DECLARE_ASM_CONST(8, uint64_t, mmx_00ffw)   = 0x00ff00ff00ff00ffULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_redmask) = 0xf8f8f8f8f8f8f8f8ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_grnmask) = 0xfcfcfcfcfcfcfcfcULL;
+
+//MMX versions
+#undef RENAME
+#undef HAVE_MMX2
+#undef HAVE_AMD3DNOW
+#define HAVE_MMX2 0
+#define HAVE_AMD3DNOW 0
+#define RENAME(a) a ## _MMX
+#if CONFIG_GPL
+#include "yuv2rgb_template.c"
+#else
+#include "yuv2rgb_template2.c"
+#endif
+
+//MMX2 versions
+#undef RENAME
+#undef HAVE_MMX2
+#define HAVE_MMX2 1
+#define RENAME(a) a ## _MMX2
+#if CONFIG_GPL
+#include "yuv2rgb_template.c"
+#else
+#include "yuv2rgb_template2.c"
+#endif
+
+SwsFunc ff_yuv2rgb_init_mmx(SwsContext *c)
+{
+    if (c->flags & SWS_CPU_CAPS_MMX2) {
+        switch (c->dstFormat) {
+        case PIX_FMT_RGB32:
+            if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) {
+                if (HAVE_7REGS) return yuva420_rgb32_MMX2;
+                break;
+            } else return yuv420_rgb32_MMX2;
+        case PIX_FMT_BGR32:
+            if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) {
+                if (HAVE_7REGS) return yuva420_bgr32_MMX2;
+                break;
+            } else return yuv420_bgr32_MMX2;
+        case PIX_FMT_RGB24:  return yuv420_rgb24_MMX2;
+        case PIX_FMT_BGR24:  return yuv420_bgr24_MMX2;
+        case PIX_FMT_RGB565: return yuv420_rgb16_MMX2;
+        case PIX_FMT_RGB555: return yuv420_rgb15_MMX2;
+        }
+    }
+    if (c->flags & SWS_CPU_CAPS_MMX) {
+        switch (c->dstFormat) {
+        case PIX_FMT_RGB32:
+            if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) {
+                if (HAVE_7REGS) return yuva420_rgb32_MMX;
+                break;
+            } else return yuv420_rgb32_MMX;
+        case PIX_FMT_BGR32:
+            if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) {
+                if (HAVE_7REGS) return yuva420_bgr32_MMX;
+                break;
+            } else return yuv420_bgr32_MMX;
+        case PIX_FMT_RGB24:  return yuv420_rgb24_MMX;
+        case PIX_FMT_BGR24:  return yuv420_bgr24_MMX;
+        case PIX_FMT_RGB565: return yuv420_rgb16_MMX;
+        case PIX_FMT_RGB555: return yuv420_rgb15_MMX;
+        }
+    }
+
+    return NULL;
+}

Added: branches/0.6/libswscale/x86/yuv2rgb_template.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/x86/yuv2rgb_template.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,564 @@
+/*
+ * yuv2rgb_mmx.c, software YUV to RGB converter with Intel MMX "technology"
+ *
+ * Copyright (C) 2000, Silicon Integrated System Corp
+ *
+ * Author: Olie Lho <ollie at sis.com.tw>
+ *
+ * 15,24 bpp and dithering from Michael Niedermayer (michaelni at gmx.at)
+ * MMX/MMX2 Template stuff from Michael Niedermayer (needed for fast movntq support)
+ * context / deglobalize stuff by Michael Niedermayer
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video decoder
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with mpeg2dec; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef MOVNTQ
+#undef EMMS
+#undef SFENCE
+
+#if HAVE_AMD3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS     "femms"
+#else
+#define EMMS     "emms"
+#endif
+
+#if HAVE_MMX2
+#define MOVNTQ "movntq"
+#define SFENCE "sfence"
+#else
+#define MOVNTQ "movq"
+#define SFENCE " # nop"
+#endif
+
+#define YUV2RGB \
+    /* Do the multiply part of the conversion for even and odd pixels,
+       register usage:
+       mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+       mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+       mm6 -> Y even, mm7 -> Y odd */\
+    /* convert the chroma part */\
+    "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+    "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+\
+    "psllw $3, %%mm0;" /* Promote precision */ \
+    "psllw $3, %%mm1;" /* Promote precision */ \
+\
+    "psubsw "U_OFFSET"(%4), %%mm0;" /* Cb -= 128 */ \
+    "psubsw "V_OFFSET"(%4), %%mm1;" /* Cr -= 128 */ \
+\
+    "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+    "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+\
+    "pmulhw "UG_COEFF"(%4), %%mm2;" /* Mul Cb with green coeff -> Cb green */ \
+    "pmulhw "VG_COEFF"(%4), %%mm3;" /* Mul Cr with green coeff -> Cr green */ \
+\
+    "pmulhw "UB_COEFF"(%4), %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\
+    "pmulhw "VR_COEFF"(%4), %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\
+\
+    "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */\
+\
+    /* convert the luma part */\
+    "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
+    "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
+\
+    "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\
+\
+    "psllw $3, %%mm6;" /* Promote precision */\
+    "psllw $3, %%mm7;" /* Promote precision */\
+\
+    "psubw "Y_OFFSET"(%4), %%mm6;" /* Y -= 16 */\
+    "psubw "Y_OFFSET"(%4), %%mm7;" /* Y -= 16 */\
+\
+    "pmulhw "Y_COEFF"(%4), %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\
+    "pmulhw "Y_COEFF"(%4), %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\
+\
+    /* Do the addition part of the conversion for even and odd pixels,
+       register usage:
+       mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+       mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+       mm6 -> Y even, mm7 -> Y odd */\
+    "movq %%mm0, %%mm3;" /* Copy Cblue */\
+    "movq %%mm1, %%mm4;" /* Copy Cred */\
+    "movq %%mm2, %%mm5;" /* Copy Cgreen */\
+\
+    "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */\
+    "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */\
+\
+    "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */\
+    "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */\
+\
+    "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */\
+    "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */\
+\
+    /* Limit RGB even to 0..255 */\
+    "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0  B6 B4 B2 B0 */\
+    "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0  R6 R4 R2 R0 */\
+    "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0  G6 G4 G2 G0 */\
+\
+    /* Limit RGB odd to 0..255 */\
+    "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1  B7 B5 B3 B1 */\
+    "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1  R7 R5 R3 R1 */\
+    "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1  G7 G5 G3 G1 */\
+\
+    /* Interleave RGB even and odd */\
+    "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */\
+    "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */\
+    "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */\
+
+
+#define YUV422_UNSHIFT                   \
+    if(c->srcFormat == PIX_FMT_YUV422P) {\
+        srcStride[1] *= 2;               \
+        srcStride[2] *= 2;               \
+    }                                    \
+
+#define YUV2RGB_LOOP(depth)                                   \
+    h_size= (c->dstW+7)&~7;                                   \
+    if(h_size*depth > FFABS(dstStride[0])) h_size-=8;         \
+\
+    __asm__ volatile ("pxor %mm4, %mm4;" /* zero mm4 */ );    \
+    for (y= 0; y<srcSliceH; y++ ) {                           \
+        uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0]; \
+        const uint8_t *py = src[0] + y*srcStride[0];          \
+        const uint8_t *pu = src[1] + (y>>1)*srcStride[1];     \
+        const uint8_t *pv = src[2] + (y>>1)*srcStride[2];     \
+        x86_reg index= -h_size/2;                                \
+
+#define YUV2RGB_INIT                                                       \
+        /* This MMX assembly code deals with a SINGLE scan line at a time, \
+         * it converts 8 pixels in each iteration. */                      \
+        __asm__ volatile (                                                 \
+        /* load data for start of next scan line */                        \
+        "movd    (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \
+        "movd    (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \
+        "movq (%5, %0, 2), %%mm6;" /* Load 8  Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \
+        /*                                                                 \
+        ".balign 16     \n\t"                                              \
+        */                                                                 \
+        "1:             \n\t"                                              \
+        /* No speed difference on my P3 at 500 MHz with prefetch;             \
+         * if it is faster for anyone with -benchmark, please report.         \
+        PREFETCH" 64(%0) \n\t"                                             \
+        PREFETCH" 64(%1) \n\t"                                             \
+        PREFETCH" 64(%2) \n\t"                                             \
+        */                                                                 \
+
+#define YUV2RGB_ENDLOOP(depth) \
+        "add $"AV_STRINGIFY(depth*8)", %1    \n\t" \
+        "add                       $4, %0    \n\t" \
+        " js                       1b        \n\t" \
+
+#define YUV2RGB_OPERANDS \
+        : "+r" (index), "+r" (image) \
+        : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index) \
+        ); \
+    } \
+    __asm__ volatile (SFENCE"\n\t"EMMS); \
+    return srcSliceH; \
+
+#define YUV2RGB_OPERANDS_ALPHA \
+        : "+r" (index), "+r" (image) \
+        : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index), "r" (pa - 2*index) \
+        ); \
+    } \
+    __asm__ volatile (SFENCE"\n\t"EMMS); \
+    return srcSliceH; \
+
+static inline int RENAME(yuv420_rgb16)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                       int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV422_UNSHIFT
+    YUV2RGB_LOOP(2)
+
+        c->blueDither= ff_dither8[y&1];
+        c->greenDither= ff_dither4[y&1];
+        c->redDither= ff_dither8[(y+1)&1];
+
+        YUV2RGB_INIT
+        YUV2RGB
+
+#ifdef DITHER1XBPP
+        "paddusb "BLUE_DITHER"(%4), %%mm0;"
+        "paddusb "GREEN_DITHER"(%4), %%mm2;"
+        "paddusb "RED_DITHER"(%4), %%mm1;"
+#endif
+        /* mask unneeded bits off */
+        "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+        "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
+        "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+        "psrlw   $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+        "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+        "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+        "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+        /* convert RGB24 plane to RGB16 pack for pixel 0-3 */
+        "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+        "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+        "psllw  $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+        "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+
+        "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+        MOVNTQ "      %%mm0, (%1);" /* store pixel 0-3 */
+
+        /* convert RGB24 plane to RGB16 pack for pixel 4-7 */
+        "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+        "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+        "psllw        $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+        "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+        "por       %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+        "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+        MOVNTQ "   %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+    YUV2RGB_ENDLOOP(2)
+    YUV2RGB_OPERANDS
+}
+
+static inline int RENAME(yuv420_rgb15)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                       int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV422_UNSHIFT
+    YUV2RGB_LOOP(2)
+
+        c->blueDither= ff_dither8[y&1];
+        c->greenDither= ff_dither8[y&1];
+        c->redDither= ff_dither8[(y+1)&1];
+
+        YUV2RGB_INIT
+        YUV2RGB
+
+#ifdef DITHER1XBPP
+        "paddusb "BLUE_DITHER"(%4), %%mm0  \n\t"
+        "paddusb "GREEN_DITHER"(%4), %%mm2  \n\t"
+        "paddusb "RED_DITHER"(%4), %%mm1  \n\t"
+#endif
+
+        /* mask unneeded bits off */
+        "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+        "pand "MANGLE(mmx_redmask)", %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */
+        "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+        "psrlw   $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+        "psrlw   $1, %%mm1;" /* 0_r7r6r5  r4r3_0_0 0_r7r6r5 r4r3_0_0 */
+        "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+        "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+        "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+        /* convert RGB24 plane to RGB15 pack for pixel 0-3 */
+        "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3_0_0_0 */
+        "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+        "psllw  $2, %%mm2;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+        "por %%mm2, %%mm0;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+
+        "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+        MOVNTQ "      %%mm0, (%1);"  /* store pixel 0-3 */
+
+        /* convert RGB24 plane to RGB15 pack for pixel 4-7 */
+        "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 0_g7g6g5 g4g3_0_0 */
+        "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+        "psllw        $2, %%mm7;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+        "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+        "por       %%mm7, %%mm5;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+        "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+        MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+    YUV2RGB_ENDLOOP(2)
+    YUV2RGB_OPERANDS
+}
+
+#undef RGB_PLANAR2PACKED24
+#if HAVE_MMX2
+#define RGB_PLANAR2PACKED24(red, blue)\
+        "movq "MANGLE(ff_M24A)", %%mm4     \n\t"\
+        "movq "MANGLE(ff_M24C)", %%mm7     \n\t"\
+        "pshufw $0x50, %%mm"blue", %%mm5   \n\t" /* B3 B2 B3 B2  B1 B0 B1 B0 */\
+        "pshufw $0x50, %%mm2, %%mm3     \n\t" /* G3 G2 G3 G2  G1 G0 G1 G0 */\
+        "pshufw $0x00, %%mm"red", %%mm6 \n\t" /* R1 R0 R1 R0  R1 R0 R1 R0 */\
+\
+        "pand   %%mm4, %%mm5            \n\t" /*    B2        B1       B0 */\
+        "pand   %%mm4, %%mm3            \n\t" /*    G2        G1       G0 */\
+        "pand   %%mm7, %%mm6            \n\t" /*       R1        R0       */\
+\
+        "psllq     $8, %%mm3            \n\t" /* G2        G1       G0    */\
+        "por    %%mm5, %%mm6            \n\t"\
+        "por    %%mm3, %%mm6            \n\t"\
+        MOVNTQ" %%mm6, (%1)             \n\t"\
+\
+        "psrlq     $8, %%mm2            \n\t" /* 00 G7 G6 G5  G4 G3 G2 G1 */\
+        "pshufw $0xA5, %%mm"blue", %%mm5\n\t" /* B5 B4 B5 B4  B3 B2 B3 B2 */\
+        "pshufw $0x55, %%mm2, %%mm3     \n\t" /* G4 G3 G4 G3  G4 G3 G4 G3 */\
+        "pshufw $0xA5, %%mm"red", %%mm6 \n\t" /* R5 R4 R5 R4  R3 R2 R3 R2 */\
+\
+        "pand "MANGLE(ff_M24B)", %%mm5  \n\t" /* B5       B4        B3    */\
+        "pand          %%mm7, %%mm3     \n\t" /*       G4        G3       */\
+        "pand          %%mm4, %%mm6     \n\t" /*    R4        R3       R2 */\
+\
+        "por    %%mm5, %%mm3            \n\t" /* B5    G4 B4     G3 B3    */\
+        "por    %%mm3, %%mm6            \n\t"\
+        MOVNTQ" %%mm6, 8(%1)            \n\t"\
+\
+        "pshufw $0xFF, %%mm"blue", %%mm5\n\t" /* B7 B6 B7 B6  B7 B6 B6 B7 */\
+        "pshufw $0xFA, %%mm2, %%mm3     \n\t" /* 00 G7 00 G7  G6 G5 G6 G5 */\
+        "pshufw $0xFA, %%mm"red", %%mm6 \n\t" /* R7 R6 R7 R6  R5 R4 R5 R4 */\
+        "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */\
+\
+        "pand          %%mm7, %%mm5     \n\t" /*       B7        B6       */\
+        "pand          %%mm4, %%mm3     \n\t" /*    G7        G6       G5 */\
+        "pand "MANGLE(ff_M24B)", %%mm6  \n\t" /* R7       R6        R5    */\
+        "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */\
+\
+        "por          %%mm5, %%mm3      \n\t"\
+        "por          %%mm3, %%mm6      \n\t"\
+        MOVNTQ"       %%mm6, 16(%1)     \n\t"\
+        "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
+        "pxor         %%mm4, %%mm4      \n\t"
+#else
+#define RGB_PLANAR2PACKED24(red, blue)\
+        "pxor      %%mm4, %%mm4     \n\t"\
+        "movq      %%mm"blue", %%mm5\n\t" /* B */\
+        "movq      %%mm"red", %%mm6 \n\t" /* R */\
+        "punpcklbw %%mm2, %%mm"blue"\n\t" /* GBGBGBGB 0 */\
+        "punpcklbw %%mm4, %%mm"red" \n\t" /* 0R0R0R0R 0 */\
+        "punpckhbw %%mm2, %%mm5     \n\t" /* GBGBGBGB 2 */\
+        "punpckhbw %%mm4, %%mm6     \n\t" /* 0R0R0R0R 2 */\
+        "movq      %%mm"blue", %%mm7\n\t" /* GBGBGBGB 0 */\
+        "movq      %%mm5, %%mm3     \n\t" /* GBGBGBGB 2 */\
+        "punpcklwd %%mm"red", %%mm7 \n\t" /* 0RGB0RGB 0 */\
+        "punpckhwd %%mm"red", %%mm"blue"\n\t" /* 0RGB0RGB 1 */\
+        "punpcklwd %%mm6, %%mm5     \n\t" /* 0RGB0RGB 2 */\
+        "punpckhwd %%mm6, %%mm3     \n\t" /* 0RGB0RGB 3 */\
+\
+        "movq      %%mm7, %%mm2     \n\t" /* 0RGB0RGB 0 */\
+        "movq      %%mm"blue", %%mm6\n\t" /* 0RGB0RGB 1 */\
+        "movq      %%mm5, %%mm"red" \n\t" /* 0RGB0RGB 2 */\
+        "movq      %%mm3, %%mm4     \n\t" /* 0RGB0RGB 3 */\
+\
+        "psllq       $40, %%mm7     \n\t" /* RGB00000 0 */\
+        "psllq       $40, %%mm"blue"\n\t" /* RGB00000 1 */\
+        "psllq       $40, %%mm5     \n\t" /* RGB00000 2 */\
+        "psllq       $40, %%mm3     \n\t" /* RGB00000 3 */\
+\
+        "punpckhdq %%mm2, %%mm7     \n\t" /* 0RGBRGB0 0 */\
+        "punpckhdq %%mm6, %%mm"blue"\n\t" /* 0RGBRGB0 1 */\
+        "punpckhdq %%mm"red", %%mm5 \n\t" /* 0RGBRGB0 2 */\
+        "punpckhdq %%mm4, %%mm3     \n\t" /* 0RGBRGB0 3 */\
+\
+        "psrlq        $8, %%mm7     \n\t" /* 00RGBRGB 0 */\
+        "movq      %%mm"blue", %%mm6\n\t" /* 0RGBRGB0 1 */\
+        "psllq       $40, %%mm"blue"\n\t" /* GB000000 1 */\
+        "por       %%mm"blue", %%mm7\n\t" /* GBRGBRGB 0 */\
+        MOVNTQ"    %%mm7, (%1)      \n\t"\
+\
+        "psrlq       $24, %%mm6     \n\t" /* 0000RGBR 1 */\
+        "movq      %%mm5, %%mm"red" \n\t" /* 0RGBRGB0 2 */\
+        "psllq       $24, %%mm5     \n\t" /* BRGB0000 2 */\
+        "por       %%mm5, %%mm6     \n\t" /* BRGBRGBR 1 */\
+        MOVNTQ"    %%mm6, 8(%1)     \n\t"\
+\
+        "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
+\
+        "psrlq       $40, %%mm"red" \n\t" /* 000000RG 2 */\
+        "psllq        $8, %%mm3     \n\t" /* RGBRGB00 3 */\
+        "por       %%mm3, %%mm"red" \n\t" /* RGBRGBRG 2 */\
+        MOVNTQ"    %%mm"red", 16(%1)\n\t"\
+\
+        "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */\
+        "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */\
+        "pxor      %%mm4, %%mm4     \n\t"
+#endif
+
+static inline int RENAME(yuv420_rgb24)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                       int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV422_UNSHIFT
+    YUV2RGB_LOOP(3)
+
+        YUV2RGB_INIT
+        YUV2RGB
+        /* mm0=B, %%mm2=G, %%mm1=R */
+        RGB_PLANAR2PACKED24("0", "1")
+
+    YUV2RGB_ENDLOOP(3)
+    YUV2RGB_OPERANDS
+}
+
+static inline int RENAME(yuv420_bgr24)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                       int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV422_UNSHIFT
+    YUV2RGB_LOOP(3)
+
+        YUV2RGB_INIT
+        YUV2RGB
+        /* mm0=B, %%mm2=G, %%mm1=R */
+        RGB_PLANAR2PACKED24("1", "0")
+
+    YUV2RGB_ENDLOOP(3)
+    YUV2RGB_OPERANDS
+}
+
+/*
+
+RGB_PLANAR2PACKED32(red,green,blue,alpha)
+
+converts planar RGB data to a packed RGB format
+
+the macro parameters specify the output color channel order:
+
+RGB_PLANAR2PACKED32(REG_RED,  REG_GREEN, REG_BLUE, REG_ALPHA) for RGBA output,
+RGB_PLANAR2PACKED32(REG_BLUE, REG_GREEN, REG_RED,  REG_ALPHA) for BGRA output,
+RGB_PLANAR2PACKED32(REG_ALPHA,REG_BLUE,  REG_GREEN,REG_RED)   for ABGR output,
+
+etc.
+*/
+
+#define REG_BLUE  "0"
+#define REG_RED   "1"
+#define REG_GREEN "2"
+#define REG_ALPHA "3"
+
+#define RGB_PLANAR2PACKED32(red,green,blue,alpha)                       \
+    /* convert RGB plane to RGB packed format,                          \
+       mm0 ->  B, mm1 -> R, mm2 -> G, mm3 -> A,                         \
+       mm4 -> GB, mm5 -> AR pixel 4-7,                                  \
+       mm6 -> GB, mm7 -> AR pixel 0-3 */                                \
+    "movq      %%mm" blue ", %%mm6;"   /* B7 B6 B5 B4 B3 B2 B1 B0 */    \
+    "movq      %%mm" red  ", %%mm7;"   /* R7 R6 R5 R4 R3 R2 R1 R0 */    \
+\
+    "movq      %%mm" blue ", %%mm4;"   /* B7 B6 B5 B4 B3 B2 B1 B0 */    \
+    "movq      %%mm" red  ", %%mm5;"   /* R7 R6 R5 R4 R3 R2 R1 R0 */    \
+\
+    "punpcklbw %%mm" green ", %%mm6;"  /* G3 B3 G2 B2 G1 B1 G0 B0 */    \
+    "punpcklbw %%mm" alpha ", %%mm7;"  /* A3 R3 A2 R2 A1 R1 A0 R0 */    \
+\
+    "punpcklwd %%mm7, %%mm6;"          /* A1 R1 B1 G1 A0 R0 B0 G0 */    \
+    MOVNTQ "   %%mm6, (%1);"           /* Store ARGB1 ARGB0 */          \
+\
+    "movq      %%mm" blue ", %%mm6;"   /* B7 B6 B5 B4 B3 B2 B1 B0 */    \
+    "punpcklbw %%mm" green ", %%mm6;"  /* G3 B3 G2 B2 G1 B1 G0 B0 */    \
+\
+    "punpckhwd %%mm7, %%mm6;"          /* A3 R3 G3 B3 A2 R2 B3 G2 */    \
+    MOVNTQ "   %%mm6, 8 (%1);"         /* Store ARGB3 ARGB2 */          \
+\
+    "punpckhbw %%mm" green ", %%mm4;"  /* G7 B7 G6 B6 G5 B5 G4 B4 */    \
+    "punpckhbw %%mm" alpha ", %%mm5;"  /* A7 R7 A6 R6 A5 R5 A4 R4 */    \
+\
+    "punpcklwd %%mm5, %%mm4;"          /* A5 R5 B5 G5 A4 R4 B4 G4 */    \
+    MOVNTQ "   %%mm4, 16 (%1);"        /* Store ARGB5 ARGB4 */          \
+\
+    "movq      %%mm" blue ", %%mm4;"   /* B7 B6 B5 B4 B3 B2 B1 B0 */    \
+    "punpckhbw %%mm" green ", %%mm4;"  /* G7 B7 G6 B6 G5 B5 G4 B4 */    \
+\
+    "punpckhwd %%mm5, %%mm4;"   /* A7 R7 G7 B7 A6 R6 B6 G6 */           \
+    MOVNTQ "   %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */                 \
+\
+    "movd 4 (%2, %0), %%mm0;"   /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */ \
+    "movd 4 (%3, %0), %%mm1;"   /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */ \
+\
+    "pxor         %%mm4, %%mm4;" /* zero mm4 */                         \
+    "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */ \
+
+static inline int RENAME(yuv420_rgb32)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                       int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV422_UNSHIFT
+    YUV2RGB_LOOP(4)
+
+        YUV2RGB_INIT
+        YUV2RGB
+        "pcmpeqd   %%mm3, %%mm3;"   /* fill mm3 */
+        RGB_PLANAR2PACKED32(REG_RED,REG_GREEN,REG_BLUE,REG_ALPHA)
+
+    YUV2RGB_ENDLOOP(4)
+    YUV2RGB_OPERANDS
+}
+
+static inline int RENAME(yuva420_rgb32)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                        int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+#if HAVE_7REGS
+    int y, h_size;
+
+    YUV2RGB_LOOP(4)
+
+        const uint8_t *pa = src[3] + y*srcStride[3];
+        YUV2RGB_INIT
+        YUV2RGB
+        "movq     (%6, %0, 2), %%mm3;"            /* Load 8 A A7 A6 A5 A4 A3 A2 A1 A0 */
+        RGB_PLANAR2PACKED32(REG_RED,REG_GREEN,REG_BLUE,REG_ALPHA)
+
+    YUV2RGB_ENDLOOP(4)
+    YUV2RGB_OPERANDS_ALPHA
+#endif
+}
+
+static inline int RENAME(yuv420_bgr32)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                       int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV422_UNSHIFT
+    YUV2RGB_LOOP(4)
+
+        YUV2RGB_INIT
+        YUV2RGB
+        "pcmpeqd   %%mm3, %%mm3;"   /* fill mm3 */
+        RGB_PLANAR2PACKED32(REG_BLUE,REG_GREEN,REG_RED,REG_ALPHA)
+
+    YUV2RGB_ENDLOOP(4)
+    YUV2RGB_OPERANDS
+}
+
+static inline int RENAME(yuva420_bgr32)(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY,
+                                        int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+#if HAVE_7REGS
+    int y, h_size;
+
+    YUV2RGB_LOOP(4)
+
+        const uint8_t *pa = src[3] + y*srcStride[3];
+        YUV2RGB_INIT
+        YUV2RGB
+        "movq     (%6, %0, 2), %%mm3;"            /* Load 8 A A7 A6 A5 A4 A3 A2 A1 A0 */
+        RGB_PLANAR2PACKED32(REG_BLUE,REG_GREEN,REG_RED,REG_ALPHA)
+
+    YUV2RGB_ENDLOOP(4)
+    YUV2RGB_OPERANDS_ALPHA
+#endif
+}

Added: branches/0.6/libswscale/x86/yuv2rgb_template2.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/x86/yuv2rgb_template2.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,459 @@
+/*
+ * software YUV to RGB converter
+ *
+ * Copyright (C) 2001-2007 Michael Niedermayer
+ *           (c) 2010 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#undef MOVNTQ
+#undef EMMS
+#undef SFENCE
+
+#if HAVE_AMD3DNOW
+/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
+#define EMMS   "femms"
+#else
+#define EMMS   "emms"
+#endif
+
+#if HAVE_MMX2
+#define MOVNTQ "movntq"
+#define SFENCE "sfence"
+#else
+#define MOVNTQ "movq"
+#define SFENCE " # nop"
+#endif
+
+#define REG_BLUE  "0"
+#define REG_RED   "1"
+#define REG_GREEN "2"
+#define REG_ALPHA "3"
+
+#define YUV2RGB_LOOP(depth)                                          \
+    h_size = (c->dstW + 7) & ~7;                                     \
+    if (h_size * depth > FFABS(dstStride[0]))                        \
+        h_size -= 8;                                                 \
+                                                                     \
+    if (c->srcFormat == PIX_FMT_YUV422P) {                           \
+        srcStride[1] *= 2;                                           \
+        srcStride[2] *= 2;                                           \
+    }                                                                \
+                                                                     \
+    __asm__ volatile ("pxor %mm4, %mm4\n\t");                        \
+    for (y = 0; y < srcSliceH; y++) {                                \
+        uint8_t *image    = dst[0] + (y + srcSliceY) * dstStride[0]; \
+        const uint8_t *py = src[0] +               y * srcStride[0]; \
+        const uint8_t *pu = src[1] +        (y >> 1) * srcStride[1]; \
+        const uint8_t *pv = src[2] +        (y >> 1) * srcStride[2]; \
+        x86_reg index = -h_size / 2;                                 \
+
+#define YUV2RGB_INITIAL_LOAD          \
+    __asm__ volatile (                \
+        "movq (%5, %0, 2), %%mm6\n\t" \
+        "movd    (%2, %0), %%mm0\n\t" \
+        "movd    (%3, %0), %%mm1\n\t" \
+        "1: \n\t"                     \
+
+/* YUV2RGB core
+ * Conversion is performed in the usual way:
+ * R = Y' * Ycoef + Vred * V'
+ * G = Y' * Ycoef + Vgreen * V' + Ugreen * U'
+ * B = Y' * Ycoef               + Ublue * U'
+ *
+ * where X' = X * 8 - Xoffset (multiplication is performed to increase
+ * precision a bit).
+ * Since it operates in the YUV420 colorspace, the Y component is additionally
+ * split into Y1 and Y2 for even and odd pixels.
+ *
+ * Input:
+ * mm0 - U (4 elems), mm1 - V (4 elems), mm6 - Y (8 elems), mm4 - zero register
+ * Output:
+ * mm1 - R, mm2 - G, mm0 - B
+ */
+#define YUV2RGB                                  \
+    /* convert Y, U, V into Y1', Y2', U', V' */  \
+    "movq      %%mm6, %%mm7\n\t"                 \
+    "punpcklbw %%mm4, %%mm0\n\t"                 \
+    "punpcklbw %%mm4, %%mm1\n\t"                 \
+    "pand     "MANGLE(mmx_00ffw)", %%mm6\n\t"    \
+    "psrlw     $8,    %%mm7\n\t"                 \
+    "psllw     $3,    %%mm0\n\t"                 \
+    "psllw     $3,    %%mm1\n\t"                 \
+    "psllw     $3,    %%mm6\n\t"                 \
+    "psllw     $3,    %%mm7\n\t"                 \
+    "psubsw   "U_OFFSET"(%4), %%mm0\n\t"         \
+    "psubsw   "V_OFFSET"(%4), %%mm1\n\t"         \
+    "psubw    "Y_OFFSET"(%4), %%mm6\n\t"         \
+    "psubw    "Y_OFFSET"(%4), %%mm7\n\t"         \
+\
+     /* multiply by coefficients */              \
+    "movq      %%mm0, %%mm2\n\t"                 \
+    "movq      %%mm1, %%mm3\n\t"                 \
+    "pmulhw   "UG_COEFF"(%4), %%mm2\n\t"         \
+    "pmulhw   "VG_COEFF"(%4), %%mm3\n\t"         \
+    "pmulhw   "Y_COEFF" (%4), %%mm6\n\t"         \
+    "pmulhw   "Y_COEFF" (%4), %%mm7\n\t"         \
+    "pmulhw   "UB_COEFF"(%4), %%mm0\n\t"         \
+    "pmulhw   "VR_COEFF"(%4), %%mm1\n\t"         \
+    "paddsw    %%mm3, %%mm2\n\t"                 \
+    /* now: mm0 = UB, mm1 = VR, mm2 = CG */      \
+    /*      mm6 = Y1, mm7 = Y2 */                \
+\
+    /* produce RGB */                            \
+    "movq      %%mm7, %%mm3\n\t"                 \
+    "movq      %%mm7, %%mm5\n\t"                 \
+    "paddsw    %%mm0, %%mm3\n\t"                 \
+    "paddsw    %%mm1, %%mm5\n\t"                 \
+    "paddsw    %%mm2, %%mm7\n\t"                 \
+    "paddsw    %%mm6, %%mm0\n\t"                 \
+    "paddsw    %%mm6, %%mm1\n\t"                 \
+    "paddsw    %%mm6, %%mm2\n\t"                 \
+\
+    /* pack and interleave even/odd pixels */    \
+    "packuswb  %%mm0, %%mm0\n\t"                 \
+    "packuswb  %%mm1, %%mm1\n\t"                 \
+    "packuswb  %%mm2, %%mm2\n\t"                 \
+    "packuswb  %%mm3, %%mm3\n\t"                 \
+    "packuswb  %%mm5, %%mm5\n\t"                 \
+    "packuswb  %%mm7, %%mm7\n\t"                 \
+    "punpcklbw %%mm3, %%mm0\n\t"                 \
+    "punpcklbw %%mm5, %%mm1\n\t"                 \
+    "punpcklbw %%mm7, %%mm2\n\t"                 \
+
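For reference, a plain-C sketch of the per-pixel arithmetic the macro above encodes (the *_coeff / *_offset names stand in for the fixed-point tables kept in SwsContext; this is only an illustration, not the library's C code path). pmulhw keeps the high 16 bits of each product, and packuswb provides the final clamp to 0..255:

    static inline uint8_t clip8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    /* y, u, v: 8-bit source samples for one pixel */
    int y_ = (((y << 3) - y_offset) * y_coeff) >> 16;  /* Y' = (Y*8 - Yoff) * Ycoef */
    int u_ =  ((u << 3) - u_offset);                   /* U' =  U*8 - Uoff          */
    int v_ =  ((v << 3) - v_offset);                   /* V' =  V*8 - Voff          */

    uint8_t r = clip8(y_ + ((v_ * vr_coeff) >> 16));
    uint8_t g = clip8(y_ + ((v_ * vg_coeff + u_ * ug_coeff) >> 16));
    uint8_t b = clip8(y_ + ((u_ * ub_coeff) >> 16));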
+#define YUV2RGB_ENDLOOP(depth)                   \
+    "movq 8 (%5, %0, 2), %%mm6\n\t"              \
+    "movd 4 (%3, %0),    %%mm1\n\t"              \
+    "movd 4 (%2, %0),    %%mm0\n\t"              \
+    "add $"AV_STRINGIFY(depth * 8)", %1\n\t"     \
+    "add  $4, %0\n\t"                            \
+    "js   1b\n\t"                                \
+
+#define YUV2RGB_OPERANDS                                          \
+        : "+r" (index), "+r" (image)                              \
+        : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), \
+          "r" (py - 2*index)                                      \
+        );                                                        \
+    }                                                             \
+
+#define YUV2RGB_OPERANDS_ALPHA                                    \
+        : "+r" (index), "+r" (image)                              \
+        : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), \
+          "r" (py - 2*index), "r" (pa - 2*index)                  \
+        );                                                        \
+    }                                                             \
+
+#define YUV2RGB_ENDFUNC                          \
+    __asm__ volatile (SFENCE"\n\t"EMMS);         \
+    return srcSliceH;                            \
+
+
+#define RGB_PACK16(gmask, gshift, rshift)        \
+    "pand      "MANGLE(mmx_redmask)", %%mm0\n\t" \
+    "pand      "MANGLE(mmx_redmask)", %%mm1\n\t" \
+    "psrlw     $3,        %%mm0\n\t"             \
+    "pand      "MANGLE(gmask)",       %%mm2\n\t" \
+    "movq      %%mm0,     %%mm5\n\t"             \
+    "movq      %%mm1,     %%mm6\n\t"             \
+    "movq      %%mm2,     %%mm7\n\t"             \
+    "punpcklbw %%mm4,     %%mm0\n\t"             \
+    "punpcklbw %%mm4,     %%mm1\n\t"             \
+    "punpcklbw %%mm4,     %%mm2\n\t"             \
+    "punpckhbw %%mm4,     %%mm5\n\t"             \
+    "punpckhbw %%mm4,     %%mm6\n\t"             \
+    "punpckhbw %%mm4,     %%mm7\n\t"             \
+    "psllw     $"rshift", %%mm1\n\t"             \
+    "psllw     $"rshift", %%mm6\n\t"             \
+    "psllw     $"gshift", %%mm2\n\t"             \
+    "psllw     $"gshift", %%mm7\n\t"             \
+    "por       %%mm1,     %%mm0\n\t"             \
+    "por       %%mm6,     %%mm5\n\t"             \
+    "por       %%mm2,     %%mm0\n\t"             \
+    "por       %%mm7,     %%mm5\n\t"             \
+    MOVNTQ "   %%mm0,      (%1)\n\t"             \
+    MOVNTQ "   %%mm5,     8(%1)\n\t"             \
+
+#define DITHER_RGB                               \
+    "paddusb "BLUE_DITHER"(%4),  %%mm0\n\t"      \
+    "paddusb "GREEN_DITHER"(%4), %%mm2\n\t"      \
+    "paddusb "RED_DITHER"(%4),   %%mm1\n\t"      \
+
+static inline int RENAME(yuv420_rgb15)(SwsContext *c, const uint8_t *src[],
+                                       int srcStride[],
+                                       int srcSliceY, int srcSliceH,
+                                       uint8_t *dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV2RGB_LOOP(2)
+
+#ifdef DITHER1XBPP
+        c->blueDither  = ff_dither8[y       & 1];
+        c->greenDither = ff_dither8[y       & 1];
+        c->redDither   = ff_dither8[(y + 1) & 1];
+#endif
+
+        YUV2RGB_INITIAL_LOAD
+        YUV2RGB
+#ifdef DITHER1XBPP
+        DITHER_RGB
+#endif
+        RGB_PACK16(mmx_redmask, "2", "7")
+
+    YUV2RGB_ENDLOOP(2)
+    YUV2RGB_OPERANDS
+    YUV2RGB_ENDFUNC
+}
+
+static inline int RENAME(yuv420_rgb16)(SwsContext *c, const uint8_t *src[],
+                                       int srcStride[],
+                                       int srcSliceY, int srcSliceH,
+                                       uint8_t *dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV2RGB_LOOP(2)
+
+#ifdef DITHER1XBPP
+        c->blueDither  = ff_dither8[y       & 1];
+        c->greenDither = ff_dither4[y       & 1];
+        c->redDither   = ff_dither8[(y + 1) & 1];
+#endif
+
+        YUV2RGB_INITIAL_LOAD
+        YUV2RGB
+#ifdef DITHER1XBPP
+        DITHER_RGB
+#endif
+        RGB_PACK16(mmx_grnmask, "3", "8")
+
+    YUV2RGB_ENDLOOP(2)
+    YUV2RGB_OPERANDS
+    YUV2RGB_ENDFUNC
+}
+
+
+#define RGB_PACK24(red, blue)              \
+    /* generate first packed RGB octet */  \
+    "movq      %%mm2,      %%mm5\n\t"      \
+    "movq      %%mm"blue", %%mm6\n\t"      \
+    "movq      %%mm"red",  %%mm7\n\t"      \
+    "punpcklbw %%mm5,      %%mm6\n\t"      \
+    "punpcklbw %%mm4,      %%mm7\n\t"      \
+    "movq      %%mm6,      %%mm3\n\t"      \
+    "punpcklwd %%mm7,      %%mm6\n\t"      \
+    "psrlq     $32,        %%mm3\n\t"      \
+    "movq      %%mm6,      %%mm5\n\t"      \
+    "psllq     $40,        %%mm6\n\t"      \
+    "psllq     $48,        %%mm3\n\t"      \
+    "psrlq     $32,        %%mm5\n\t"      \
+    "psrlq     $40,        %%mm6\n\t"      \
+    "psllq     $24,        %%mm5\n\t"      \
+    "por       %%mm3,      %%mm6\n\t"      \
+    "por       %%mm5,      %%mm6\n\t"      \
+    MOVNTQ "   %%mm6,      (%1)\n\t"       \
+\
+    /* generate second packed RGB octet */ \
+    "movq      %%mm"red",  %%mm7\n\t"      \
+    "movq      %%mm2,      %%mm5\n\t"      \
+    "movq      %%mm"blue", %%mm6\n\t"      \
+    "punpcklbw %%mm4,      %%mm7\n\t"      \
+    "punpcklbw %%mm5,      %%mm6\n\t"      \
+    "movq      %%mm7,      %%mm3\n\t"      \
+    "punpckhwd %%mm7,      %%mm6\n\t"      \
+    "psllq     $16,        %%mm3\n\t"      \
+    "psrlq     $32,        %%mm6\n\t"      \
+    "psrlq     $48,        %%mm3\n\t"      \
+    "psllq     $8,         %%mm6\n\t"      \
+    "movq      %%mm"red",  %%mm7\n\t"      \
+    "por       %%mm6,      %%mm3\n\t"      \
+    "movq      %%mm"blue", %%mm6\n\t"      \
+    "movq      %%mm2,      %%mm5\n\t"      \
+    "punpckhbw %%mm4,      %%mm7\n\t"      \
+    "punpckhbw %%mm5,      %%mm6\n\t"      \
+    "movq      %%mm6,      %%mm5\n\t"      \
+    "punpcklwd %%mm7,      %%mm6\n\t"      \
+    "psrlq     $16,        %%mm5\n\t"      \
+    "psllq     $56,        %%mm5\n\t"      \
+    "por       %%mm5,      %%mm3\n\t"      \
+    "psllq     $32,        %%mm6\n\t"      \
+    "por       %%mm6,      %%mm3\n\t"      \
+    MOVNTQ "   %%mm3,      8(%1)\n\t"      \
+\
+    /* generate third packed RGB octet */  \
+    "movq      %%mm"red",  %%mm7\n\t"      \
+    "movq      %%mm2,      %%mm5\n\t"      \
+    "movq      %%mm2,      %%mm3\n\t"      \
+    "movq      %%mm"blue", %%mm6\n\t"      \
+    "punpckhbw %%mm"red",  %%mm3\n\t"      \
+    "punpckhbw %%mm4,      %%mm7\n\t"      \
+    "psllq     $32,        %%mm3\n\t"      \
+    "punpckhbw %%mm5,      %%mm6\n\t"      \
+    "psrlq     $48,        %%mm3\n\t"      \
+    "punpckhwd %%mm7,      %%mm6\n\t"      \
+    "movq      %%mm6,      %%mm7\n\t"      \
+    "psrlq     $32,        %%mm6\n\t"      \
+    "psllq     $32,        %%mm7\n\t"      \
+    "psllq     $40,        %%mm6\n\t"      \
+    "psrlq     $16,        %%mm7\n\t"      \
+    "por       %%mm6,      %%mm3\n\t"      \
+    "por       %%mm7,      %%mm3\n\t"      \
+    MOVNTQ "   %%mm3,      16(%1)\n\t"     \
+
+static inline int RENAME(yuv420_rgb24)(SwsContext *c, const uint8_t *src[],
+                                       int srcStride[],
+                                       int srcSliceY, int srcSliceH,
+                                       uint8_t *dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV2RGB_LOOP(3)
+
+        YUV2RGB_INITIAL_LOAD
+        YUV2RGB
+        RGB_PACK24(REG_BLUE, REG_RED)
+
+    YUV2RGB_ENDLOOP(3)
+    YUV2RGB_OPERANDS
+    YUV2RGB_ENDFUNC
+}
+
+static inline int RENAME(yuv420_bgr24)(SwsContext *c, const uint8_t *src[],
+                                       int srcStride[],
+                                       int srcSliceY, int srcSliceH,
+                                       uint8_t *dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV2RGB_LOOP(3)
+
+        YUV2RGB_INITIAL_LOAD
+        YUV2RGB
+        RGB_PACK24(REG_RED, REG_BLUE)
+
+    YUV2RGB_ENDLOOP(3)
+    YUV2RGB_OPERANDS
+    YUV2RGB_ENDFUNC
+}
+
+
+#define SET_EMPTY_ALPHA                                                      \
+    "pcmpeqd   %%mm"REG_ALPHA", %%mm"REG_ALPHA"\n\t" /* set alpha to 0xFF */ \
+
+#define LOAD_ALPHA                                   \
+    "movq      (%6, %0, 2),     %%mm"REG_ALPHA"\n\t" \
+
+#define RGB_PACK32(red, green, blue, alpha)  \
+    "movq      %%mm"blue",  %%mm5\n\t"       \
+    "movq      %%mm"red",   %%mm6\n\t"       \
+    "punpckhbw %%mm"green", %%mm5\n\t"       \
+    "punpcklbw %%mm"green", %%mm"blue"\n\t"  \
+    "punpckhbw %%mm"alpha", %%mm6\n\t"       \
+    "punpcklbw %%mm"alpha", %%mm"red"\n\t"   \
+    "movq      %%mm"blue",  %%mm"green"\n\t" \
+    "movq      %%mm5,       %%mm"alpha"\n\t" \
+    "punpcklwd %%mm"red",   %%mm"blue"\n\t"  \
+    "punpckhwd %%mm"red",   %%mm"green"\n\t" \
+    "punpcklwd %%mm6,       %%mm5\n\t"       \
+    "punpckhwd %%mm6,       %%mm"alpha"\n\t" \
+    MOVNTQ "   %%mm"blue",   0(%1)\n\t"      \
+    MOVNTQ "   %%mm"green",  8(%1)\n\t"      \
+    MOVNTQ "   %%mm5,       16(%1)\n\t"      \
+    MOVNTQ "   %%mm"alpha", 24(%1)\n\t"      \
+
+static inline int RENAME(yuv420_rgb32)(SwsContext *c, const uint8_t *src[],
+                                       int srcStride[],
+                                       int srcSliceY, int srcSliceH,
+                                       uint8_t *dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV2RGB_LOOP(4)
+
+        YUV2RGB_INITIAL_LOAD
+        YUV2RGB
+        SET_EMPTY_ALPHA
+        RGB_PACK32(REG_RED, REG_GREEN, REG_BLUE, REG_ALPHA)
+
+    YUV2RGB_ENDLOOP(4)
+    YUV2RGB_OPERANDS
+    YUV2RGB_ENDFUNC
+}
+
+static inline int RENAME(yuva420_rgb32)(SwsContext *c, const uint8_t *src[],
+                                        int srcStride[],
+                                        int srcSliceY, int srcSliceH,
+                                        uint8_t *dst[], int dstStride[])
+{
+#if HAVE_7REGS
+    int y, h_size;
+
+    YUV2RGB_LOOP(4)
+
+        const uint8_t *pa = src[3] + y * srcStride[3];
+        YUV2RGB_INITIAL_LOAD
+        YUV2RGB
+        LOAD_ALPHA
+        RGB_PACK32(REG_RED, REG_GREEN, REG_BLUE, REG_ALPHA)
+
+    YUV2RGB_ENDLOOP(4)
+    YUV2RGB_OPERANDS_ALPHA
+    YUV2RGB_ENDFUNC
+#endif
+}
+
+static inline int RENAME(yuv420_bgr32)(SwsContext *c, const uint8_t *src[],
+                                       int srcStride[],
+                                       int srcSliceY, int srcSliceH,
+                                       uint8_t *dst[], int dstStride[])
+{
+    int y, h_size;
+
+    YUV2RGB_LOOP(4)
+
+        YUV2RGB_INITIAL_LOAD
+        YUV2RGB
+        SET_EMPTY_ALPHA
+        RGB_PACK32(REG_BLUE, REG_GREEN, REG_RED, REG_ALPHA)
+
+    YUV2RGB_ENDLOOP(4)
+    YUV2RGB_OPERANDS
+    YUV2RGB_ENDFUNC
+}
+
+static inline int RENAME(yuva420_bgr32)(SwsContext *c, const uint8_t *src[],
+                                        int srcStride[],
+                                        int srcSliceY, int srcSliceH,
+                                        uint8_t *dst[], int dstStride[])
+{
+#if HAVE_7REGS
+    int y, h_size;
+
+    YUV2RGB_LOOP(4)
+
+        const uint8_t *pa = src[3] + y * srcStride[3];
+        YUV2RGB_INITIAL_LOAD
+        YUV2RGB
+        LOAD_ALPHA
+        RGB_PACK32(REG_BLUE, REG_GREEN, REG_RED, REG_ALPHA)
+
+    YUV2RGB_ENDLOOP(4)
+    YUV2RGB_OPERANDS_ALPHA
+    YUV2RGB_ENDFUNC
+#endif
+}
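
The RGB_PACK24 macro used by yuv420_rgb24 and yuv420_bgr24 above takes three MMX registers holding eight red, eight green and eight blue samples (one byte per pixel) and interleaves them into 24 bytes of packed 3-byte pixels, written out as three 8-byte MOVNTQ stores. A scalar C sketch of that interleave, for illustration only (the helper name is made up, and the channel order is chosen by the macro's red/blue register arguments rather than fixed as shown here):

    /* Pack one group of 8 pixels from planar channels into 3-byte pixels. */
    static void pack24_scalar(uint8_t *dst, const uint8_t *c0,
                              const uint8_t *c1, const uint8_t *c2)
    {
        int i;
        for (i = 0; i < 8; i++) {
            dst[3 * i + 0] = c0[i];
            dst[3 * i + 1] = c1[i];
            dst[3 * i + 2] = c2[i];
        }
    }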

Added: branches/0.6/libswscale/yuv2rgb.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ branches/0.6/libswscale/yuv2rgb.c	Tue May  4 23:01:48 2010	(r23019)
@@ -0,0 +1,825 @@
+/*
+ * software YUV to RGB converter
+ *
+ * Copyright (C) 2009 Konstantin Shishkov
+ *
+ * 1,4,8bpp support and context / deglobalize stuff
+ * by Michael Niedermayer (michaelni at gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <assert.h>
+
+#include "config.h"
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+#include "libavutil/x86_cpu.h"
+#include "libavutil/bswap.h"
+
+extern const uint8_t dither_4x4_16[4][8];
+extern const uint8_t dither_8x8_32[8][8];
+extern const uint8_t dither_8x8_73[8][8];
+extern const uint8_t dither_8x8_220[8][8];
+
+const int32_t ff_yuv2rgb_coeffs[8][4] = {
+    {117504, 138453, 13954, 34903}, /* no sequence_display_extension */
+    {117504, 138453, 13954, 34903}, /* ITU-R Rec. 709 (1990) */
+    {104597, 132201, 25675, 53279}, /* unspecified */
+    {104597, 132201, 25675, 53279}, /* reserved */
+    {104448, 132798, 24759, 53109}, /* FCC */
+    {104597, 132201, 25675, 53279}, /* ITU-R Rec. 624-4 System B, G */
+    {104597, 132201, 25675, 53279}, /* SMPTE 170M */
+    {117579, 136230, 16907, 35559}  /* SMPTE 240M (1987) */
+};
+
+const int *sws_getCoefficients(int colorspace)
+{
+    if (colorspace > 7 || colorspace < 0)
+        colorspace = SWS_CS_DEFAULT;
+    return ff_yuv2rgb_coeffs[colorspace];
+}
+
+#define LOADCHROMA(i)                               \
+    U = pu[i];                                      \
+    V = pv[i];                                      \
+    r = (void *)c->table_rV[V];                     \
+    g = (void *)(c->table_gU[U] + c->table_gV[V]);  \
+    b = (void *)c->table_bU[U];
+
+#define PUTRGB(dst,src,i)            \
+    Y = src[2*i];                    \
+    dst[2*i  ] = r[Y] + g[Y] + b[Y]; \
+    Y = src[2*i+1];                  \
+    dst[2*i+1] = r[Y] + g[Y] + b[Y];
+
+#define PUTRGB24(dst,src,i)                                  \
+    Y = src[2*i];                                            \
+    dst[6*i+0] = r[Y]; dst[6*i+1] = g[Y]; dst[6*i+2] = b[Y]; \
+    Y = src[2*i+1];                                          \
+    dst[6*i+3] = r[Y]; dst[6*i+4] = g[Y]; dst[6*i+5] = b[Y];
+
+#define PUTBGR24(dst,src,i)                                  \
+    Y = src[2*i];                                            \
+    dst[6*i+0] = b[Y]; dst[6*i+1] = g[Y]; dst[6*i+2] = r[Y]; \
+    Y = src[2*i+1];                                          \
+    dst[6*i+3] = b[Y]; dst[6*i+4] = g[Y]; dst[6*i+5] = r[Y];
+
+#define PUTRGBA(dst,ysrc,asrc,i,s)                      \
+    Y = ysrc[2*i];                                      \
+    dst[2*i  ] = r[Y] + g[Y] + b[Y] + (asrc[2*i  ]<<s); \
+    Y = ysrc[2*i+1];                                    \
+    dst[2*i+1] = r[Y] + g[Y] + b[Y] + (asrc[2*i+1]<<s);
+
+#define PUTRGB48(dst,src,i)             \
+    Y = src[2*i];                       \
+    dst[12*i+ 0] = dst[12*i+ 1] = r[Y]; \
+    dst[12*i+ 2] = dst[12*i+ 3] = g[Y]; \
+    dst[12*i+ 4] = dst[12*i+ 5] = b[Y]; \
+    Y = src[2*i+1];                     \
+    dst[12*i+ 6] = dst[12*i+ 7] = r[Y]; \
+    dst[12*i+ 8] = dst[12*i+ 9] = g[Y]; \
+    dst[12*i+10] = dst[12*i+11] = b[Y];
+
+#define YUV2RGBFUNC(func_name, dst_type, alpha) \
+static int func_name(SwsContext *c, const uint8_t* src[], int srcStride[], int srcSliceY, \
+                     int srcSliceH, uint8_t* dst[], int dstStride[]) \
+{\
+    int y;\
+\
+    if (!alpha && c->srcFormat == PIX_FMT_YUV422P) {\
+        srcStride[1] *= 2;\
+        srcStride[2] *= 2;\
+    }\
+    for (y=0; y<srcSliceH; y+=2) {\
+        dst_type *dst_1 = (dst_type*)(dst[0] + (y+srcSliceY  )*dstStride[0]);\
+        dst_type *dst_2 = (dst_type*)(dst[0] + (y+srcSliceY+1)*dstStride[0]);\
+        dst_type av_unused *r, *b;\
+        dst_type *g;\
+        const uint8_t *py_1 = src[0] + y*srcStride[0];\
+        const uint8_t *py_2 = py_1 + srcStride[0];\
+        const uint8_t *pu = src[1] + (y>>1)*srcStride[1];\
+        const uint8_t *pv = src[2] + (y>>1)*srcStride[2];\
+        const uint8_t av_unused *pa_1, *pa_2;\
+        unsigned int h_size = c->dstW>>3;\
+        if (alpha) {\
+            pa_1 = src[3] + y*srcStride[3];\
+            pa_2 = pa_1 + srcStride[3];\
+        }\
+        while (h_size--) {\
+            int av_unused U, V;\
+            int Y;\
+
+#define ENDYUV2RGBLINE(dst_delta)\
+            pu += 4;\
+            pv += 4;\
+            py_1 += 8;\
+            py_2 += 8;\
+            dst_1 += dst_delta;\
+            dst_2 += dst_delta;\
+        }\
+        if (c->dstW & 4) {\
+            int av_unused Y, U, V;\
+
+#define ENDYUV2RGBFUNC()\
+        }\
+    }\
+    return srcSliceH;\
+}
+
+#define CLOSEYUV2RGBFUNC(dst_delta)\
+    ENDYUV2RGBLINE(dst_delta)\
+    ENDYUV2RGBFUNC()
+
+YUV2RGBFUNC(yuv2rgb_c_48, uint8_t, 0)
+    LOADCHROMA(0);
+    PUTRGB48(dst_1,py_1,0);
+    PUTRGB48(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB48(dst_2,py_2,1);
+    PUTRGB48(dst_1,py_1,1);
+
+    LOADCHROMA(2);
+    PUTRGB48(dst_1,py_1,2);
+    PUTRGB48(dst_2,py_2,2);
+
+    LOADCHROMA(3);
+    PUTRGB48(dst_2,py_2,3);
+    PUTRGB48(dst_1,py_1,3);
+ENDYUV2RGBLINE(48)
+    LOADCHROMA(0);
+    PUTRGB48(dst_1,py_1,0);
+    PUTRGB48(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB48(dst_2,py_2,1);
+    PUTRGB48(dst_1,py_1,1);
+ENDYUV2RGBFUNC()
+
+YUV2RGBFUNC(yuv2rgb_c_32, uint32_t, 0)
+    LOADCHROMA(0);
+    PUTRGB(dst_1,py_1,0);
+    PUTRGB(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB(dst_2,py_2,1);
+    PUTRGB(dst_1,py_1,1);
+
+    LOADCHROMA(2);
+    PUTRGB(dst_1,py_1,2);
+    PUTRGB(dst_2,py_2,2);
+
+    LOADCHROMA(3);
+    PUTRGB(dst_2,py_2,3);
+    PUTRGB(dst_1,py_1,3);
+ENDYUV2RGBLINE(8)
+    LOADCHROMA(0);
+    PUTRGB(dst_1,py_1,0);
+    PUTRGB(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB(dst_2,py_2,1);
+    PUTRGB(dst_1,py_1,1);
+ENDYUV2RGBFUNC()
+
+YUV2RGBFUNC(yuva2rgba_c, uint32_t, 1)
+    LOADCHROMA(0);
+    PUTRGBA(dst_1,py_1,pa_1,0,24);
+    PUTRGBA(dst_2,py_2,pa_2,0,24);
+
+    LOADCHROMA(1);
+    PUTRGBA(dst_2,py_2,pa_2,1,24);
+    PUTRGBA(dst_1,py_1,pa_1,1,24);
+
+    LOADCHROMA(2);
+    PUTRGBA(dst_1,py_1,pa_1,2,24);
+    PUTRGBA(dst_2,py_2,pa_2,2,24);
+
+    LOADCHROMA(3);
+    PUTRGBA(dst_2,py_2,pa_2,3,24);
+    PUTRGBA(dst_1,py_1,pa_1,3,24);
+    pa_1 += 8;
+    pa_2 += 8;
+ENDYUV2RGBLINE(8)
+    LOADCHROMA(0);
+    PUTRGBA(dst_1,py_1,pa_1,0,24);
+    PUTRGBA(dst_2,py_2,pa_2,0,24);
+
+    LOADCHROMA(1);
+    PUTRGBA(dst_2,py_2,pa_2,1,24);
+    PUTRGBA(dst_1,py_1,pa_1,1,24);
+ENDYUV2RGBFUNC()
+
+YUV2RGBFUNC(yuva2argb_c, uint32_t, 1)
+    LOADCHROMA(0);
+    PUTRGBA(dst_1,py_1,pa_1,0,0);
+    PUTRGBA(dst_2,py_2,pa_2,0,0);
+
+    LOADCHROMA(1);
+    PUTRGBA(dst_2,py_2,pa_2,1,0);
+    PUTRGBA(dst_1,py_1,pa_1,1,0);
+
+    LOADCHROMA(2);
+    PUTRGBA(dst_1,py_1,pa_1,2,0);
+    PUTRGBA(dst_2,py_2,pa_2,2,0);
+
+    LOADCHROMA(3);
+    PUTRGBA(dst_2,py_2,pa_2,3,0);
+    PUTRGBA(dst_1,py_1,pa_1,3,0);
+    pa_1 += 8;
+    pa_2 += 8;
+ENDYUV2RGBLINE(8)
+    LOADCHROMA(0);
+    PUTRGBA(dst_1,py_1,pa_1,0,0);
+    PUTRGBA(dst_2,py_2,pa_2,0,0);
+
+    LOADCHROMA(1);
+    PUTRGBA(dst_2,py_2,pa_2,1,0);
+    PUTRGBA(dst_1,py_1,pa_1,1,0);
+ENDYUV2RGBFUNC()
+
+YUV2RGBFUNC(yuv2rgb_c_24_rgb, uint8_t, 0)
+    LOADCHROMA(0);
+    PUTRGB24(dst_1,py_1,0);
+    PUTRGB24(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB24(dst_2,py_2,1);
+    PUTRGB24(dst_1,py_1,1);
+
+    LOADCHROMA(2);
+    PUTRGB24(dst_1,py_1,2);
+    PUTRGB24(dst_2,py_2,2);
+
+    LOADCHROMA(3);
+    PUTRGB24(dst_2,py_2,3);
+    PUTRGB24(dst_1,py_1,3);
+ENDYUV2RGBLINE(24)
+    LOADCHROMA(0);
+    PUTRGB24(dst_1,py_1,0);
+    PUTRGB24(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB24(dst_2,py_2,1);
+    PUTRGB24(dst_1,py_1,1);
+ENDYUV2RGBFUNC()
+
+// only trivial mods from yuv2rgb_c_24_rgb
+YUV2RGBFUNC(yuv2rgb_c_24_bgr, uint8_t, 0)
+    LOADCHROMA(0);
+    PUTBGR24(dst_1,py_1,0);
+    PUTBGR24(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTBGR24(dst_2,py_2,1);
+    PUTBGR24(dst_1,py_1,1);
+
+    LOADCHROMA(2);
+    PUTBGR24(dst_1,py_1,2);
+    PUTBGR24(dst_2,py_2,2);
+
+    LOADCHROMA(3);
+    PUTBGR24(dst_2,py_2,3);
+    PUTBGR24(dst_1,py_1,3);
+ENDYUV2RGBLINE(24)
+    LOADCHROMA(0);
+    PUTBGR24(dst_1,py_1,0);
+    PUTBGR24(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTBGR24(dst_2,py_2,1);
+    PUTBGR24(dst_1,py_1,1);
+ENDYUV2RGBFUNC()
+
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+YUV2RGBFUNC(yuv2rgb_c_16, uint16_t, 0)
+    LOADCHROMA(0);
+    PUTRGB(dst_1,py_1,0);
+    PUTRGB(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB(dst_2,py_2,1);
+    PUTRGB(dst_1,py_1,1);
+
+    LOADCHROMA(2);
+    PUTRGB(dst_1,py_1,2);
+    PUTRGB(dst_2,py_2,2);
+
+    LOADCHROMA(3);
+    PUTRGB(dst_2,py_2,3);
+    PUTRGB(dst_1,py_1,3);
+CLOSEYUV2RGBFUNC(8)
+
+#if 0 // Currently unused
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+YUV2RGBFUNC(yuv2rgb_c_8, uint8_t, 0)
+    LOADCHROMA(0);
+    PUTRGB(dst_1,py_1,0);
+    PUTRGB(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB(dst_2,py_2,1);
+    PUTRGB(dst_1,py_1,1);
+
+    LOADCHROMA(2);
+    PUTRGB(dst_1,py_1,2);
+    PUTRGB(dst_2,py_2,2);
+
+    LOADCHROMA(3);
+    PUTRGB(dst_2,py_2,3);
+    PUTRGB(dst_1,py_1,3);
+CLOSEYUV2RGBFUNC(8)
+#endif
+
+// 12 bpp (RGB444/BGR444) output with 4x4 ordered dither
+YUV2RGBFUNC(yuv2rgb_c_12_ordered_dither, uint16_t, 0)
+    const uint8_t *d16 = dither_4x4_16[y&3];
+#define PUTRGB12(dst,src,i,o)                                   \
+    Y = src[2*i];                                               \
+    dst[2*i]   = r[Y+d16[0+o]] + g[Y+d16[0+o]] + b[Y+d16[0+o]]; \
+    Y = src[2*i+1];                                             \
+    dst[2*i+1] = r[Y+d16[1+o]] + g[Y+d16[1+o]] + b[Y+d16[1+o]];
+
+    LOADCHROMA(0);
+    PUTRGB12(dst_1,py_1,0,0);
+    PUTRGB12(dst_2,py_2,0,0+8);
+
+    LOADCHROMA(1);
+    PUTRGB12(dst_2,py_2,1,2+8);
+    PUTRGB12(dst_1,py_1,1,2);
+
+    LOADCHROMA(2);
+    PUTRGB12(dst_1,py_1,2,4);
+    PUTRGB12(dst_2,py_2,2,4+8);
+
+    LOADCHROMA(3);
+    PUTRGB12(dst_2,py_2,3,6+8);
+    PUTRGB12(dst_1,py_1,3,6);
+CLOSEYUV2RGBFUNC(8)
+
+// 8 bpp (RGB8/BGR8) output with 8x8 ordered dither
+YUV2RGBFUNC(yuv2rgb_c_8_ordered_dither, uint8_t, 0)
+    const uint8_t *d32 = dither_8x8_32[y&7];
+    const uint8_t *d64 = dither_8x8_73[y&7];
+#define PUTRGB8(dst,src,i,o)                                    \
+    Y = src[2*i];                                               \
+    dst[2*i]   = r[Y+d32[0+o]] + g[Y+d32[0+o]] + b[Y+d64[0+o]]; \
+    Y = src[2*i+1];                                             \
+    dst[2*i+1] = r[Y+d32[1+o]] + g[Y+d32[1+o]] + b[Y+d64[1+o]];
+
+    LOADCHROMA(0);
+    PUTRGB8(dst_1,py_1,0,0);
+    PUTRGB8(dst_2,py_2,0,0+8);
+
+    LOADCHROMA(1);
+    PUTRGB8(dst_2,py_2,1,2+8);
+    PUTRGB8(dst_1,py_1,1,2);
+
+    LOADCHROMA(2);
+    PUTRGB8(dst_1,py_1,2,4);
+    PUTRGB8(dst_2,py_2,2,4+8);
+
+    LOADCHROMA(3);
+    PUTRGB8(dst_2,py_2,3,6+8);
+    PUTRGB8(dst_1,py_1,3,6);
+CLOSEYUV2RGBFUNC(8)
+
+#if 0 // Currently unused
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+YUV2RGBFUNC(yuv2rgb_c_4, uint8_t, 0)
+    int acc;
+#define PUTRGB4(dst,src,i)          \
+    Y = src[2*i];                   \
+    acc = r[Y] + g[Y] + b[Y];       \
+    Y = src[2*i+1];                 \
+    acc |= (r[Y] + g[Y] + b[Y])<<4; \
+    dst[i] = acc;
+
+    LOADCHROMA(0);
+    PUTRGB4(dst_1,py_1,0);
+    PUTRGB4(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB4(dst_2,py_2,1);
+    PUTRGB4(dst_1,py_1,1);
+
+    LOADCHROMA(2);
+    PUTRGB4(dst_1,py_1,2);
+    PUTRGB4(dst_2,py_2,2);
+
+    LOADCHROMA(3);
+    PUTRGB4(dst_2,py_2,3);
+    PUTRGB4(dst_1,py_1,3);
+CLOSEYUV2RGBFUNC(4)
+#endif
+
+YUV2RGBFUNC(yuv2rgb_c_4_ordered_dither, uint8_t, 0)
+    const uint8_t *d64 =  dither_8x8_73[y&7];
+    const uint8_t *d128 = dither_8x8_220[y&7];
+    int acc;
+
+#define PUTRGB4D(dst,src,i,o)                                     \
+    Y = src[2*i];                                                 \
+    acc = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]];        \
+    Y = src[2*i+1];                                               \
+    acc |= (r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]])<<4;  \
+    dst[i]= acc;
+
+    LOADCHROMA(0);
+    PUTRGB4D(dst_1,py_1,0,0);
+    PUTRGB4D(dst_2,py_2,0,0+8);
+
+    LOADCHROMA(1);
+    PUTRGB4D(dst_2,py_2,1,2+8);
+    PUTRGB4D(dst_1,py_1,1,2);
+
+    LOADCHROMA(2);
+    PUTRGB4D(dst_1,py_1,2,4);
+    PUTRGB4D(dst_2,py_2,2,4+8);
+
+    LOADCHROMA(3);
+    PUTRGB4D(dst_2,py_2,3,6+8);
+    PUTRGB4D(dst_1,py_1,3,6);
+CLOSEYUV2RGBFUNC(4)
+
+#if 0 // Currently unused
+// This is exactly the same code as yuv2rgb_c_32 except for the types of
+// r, g, b, dst_1, dst_2
+YUV2RGBFUNC(yuv2rgb_c_4b, uint8_t, 0)
+    LOADCHROMA(0);
+    PUTRGB(dst_1,py_1,0);
+    PUTRGB(dst_2,py_2,0);
+
+    LOADCHROMA(1);
+    PUTRGB(dst_2,py_2,1);
+    PUTRGB(dst_1,py_1,1);
+
+    LOADCHROMA(2);
+    PUTRGB(dst_1,py_1,2);
+    PUTRGB(dst_2,py_2,2);
+
+    LOADCHROMA(3);
+    PUTRGB(dst_2,py_2,3);
+    PUTRGB(dst_1,py_1,3);
+CLOSEYUV2RGBFUNC(8)
+#endif
+
+YUV2RGBFUNC(yuv2rgb_c_4b_ordered_dither, uint8_t, 0)
+    const uint8_t *d64 =  dither_8x8_73[y&7];
+    const uint8_t *d128 = dither_8x8_220[y&7];
+
+#define PUTRGB4DB(dst,src,i,o)                                    \
+    Y = src[2*i];                                                 \
+    dst[2*i]   = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
+    Y = src[2*i+1];                                               \
+    dst[2*i+1] = r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]];
+
+    LOADCHROMA(0);
+    PUTRGB4DB(dst_1,py_1,0,0);
+    PUTRGB4DB(dst_2,py_2,0,0+8);
+
+    LOADCHROMA(1);
+    PUTRGB4DB(dst_2,py_2,1,2+8);
+    PUTRGB4DB(dst_1,py_1,1,2);
+
+    LOADCHROMA(2);
+    PUTRGB4DB(dst_1,py_1,2,4);
+    PUTRGB4DB(dst_2,py_2,2,4+8);
+
+    LOADCHROMA(3);
+    PUTRGB4DB(dst_2,py_2,3,6+8);
+    PUTRGB4DB(dst_1,py_1,3,6);
+CLOSEYUV2RGBFUNC(8)
+
+YUV2RGBFUNC(yuv2rgb_c_1_ordered_dither, uint8_t, 0)
+    const uint8_t *d128 = dither_8x8_220[y&7];
+    char out_1 = 0, out_2 = 0;
+    g = c->table_gU[128] + c->table_gV[128];
+
+#define PUTRGB1(out,src,i,o)    \
+    Y = src[2*i];               \
+    out+= out + g[Y+d128[0+o]]; \
+    Y = src[2*i+1];             \
+    out+= out + g[Y+d128[1+o]];
+
+    PUTRGB1(out_1,py_1,0,0);
+    PUTRGB1(out_2,py_2,0,0+8);
+
+    PUTRGB1(out_2,py_2,1,2+8);
+    PUTRGB1(out_1,py_1,1,2);
+
+    PUTRGB1(out_1,py_1,2,4);
+    PUTRGB1(out_2,py_2,2,4+8);
+
+    PUTRGB1(out_2,py_2,3,6+8);
+    PUTRGB1(out_1,py_1,3,6);
+
+    dst_1[0]= out_1;
+    dst_2[0]= out_2;
+CLOSEYUV2RGBFUNC(1)
+
+SwsFunc ff_yuv2rgb_get_func_ptr(SwsContext *c)
+{
+    SwsFunc t = NULL;
+#if HAVE_MMX
+    t = ff_yuv2rgb_init_mmx(c);
+#endif
+#if HAVE_VIS
+    t = ff_yuv2rgb_init_vis(c);
+#endif
+#if CONFIG_MLIB
+    t = ff_yuv2rgb_init_mlib(c);
+#endif
+#if HAVE_ALTIVEC
+    if (c->flags & SWS_CPU_CAPS_ALTIVEC)
+        t = ff_yuv2rgb_init_altivec(c);
+#endif
+
+#if ARCH_BFIN
+    if (c->flags & SWS_CPU_CAPS_BFIN)
+        t = ff_yuv2rgb_get_func_ptr_bfin(c);
+#endif
+
+    if (t)
+        return t;
+
+    av_log(c, AV_LOG_WARNING, "No accelerated colorspace conversion found from %s to %s.\n", sws_format_name(c->srcFormat), sws_format_name(c->dstFormat));
+
+    switch (c->dstFormat) {
+    case PIX_FMT_RGB48BE:
+    case PIX_FMT_RGB48LE:    return yuv2rgb_c_48;
+    case PIX_FMT_ARGB:
+    case PIX_FMT_ABGR:       if (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) return yuva2argb_c;
+    case PIX_FMT_RGBA:
+    case PIX_FMT_BGRA:       return (CONFIG_SWSCALE_ALPHA && c->srcFormat == PIX_FMT_YUVA420P) ? yuva2rgba_c : yuv2rgb_c_32;
+    case PIX_FMT_RGB24:      return yuv2rgb_c_24_rgb;
+    case PIX_FMT_BGR24:      return yuv2rgb_c_24_bgr;
+    case PIX_FMT_RGB565:
+    case PIX_FMT_BGR565:
+    case PIX_FMT_RGB555:
+    case PIX_FMT_BGR555:     return yuv2rgb_c_16;
+    case PIX_FMT_RGB444:
+    case PIX_FMT_BGR444:     return yuv2rgb_c_12_ordered_dither;
+    case PIX_FMT_RGB8:
+    case PIX_FMT_BGR8:       return yuv2rgb_c_8_ordered_dither;
+    case PIX_FMT_RGB4:
+    case PIX_FMT_BGR4:       return yuv2rgb_c_4_ordered_dither;
+    case PIX_FMT_RGB4_BYTE:
+    case PIX_FMT_BGR4_BYTE:  return yuv2rgb_c_4b_ordered_dither;
+    case PIX_FMT_MONOBLACK:  return yuv2rgb_c_1_ordered_dither;
+    default:
+        assert(0);
+    }
+    return NULL;
+}
+
+static void fill_table(uint8_t* table[256], const int elemsize, const int inc, uint8_t *y_table)
+{
+    int i;
+    int64_t cb = 0;
+
+    y_table -= elemsize * (inc >> 9);
+
+    for (i = 0; i < 256; i++) {
+        table[i] = y_table + elemsize * (cb >> 16);
+        cb += inc;
+    }
+}
+
+static void fill_gv_table(int table[256], const int elemsize, const int inc)
+{
+    int i;
+    int64_t cb = 0;
+    int off = -(inc >> 9);
+
+    for (i = 0; i < 256; i++) {
+        table[i] = elemsize * (off + (cb >> 16));
+        cb += inc;
+    }
+}
+
+av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4], int fullRange,
+                                     int brightness, int contrast, int saturation)
+{
+    const int isRgb =      c->dstFormat==PIX_FMT_RGB32
+                        || c->dstFormat==PIX_FMT_RGB32_1
+                        || c->dstFormat==PIX_FMT_BGR24
+                        || c->dstFormat==PIX_FMT_RGB565BE
+                        || c->dstFormat==PIX_FMT_RGB565LE
+                        || c->dstFormat==PIX_FMT_RGB555BE
+                        || c->dstFormat==PIX_FMT_RGB555LE
+                        || c->dstFormat==PIX_FMT_RGB444BE
+                        || c->dstFormat==PIX_FMT_RGB444LE
+                        || c->dstFormat==PIX_FMT_RGB8
+                        || c->dstFormat==PIX_FMT_RGB4
+                        || c->dstFormat==PIX_FMT_RGB4_BYTE
+                        || c->dstFormat==PIX_FMT_MONOBLACK;
+    const int isNotNe =    c->dstFormat==PIX_FMT_NE(RGB565LE,RGB565BE)
+                        || c->dstFormat==PIX_FMT_NE(RGB555LE,RGB555BE)
+                        || c->dstFormat==PIX_FMT_NE(RGB444LE,RGB444BE)
+                        || c->dstFormat==PIX_FMT_NE(BGR565LE,BGR565BE)
+                        || c->dstFormat==PIX_FMT_NE(BGR555LE,BGR555BE)
+                        || c->dstFormat==PIX_FMT_NE(BGR444LE,BGR444BE);
+    const int bpp = c->dstFormatBpp;
+    uint8_t *y_table;
+    uint16_t *y_table16;
+    uint32_t *y_table32;
+    int i, base, rbase, gbase, bbase, abase, needAlpha;
+    const int yoffs = fullRange ? 384 : 326;
+
+    int64_t crv =  inv_table[0];
+    int64_t cbu =  inv_table[1];
+    int64_t cgu = -inv_table[2];
+    int64_t cgv = -inv_table[3];
+    int64_t cy  = 1<<16;
+    int64_t oy  = 0;
+
+    int64_t yb = 0;
+
+    if (!fullRange) {
+        cy = (cy*255) / 219;
+        oy = 16<<16;
+    } else {
+        crv = (crv*224) / 255;
+        cbu = (cbu*224) / 255;
+        cgu = (cgu*224) / 255;
+        cgv = (cgv*224) / 255;
+    }
+
+    cy  = (cy *contrast             ) >> 16;
+    crv = (crv*contrast * saturation) >> 32;
+    cbu = (cbu*contrast * saturation) >> 32;
+    cgu = (cgu*contrast * saturation) >> 32;
+    cgv = (cgv*contrast * saturation) >> 32;
+    oy -= 256*brightness;
+
+    //scale coefficients by cy
+    crv = ((crv << 16) + 0x8000) / cy;
+    cbu = ((cbu << 16) + 0x8000) / cy;
+    cgu = ((cgu << 16) + 0x8000) / cy;
+    cgv = ((cgv << 16) + 0x8000) / cy;
+
+    av_free(c->yuvTable);
+
+    switch (bpp) {
+    case 1:
+        c->yuvTable = av_malloc(1024);
+        y_table = c->yuvTable;
+        yb = -(384<<16) - oy;
+        for (i = 0; i < 1024-110; i++) {
+            y_table[i+110] = av_clip_uint8((yb + 0x8000) >> 16) >> 7;
+            yb += cy;
+        }
+        fill_table(c->table_gU, 1, cgu, y_table + yoffs);
+        fill_gv_table(c->table_gV, 1, cgv);
+        break;
+    case 4:
+    case 4|128:
+        rbase = isRgb ? 3 : 0;
+        gbase = 1;
+        bbase = isRgb ? 0 : 3;
+        c->yuvTable = av_malloc(1024*3);
+        y_table = c->yuvTable;
+        yb = -(384<<16) - oy;
+        for (i = 0; i < 1024-110; i++) {
+            int yval = av_clip_uint8((yb + 0x8000) >> 16);
+            y_table[i+110     ] =  (yval >> 7)       << rbase;
+            y_table[i+ 37+1024] = ((yval + 43) / 85) << gbase;
+            y_table[i+110+2048] =  (yval >> 7)       << bbase;
+            yb += cy;
+        }
+        fill_table(c->table_rV, 1, crv, y_table + yoffs);
+        fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024);
+        fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048);
+        fill_gv_table(c->table_gV, 1, cgv);
+        break;
+    case 8:
+        rbase = isRgb ? 5 : 0;
+        gbase = isRgb ? 2 : 3;
+        bbase = isRgb ? 0 : 6;
+        c->yuvTable = av_malloc(1024*3);
+        y_table = c->yuvTable;
+        yb = -(384<<16) - oy;
+        for (i = 0; i < 1024-38; i++) {
+            int yval = av_clip_uint8((yb + 0x8000) >> 16);
+            y_table[i+16     ] = ((yval + 18) / 36) << rbase;
+            y_table[i+16+1024] = ((yval + 18) / 36) << gbase;
+            y_table[i+37+2048] = ((yval + 43) / 85) << bbase;
+            yb += cy;
+        }
+        fill_table(c->table_rV, 1, crv, y_table + yoffs);
+        fill_table(c->table_gU, 1, cgu, y_table + yoffs + 1024);
+        fill_table(c->table_bU, 1, cbu, y_table + yoffs + 2048);
+        fill_gv_table(c->table_gV, 1, cgv);
+        break;
+    case 12:
+        rbase = isRgb ? 8 : 0;
+        gbase = 4;
+        bbase = isRgb ? 0 : 8;
+        c->yuvTable = av_malloc(1024*3*2);
+        y_table16 = c->yuvTable;
+        yb = -(384<<16) - oy;
+        for (i = 0; i < 1024; i++) {
+            uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16);
+            y_table16[i     ] = (yval >> 4) << rbase;
+            y_table16[i+1024] = (yval >> 4) << gbase;
+            y_table16[i+2048] = (yval >> 4) << bbase;
+            yb += cy;
+        }
+        if (isNotNe)
+            for (i = 0; i < 1024*3; i++)
+                y_table16[i] = bswap_16(y_table16[i]);
+        fill_table(c->table_rV, 2, crv, y_table16 + yoffs);
+        fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024);
+        fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048);
+        fill_gv_table(c->table_gV, 2, cgv);
+        break;
+    case 15:
+    case 16:
+        rbase = isRgb ? bpp - 5 : 0;
+        gbase = 5;
+        bbase = isRgb ? 0 : (bpp - 5);
+        c->yuvTable = av_malloc(1024*3*2);
+        y_table16 = c->yuvTable;
+        yb = -(384<<16) - oy;
+        for (i = 0; i < 1024; i++) {
+            uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16);
+            y_table16[i     ] = (yval >> 3)          << rbase;
+            y_table16[i+1024] = (yval >> (18 - bpp)) << gbase;
+            y_table16[i+2048] = (yval >> 3)          << bbase;
+            yb += cy;
+        }
+        if (isNotNe)
+            for (i = 0; i < 1024*3; i++)
+                y_table16[i] = bswap_16(y_table16[i]);
+        fill_table(c->table_rV, 2, crv, y_table16 + yoffs);
+        fill_table(c->table_gU, 2, cgu, y_table16 + yoffs + 1024);
+        fill_table(c->table_bU, 2, cbu, y_table16 + yoffs + 2048);
+        fill_gv_table(c->table_gV, 2, cgv);
+        break;
+    case 24:
+    case 48:
+        c->yuvTable = av_malloc(1024);
+        y_table = c->yuvTable;
+        yb = -(384<<16) - oy;
+        for (i = 0; i < 1024; i++) {
+            y_table[i] = av_clip_uint8((yb + 0x8000) >> 16);
+            yb += cy;
+        }
+        fill_table(c->table_rV, 1, crv, y_table + yoffs);
+        fill_table(c->table_gU, 1, cgu, y_table + yoffs);
+        fill_table(c->table_bU, 1, cbu, y_table + yoffs);
+        fill_gv_table(c->table_gV, 1, cgv);
+        break;
+    case 32:
+        base = (c->dstFormat == PIX_FMT_RGB32_1 || c->dstFormat == PIX_FMT_BGR32_1) ? 8 : 0;
+        rbase = base + (isRgb ? 16 : 0);
+        gbase = base + 8;
+        bbase = base + (isRgb ? 0 : 16);
+        needAlpha = CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat);
+        if (!needAlpha)
+            abase = (base + 24) & 31;
+        c->yuvTable = av_malloc(1024*3*4);
+        y_table32 = c->yuvTable;
+        yb = -(384<<16) - oy;
+        for (i = 0; i < 1024; i++) {
+            uint8_t yval = av_clip_uint8((yb + 0x8000) >> 16);
+            y_table32[i     ] = (yval << rbase) + (needAlpha ? 0 : (255 << abase));
+            y_table32[i+1024] = yval << gbase;
+            y_table32[i+2048] = yval << bbase;
+            yb += cy;
+        }
+        fill_table(c->table_rV, 4, crv, y_table32 + yoffs);
+        fill_table(c->table_gU, 4, cgu, y_table32 + yoffs + 1024);
+        fill_table(c->table_bU, 4, cbu, y_table32 + yoffs + 2048);
+        fill_gv_table(c->table_gV, 4, cgv);
+        break;
+    default:
+        c->yuvTable = NULL;
+        av_log(c, AV_LOG_ERROR, "%ibpp not supported by yuv2rgb\n", bpp);
+        return -1;
+    }
+    return 0;
+}
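
The plain C paths above are table driven: ff_yuv2rgb_c_init_tables() bakes the destination pixel layout (together with range, brightness, contrast and saturation) into per-channel lookup tables, and the LOADCHROMA/PUTRGB macros then emit each pixel with a handful of loads and additions. A condensed sketch of the inner step, for illustration only (the function name and the uint16_t output type are illustrative, roughly matching a 16 bpp destination):

    /* r points into table_rV at an offset picked from V, b into table_bU at
     * an offset picked from U, and g into the green table at an offset
     * combining U and V; indexing each by the luma value and summing gives
     * the packed pixel.  The dithered variants add a small per-position
     * offset to Y before the lookups. */
    static uint16_t put_one_pixel(const uint16_t *r, const uint16_t *g,
                                  const uint16_t *b, int Y)
    {
        return (uint16_t)(r[Y] + g[Y] + b[Y]);
    }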

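sws_getCoefficients() above, declared in swscale.h, is the public way to obtain one of the ff_yuv2rgb_coeffs rows by its SWS_CS_* index; the result is typically passed to sws_setColorspaceDetails(). A minimal usage sketch, assuming an already created SwsContext (illustrative only; the helper name is made up and error checking is omitted):

    #include "libswscale/swscale.h"

    /* Switch an existing scaling context to BT.709 coefficients, keeping
     * default brightness (0) and unity contrast/saturation (1 << 16). */
    static void set_bt709(struct SwsContext *sws)
    {
        const int *coefs = sws_getCoefficients(SWS_CS_ITU709);
        sws_setColorspaceDetails(sws, coefs, 0, coefs, 0,
                                 0, 1 << 16, 1 << 16);
    }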


More information about the ffmpeg-cvslog mailing list