[FFmpeg-cvslog] swscale/ppc: VSX-optimize non-full-chroma yuv2rgb_1

Lauri Kasanen git at videolan.org
Sun Apr 7 09:20:48 EEST 2019


ffmpeg | branch: master | Lauri Kasanen <cand at gmx.com> | Sun Mar 31 17:01:37 2019 +0300| [50e672bc54372f2bd228d83cc040fad8c06ad8ed] | committer: Lauri Kasanen

swscale/ppc: VSX-optimize non-full-chroma yuv2rgb_1

./ffmpeg -f lavfi -i yuvtestsrc=duration=1:size=1200x1440 -sws_flags fast_bilinear \
        -s 1200x1440 -f null -vframes 100 -pix_fmt $i -nostats \
        -cpuflags 0 -v error -

Uses 32-bit vector multiplies; POWER8 only.
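
(A minimal sketch, not part of the commit: the hot path relies on 32-bit
element-wise vector multiplies, which vec_mul() only lowers to a single
instruction on POWER8 / ISA 2.07 and later; the function name below is
illustrative.)

        #include <altivec.h>

        /* 32-bit vector multiply; maps to vmuluwm on POWER8 and newer. */
        vector int scale(vector int v, vector int coeff)
        {
            return vec_mul(v, coeff);
        }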

1.8-2.3x speedup (for each format, the first line is before, the second after):

rgb24
  18192 UNITS in yuv2packed1,   32767 runs,      1 skips
   9983 UNITS in yuv2packed1,   32760 runs,      8 skips
bgr24
  18665 UNITS in yuv2packed1,   32766 runs,      2 skips
   9925 UNITS in yuv2packed1,   32763 runs,      5 skips
rgba
  20239 UNITS in yuv2packed1,   32767 runs,      1 skips
   8794 UNITS in yuv2packed1,   32759 runs,      9 skips
bgra
  20354 UNITS in yuv2packed1,   32768 runs,      0 skips
   8770 UNITS in yuv2packed1,   32761 runs,      7 skips
argb
  20185 UNITS in yuv2packed1,   32768 runs,      0 skips
   8761 UNITS in yuv2packed1,   32761 runs,      7 skips
abgr
  20360 UNITS in yuv2packed1,   32766 runs,      2 skips
   8759 UNITS in yuv2packed1,   32764 runs,      4 skips

This is a modest speedup, but the x86 MMX version also gains only ~2x. The MMX version
is, moreover, heavily inaccurate, while the VSX version maintains high accuracy.
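
For reference, a scalar sketch of the fixed-point pipeline that the new
WRITERGB macro vectorizes, ignoring the input pre-scaling; the constants
mirror the patch, while the free-standing function and its parameters are
illustrative only:

        #include <stdint.h>

        static uint8_t chan8(int32_t y, int32_t chroma, int32_t coeff,
                             int32_t y_coeff, int32_t y_offset)
        {
            int32_t v = (y - y_offset) * y_coeff + (1 << 21) /* y_add rounding bias */
                      + chroma * coeff;
            if (v < 0)           v = 0;          /* vec_max with zero32   */
            if (v > (1 << 30))   v = 1 << 30;    /* vec_min with rgbclip  */
            v >>= 22;                            /* vec_sr by shift22     */
            return v > 255 ? 255 : (uint8_t) v;  /* saturating vec_packsu */
        }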

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=50e672bc54372f2bd228d83cc040fad8c06ad8ed
---

 libswscale/ppc/swscale_vsx.c | 425 +++++++++++++++++++++++++++++++++----------
 1 file changed, 330 insertions(+), 95 deletions(-)

diff --git a/libswscale/ppc/swscale_vsx.c b/libswscale/ppc/swscale_vsx.c
index 69ec63d027..0ac8cacb51 100644
--- a/libswscale/ppc/swscale_vsx.c
+++ b/libswscale/ppc/swscale_vsx.c
@@ -422,6 +422,104 @@ yuv2NBPSX(16, BE, 1, 16, int32_t)
 yuv2NBPSX(16, LE, 0, 16, int32_t)
 #endif
 
+#define WRITERGB \
+        R_l = vec_max(R_l, zero32); \
+        R_r = vec_max(R_r, zero32); \
+        G_l = vec_max(G_l, zero32); \
+        G_r = vec_max(G_r, zero32); \
+        B_l = vec_max(B_l, zero32); \
+        B_r = vec_max(B_r, zero32); \
+\
+        R_l = vec_min(R_l, rgbclip); \
+        R_r = vec_min(R_r, rgbclip); \
+        G_l = vec_min(G_l, rgbclip); \
+        G_r = vec_min(G_r, rgbclip); \
+        B_l = vec_min(B_l, rgbclip); \
+        B_r = vec_min(B_r, rgbclip); \
+\
+        R_l = vec_sr(R_l, shift22); \
+        R_r = vec_sr(R_r, shift22); \
+        G_l = vec_sr(G_l, shift22); \
+        G_r = vec_sr(G_r, shift22); \
+        B_l = vec_sr(B_l, shift22); \
+        B_r = vec_sr(B_r, shift22); \
+\
+        rd16 = vec_packsu(R_l, R_r); \
+        gd16 = vec_packsu(G_l, G_r); \
+        bd16 = vec_packsu(B_l, B_r); \
+        rd = vec_packsu(rd16, zero16); \
+        gd = vec_packsu(gd16, zero16); \
+        bd = vec_packsu(bd16, zero16); \
+\
+        switch(target) { \
+        case AV_PIX_FMT_RGB24: \
+            out0 = vec_perm(rd, gd, perm3rg0); \
+            out0 = vec_perm(out0, bd, perm3tb0); \
+            out1 = vec_perm(rd, gd, perm3rg1); \
+            out1 = vec_perm(out1, bd, perm3tb1); \
+\
+            vec_vsx_st(out0, 0, dest); \
+            vec_vsx_st(out1, 16, dest); \
+\
+            dest += 24; \
+        break; \
+        case AV_PIX_FMT_BGR24: \
+            out0 = vec_perm(bd, gd, perm3rg0); \
+            out0 = vec_perm(out0, rd, perm3tb0); \
+            out1 = vec_perm(bd, gd, perm3rg1); \
+            out1 = vec_perm(out1, rd, perm3tb1); \
+\
+            vec_vsx_st(out0, 0, dest); \
+            vec_vsx_st(out1, 16, dest); \
+\
+            dest += 24; \
+        break; \
+        case AV_PIX_FMT_BGRA: \
+            out0 = vec_mergeh(bd, gd); \
+            out1 = vec_mergeh(rd, ad); \
+\
+            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1); \
+            vec_vsx_st(tmp8, 0, dest); \
+            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1); \
+            vec_vsx_st(tmp8, 16, dest); \
+\
+            dest += 32; \
+        break; \
+        case AV_PIX_FMT_RGBA: \
+            out0 = vec_mergeh(rd, gd); \
+            out1 = vec_mergeh(bd, ad); \
+\
+            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1); \
+            vec_vsx_st(tmp8, 0, dest); \
+            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1); \
+            vec_vsx_st(tmp8, 16, dest); \
+\
+            dest += 32; \
+        break; \
+        case AV_PIX_FMT_ARGB: \
+            out0 = vec_mergeh(ad, rd); \
+            out1 = vec_mergeh(gd, bd); \
+\
+            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1); \
+            vec_vsx_st(tmp8, 0, dest); \
+            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1); \
+            vec_vsx_st(tmp8, 16, dest); \
+\
+            dest += 32; \
+        break; \
+        case AV_PIX_FMT_ABGR: \
+            out0 = vec_mergeh(ad, bd); \
+            out1 = vec_mergeh(gd, rd); \
+\
+            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1); \
+            vec_vsx_st(tmp8, 0, dest); \
+            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1); \
+            vec_vsx_st(tmp8, 16, dest); \
+\
+            dest += 32; \
+        break; \
+        }
+
 static av_always_inline void
 yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
                      const int16_t *ubuf[2], const int16_t *vbuf[2],
@@ -547,105 +645,196 @@ yuv2rgb_full_1_vsx_template(SwsContext *c, const int16_t *buf0,
         B_r = vec_mul(vu32_r, u2b_coeff);
         B_r = vec_add(B_r, vy32_r);
 
-        R_l = vec_max(R_l, zero32);
-        R_r = vec_max(R_r, zero32);
-        G_l = vec_max(G_l, zero32);
-        G_r = vec_max(G_r, zero32);
-        B_l = vec_max(B_l, zero32);
-        B_r = vec_max(B_r, zero32);
-
-        R_l = vec_min(R_l, rgbclip);
-        R_r = vec_min(R_r, rgbclip);
-        G_l = vec_min(G_l, rgbclip);
-        G_r = vec_min(G_r, rgbclip);
-        B_l = vec_min(B_l, rgbclip);
-        B_r = vec_min(B_r, rgbclip);
-
-        R_l = vec_sr(R_l, shift22);
-        R_r = vec_sr(R_r, shift22);
-        G_l = vec_sr(G_l, shift22);
-        G_r = vec_sr(G_r, shift22);
-        B_l = vec_sr(B_l, shift22);
-        B_r = vec_sr(B_r, shift22);
-
-        rd16 = vec_packsu(R_l, R_r);
-        gd16 = vec_packsu(G_l, G_r);
-        bd16 = vec_packsu(B_l, B_r);
-        rd = vec_packsu(rd16, zero16);
-        gd = vec_packsu(gd16, zero16);
-        bd = vec_packsu(bd16, zero16);
-
-        switch(target) {
-        case AV_PIX_FMT_RGB24:
-            out0 = vec_perm(rd, gd, perm3rg0);
-            out0 = vec_perm(out0, bd, perm3tb0);
-            out1 = vec_perm(rd, gd, perm3rg1);
-            out1 = vec_perm(out1, bd, perm3tb1);
-
-            vec_vsx_st(out0, 0, dest);
-            vec_vsx_st(out1, 16, dest);
-
-            dest += 24;
-        break;
-        case AV_PIX_FMT_BGR24:
-            out0 = vec_perm(bd, gd, perm3rg0);
-            out0 = vec_perm(out0, rd, perm3tb0);
-            out1 = vec_perm(bd, gd, perm3rg1);
-            out1 = vec_perm(out1, rd, perm3tb1);
-
-            vec_vsx_st(out0, 0, dest);
-            vec_vsx_st(out1, 16, dest);
-
-            dest += 24;
-        break;
-        case AV_PIX_FMT_BGRA:
-            out0 = vec_mergeh(bd, gd);
-            out1 = vec_mergeh(rd, ad);
-
-            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
-            vec_vsx_st(tmp8, 0, dest);
-            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
-            vec_vsx_st(tmp8, 16, dest);
-
-            dest += 32;
-        break;
-        case AV_PIX_FMT_RGBA:
-            out0 = vec_mergeh(rd, gd);
-            out1 = vec_mergeh(bd, ad);
-
-            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
-            vec_vsx_st(tmp8, 0, dest);
-            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
-            vec_vsx_st(tmp8, 16, dest);
-
-            dest += 32;
-        break;
-        case AV_PIX_FMT_ARGB:
-            out0 = vec_mergeh(ad, rd);
-            out1 = vec_mergeh(gd, bd);
-
-            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
-            vec_vsx_st(tmp8, 0, dest);
-            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
-            vec_vsx_st(tmp8, 16, dest);
-
-            dest += 32;
-        break;
-        case AV_PIX_FMT_ABGR:
-            out0 = vec_mergeh(ad, bd);
-            out1 = vec_mergeh(gd, rd);
-
-            tmp8 = (vector uint8_t) vec_mergeh((vector uint16_t) out0, (vector uint16_t) out1);
-            vec_vsx_st(tmp8, 0, dest);
-            tmp8 = (vector uint8_t) vec_mergel((vector uint16_t) out0, (vector uint16_t) out1);
-            vec_vsx_st(tmp8, 16, dest);
-
-            dest += 32;
-        break;
+        WRITERGB
+    }
+}
+
+static av_always_inline void
+yuv2rgb_1_vsx_template(SwsContext *c, const int16_t *buf0,
+                     const int16_t *ubuf[2], const int16_t *vbuf[2],
+                     const int16_t *abuf0, uint8_t *dest, int dstW,
+                     int uvalpha, int y, enum AVPixelFormat target,
+                     int hasAlpha)
+{
+    const int16_t *ubuf0 = ubuf[0], *vbuf0 = vbuf[0];
+    const int16_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1];
+    vector int16_t vy, vu, vv, A = vec_splat_s16(0), tmp16;
+    vector int32_t vy32_l, vy32_r, vu32_l, vu32_r, vv32_l, vv32_r, tmp32, tmp32_2;
+    vector int32_t vud32_l, vud32_r, vvd32_l, vvd32_r;
+    vector int32_t R_l, R_r, G_l, G_r, B_l, B_r;
+    vector uint16_t rd16, gd16, bd16;
+    vector uint8_t rd, bd, gd, ad, out0, out1, tmp8;
+    const vector uint16_t zero16 = vec_splat_u16(0);
+    const vector int32_t y_offset = vec_splats(c->yuv2rgb_y_offset);
+    const vector int32_t y_coeff = vec_splats(c->yuv2rgb_y_coeff);
+    const vector int32_t y_add = vec_splats(1 << 21);
+    const vector int32_t v2r_coeff = vec_splats(c->yuv2rgb_v2r_coeff);
+    const vector int32_t v2g_coeff = vec_splats(c->yuv2rgb_v2g_coeff);
+    const vector int32_t u2g_coeff = vec_splats(c->yuv2rgb_u2g_coeff);
+    const vector int32_t u2b_coeff = vec_splats(c->yuv2rgb_u2b_coeff);
+    const vector int32_t rgbclip = vec_splats(1 << 30);
+    const vector int32_t zero32 = vec_splat_s32(0);
+    const vector uint32_t shift2 = vec_splat_u32(2);
+    const vector uint32_t shift22 = vec_splats(22U);
+    const vector uint16_t sub7 = vec_splats((uint16_t) (128 << 7));
+    const vector uint16_t sub8 = vec_splats((uint16_t) (128 << 8));
+    const vector int16_t mul4 = vec_splat_s16(4);
+    const vector int16_t mul8 = vec_splat_s16(8);
+    const vector int16_t add64 = vec_splat_s16(64);
+    const vector uint16_t shift7 = vec_splat_u16(7);
+    const vector int16_t max255 = vec_splat_s16(255);
+    int i;
+
+    // Various permutations
+    const vector uint8_t doubleleft = (vector uint8_t) {0, 1, 2, 3,
+                                                        0, 1, 2, 3,
+                                                        4, 5, 6, 7,
+                                                        4, 5, 6, 7 };
+    const vector uint8_t doubleright = (vector uint8_t) {8, 9, 10, 11,
+                                                        8, 9, 10, 11,
+                                                        12, 13, 14, 15,
+                                                        12, 13, 14, 15 };
+    const vector uint8_t perm3rg0 = (vector uint8_t) {0x0, 0x10, 0,
+                                                      0x1, 0x11, 0,
+                                                      0x2, 0x12, 0,
+                                                      0x3, 0x13, 0,
+                                                      0x4, 0x14, 0,
+                                                      0x5 };
+    const vector uint8_t perm3rg1 = (vector uint8_t) {     0x15, 0,
+                                                      0x6, 0x16, 0,
+                                                      0x7, 0x17, 0 };
+    const vector uint8_t perm3tb0 = (vector uint8_t) {0x0, 0x1, 0x10,
+                                                      0x3, 0x4, 0x11,
+                                                      0x6, 0x7, 0x12,
+                                                      0x9, 0xa, 0x13,
+                                                      0xc, 0xd, 0x14,
+                                                      0xf };
+    const vector uint8_t perm3tb1 = (vector uint8_t) {     0x0, 0x15,
+                                                      0x2, 0x3, 0x16,
+                                                      0x5, 0x6, 0x17 };
+
+    for (i = 0; i < (dstW + 1) >> 1; i += 8) { // The x86 asm also overwrites padding bytes.
+        vy = vec_ld(0, &buf0[i * 2]);
+        vy32_l = vec_unpackh(vy);
+        vy32_r = vec_unpackl(vy);
+        vy32_l = vec_sl(vy32_l, shift2);
+        vy32_r = vec_sl(vy32_r, shift2);
+
+        vu = vec_ld(0, &ubuf0[i]);
+        vv = vec_ld(0, &vbuf0[i]);
+        if (uvalpha < 2048) {
+            vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub7);
+            vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub7);
+
+            tmp32 = vec_mule(vu, mul4);
+            tmp32_2 = vec_mulo(vu, mul4);
+            vu32_l = vec_mergeh(tmp32, tmp32_2);
+            vu32_r = vec_mergel(tmp32, tmp32_2);
+            tmp32 = vec_mule(vv, mul4);
+            tmp32_2 = vec_mulo(vv, mul4);
+            vv32_l = vec_mergeh(tmp32, tmp32_2);
+            vv32_r = vec_mergel(tmp32, tmp32_2);
+        } else {
+            tmp16 = vec_ld(0, &ubuf1[i]);
+            vu = vec_add(vu, tmp16);
+            vu = (vector int16_t) vec_sub((vector uint16_t) vu, sub8);
+            tmp16 = vec_ld(0, &vbuf1[i]);
+            vv = vec_add(vv, tmp16);
+            vv = (vector int16_t) vec_sub((vector uint16_t) vv, sub8);
+
+            vu32_l = vec_mule(vu, mul8);
+            vu32_r = vec_mulo(vu, mul8);
+            vv32_l = vec_mule(vv, mul8);
+            vv32_r = vec_mulo(vv, mul8);
         }
+
+        if (hasAlpha) {
+            A = vec_ld(0, &abuf0[i]);
+            A = vec_add(A, add64);
+            A = vec_sr(A, shift7);
+            A = vec_min(A, max255);
+            ad = vec_packsu(A, (vector int16_t) zero16);
+        } else {
+            ad = vec_splats((uint8_t) 255);
+        }
+
+        vy32_l = vec_sub(vy32_l, y_offset);
+        vy32_r = vec_sub(vy32_r, y_offset);
+        vy32_l = vec_mul(vy32_l, y_coeff);
+        vy32_r = vec_mul(vy32_r, y_coeff);
+        vy32_l = vec_add(vy32_l, y_add);
+        vy32_r = vec_add(vy32_r, y_add);
+
+        // Use the first UV half
+        vud32_l = vec_perm(vu32_l, vu32_l, doubleleft);
+        vud32_r = vec_perm(vu32_l, vu32_l, doubleright);
+        vvd32_l = vec_perm(vv32_l, vv32_l, doubleleft);
+        vvd32_r = vec_perm(vv32_l, vv32_l, doubleright);
+
+        R_l = vec_mul(vvd32_l, v2r_coeff);
+        R_l = vec_add(R_l, vy32_l);
+        R_r = vec_mul(vvd32_r, v2r_coeff);
+        R_r = vec_add(R_r, vy32_r);
+        G_l = vec_mul(vvd32_l, v2g_coeff);
+        tmp32 = vec_mul(vud32_l, u2g_coeff);
+        G_l = vec_add(G_l, vy32_l);
+        G_l = vec_add(G_l, tmp32);
+        G_r = vec_mul(vvd32_r, v2g_coeff);
+        tmp32 = vec_mul(vud32_r, u2g_coeff);
+        G_r = vec_add(G_r, vy32_r);
+        G_r = vec_add(G_r, tmp32);
+
+        B_l = vec_mul(vud32_l, u2b_coeff);
+        B_l = vec_add(B_l, vy32_l);
+        B_r = vec_mul(vud32_r, u2b_coeff);
+        B_r = vec_add(B_r, vy32_r);
+
+        WRITERGB
+
+        // New Y for the second half
+        vy = vec_ld(16, &buf0[i * 2]);
+        vy32_l = vec_unpackh(vy);
+        vy32_r = vec_unpackl(vy);
+        vy32_l = vec_sl(vy32_l, shift2);
+        vy32_r = vec_sl(vy32_r, shift2);
+
+        vy32_l = vec_sub(vy32_l, y_offset);
+        vy32_r = vec_sub(vy32_r, y_offset);
+        vy32_l = vec_mul(vy32_l, y_coeff);
+        vy32_r = vec_mul(vy32_r, y_coeff);
+        vy32_l = vec_add(vy32_l, y_add);
+        vy32_r = vec_add(vy32_r, y_add);
+
+        // Second UV half
+        vud32_l = vec_perm(vu32_r, vu32_r, doubleleft);
+        vud32_r = vec_perm(vu32_r, vu32_r, doubleright);
+        vvd32_l = vec_perm(vv32_r, vv32_r, doubleleft);
+        vvd32_r = vec_perm(vv32_r, vv32_r, doubleright);
+
+        R_l = vec_mul(vvd32_l, v2r_coeff);
+        R_l = vec_add(R_l, vy32_l);
+        R_r = vec_mul(vvd32_r, v2r_coeff);
+        R_r = vec_add(R_r, vy32_r);
+        G_l = vec_mul(vvd32_l, v2g_coeff);
+        tmp32 = vec_mul(vud32_l, u2g_coeff);
+        G_l = vec_add(G_l, vy32_l);
+        G_l = vec_add(G_l, tmp32);
+        G_r = vec_mul(vvd32_r, v2g_coeff);
+        tmp32 = vec_mul(vud32_r, u2g_coeff);
+        G_r = vec_add(G_r, vy32_r);
+        G_r = vec_add(G_r, tmp32);
+
+        B_l = vec_mul(vud32_l, u2b_coeff);
+        B_l = vec_add(B_l, vy32_l);
+        B_r = vec_mul(vud32_r, u2b_coeff);
+        B_r = vec_add(B_r, vy32_r);
+
+        WRITERGB
     }
 }
 
+#undef WRITERGB
+
 #define YUV2RGBWRAPPER(name, base, ext, fmt, hasAlpha) \
 static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                 const int16_t *ubuf[2], const int16_t *vbuf[2], \
@@ -656,6 +845,14 @@ static void name ## ext ## _1_vsx(SwsContext *c, const int16_t *buf0, \
                                   dstW, uvalpha, y, fmt, hasAlpha); \
 }
 
+YUV2RGBWRAPPER(yuv2, rgb, bgrx32, AV_PIX_FMT_BGRA,  0)
+YUV2RGBWRAPPER(yuv2, rgb, rgbx32, AV_PIX_FMT_RGBA,  0)
+YUV2RGBWRAPPER(yuv2, rgb, xrgb32, AV_PIX_FMT_ARGB,  0)
+YUV2RGBWRAPPER(yuv2, rgb, xbgr32, AV_PIX_FMT_ABGR,  0)
+
+YUV2RGBWRAPPER(yuv2, rgb, rgb24, AV_PIX_FMT_RGB24,   0)
+YUV2RGBWRAPPER(yuv2, rgb, bgr24, AV_PIX_FMT_BGR24,   0)
+
 YUV2RGBWRAPPER(yuv2, rgb_full, bgrx32_full, AV_PIX_FMT_BGRA,  0)
 YUV2RGBWRAPPER(yuv2, rgb_full, rgbx32_full, AV_PIX_FMT_RGBA,  0)
 YUV2RGBWRAPPER(yuv2, rgb_full, xrgb32_full, AV_PIX_FMT_ARGB,  0)
@@ -1089,6 +1286,44 @@ av_cold void ff_sws_init_swscale_vsx(SwsContext *c)
                 c->yuv2packed2 = yuv2uyvy422_2_vsx;
                 c->yuv2packedX = yuv2uyvy422_X_vsx;
             break;
+            case AV_PIX_FMT_BGRA:
+                if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+                    if (!c->needAlpha) {
+                        c->yuv2packed1 = yuv2bgrx32_1_vsx;
+                    }
+                }
+            break;
+            case AV_PIX_FMT_RGBA:
+                if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+                    if (!c->needAlpha) {
+                        c->yuv2packed1 = yuv2rgbx32_1_vsx;
+                    }
+                }
+            break;
+            case AV_PIX_FMT_ARGB:
+                if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+                    if (!c->needAlpha) {
+                        c->yuv2packed1 = yuv2xrgb32_1_vsx;
+                    }
+                }
+            break;
+            case AV_PIX_FMT_ABGR:
+                if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+                    if (!c->needAlpha) {
+                        c->yuv2packed1 = yuv2xbgr32_1_vsx;
+                    }
+                }
+            break;
+            case AV_PIX_FMT_RGB24:
+                if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+                    c->yuv2packed1 = yuv2rgb24_1_vsx;
+                }
+            break;
+            case AV_PIX_FMT_BGR24:
+                if (HAVE_POWER8 && cpu_flags & AV_CPU_FLAG_POWER8) {
+                    c->yuv2packed1 = yuv2bgr24_1_vsx;
+                }
+            break;
         }
     }
 #endif /* !HAVE_BIGENDIAN */


