[PATCH] Move the CPU MM flags detection API to libavutil, renaming the function mm_support() to av_get_cpu_mm_flags() and the global mm_flags to av_cpu_mm_flags.
Stefano Sabatini
stefano.sabatini-lala
Mon Aug 23 20:30:07 CEST 2010
Based on a patch by Baptiste, see thread:
Subject: [FFmpeg-devel] [PATCH] yadif port to libavfilter
Date: Thu, 20 May 2010 01:37:35 -0700
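
A minimal sketch of the intended usage after this patch (the ff_mydsp_init()
wrapper below is hypothetical and only for illustration; the header, function
and flag names come from this patch):

    #include "libavutil/cpuid.h"

    static void ff_mydsp_init(void)
    {
        int flags = av_get_cpu_mm_flags();  /* replaces the old mm_support() */

        if (flags & FF_MM_SSE2) {
            /* install SSE2-optimized function pointers here */
        }
    }

The global av_cpu_mm_flags remains available for code paths that cache the
detected flags, as the updated dsputil_init_mmx() does.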
---
libavcodec/arm/dsputil_init_arm.c | 6 +--
libavcodec/arm/dsputil_iwmmxt.c | 8 +--
libavcodec/arm/mpegvideo_iwmmxt.c | 2 +-
libavcodec/avcodec.h | 5 +-
libavcodec/dct-test.c | 7 ++-
libavcodec/dsputil.h | 13 +-----
libavcodec/h263dec.c | 2 +-
libavcodec/ppc/dsputil_ppc.c | 15 +------
libavcodec/ppc/mpegvideo_altivec.c | 2 +-
libavcodec/x86/Makefile | 3 +-
libavcodec/x86/cavsdsp_mmx.c | 2 +-
libavcodec/x86/dnxhd_mmx.c | 2 +-
libavcodec/x86/dsputil_mmx.c | 73 ++++++++++++++++-----------------
libavcodec/x86/dsputilenc_mmx.c | 18 ++++----
libavcodec/x86/fft.c | 4 +-
libavcodec/x86/h264dsp_mmx.c | 12 +++---
libavcodec/x86/motion_est_mmx.c | 6 +-
libavcodec/x86/mpegaudiodec_mmx.c | 4 +-
libavcodec/x86/mpegvideo_mmx.c | 10 ++--
libavcodec/x86/snowdsp_mmx.c | 8 ++--
libavcodec/x86/vc1dsp_mmx.c | 14 +++---
libavcodec/x86/vp8dsp-init.c | 16 ++++----
libavutil/Makefile | 4 +-
{libavcodec/x86 => libavutil}/cpuid.c | 45 +++++++++++++++++----
libavutil/cpuid.h | 52 +++++++++++++++++++++++
25 files changed, 194 insertions(+), 139 deletions(-)
rename {libavcodec/x86 => libavutil}/cpuid.c (88%)
create mode 100644 libavutil/cpuid.h
diff --git a/libavcodec/arm/dsputil_init_arm.c b/libavcodec/arm/dsputil_init_arm.c
index c9c3351..e84d9b0 100644
--- a/libavcodec/arm/dsputil_init_arm.c
+++ b/libavcodec/arm/dsputil_init_arm.c
@@ -19,6 +19,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/cpuid.h"
#include "libavcodec/dsputil.h"
#include "dsputil_arm.h"
@@ -73,11 +74,6 @@ static void simple_idct_arm_add(uint8_t *dest, int line_size, DCTELEM *block)
ff_add_pixels_clamped(block, dest, line_size);
}
-int mm_support(void)
-{
- return HAVE_IWMMXT * FF_MM_IWMMXT;
-}
-
void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx)
{
ff_put_pixels_clamped = c->put_pixels_clamped;
diff --git a/libavcodec/arm/dsputil_iwmmxt.c b/libavcodec/arm/dsputil_iwmmxt.c
index 6a23732..8b9b905 100644
--- a/libavcodec/arm/dsputil_iwmmxt.c
+++ b/libavcodec/arm/dsputil_iwmmxt.c
@@ -150,18 +150,16 @@ static void nop(uint8_t *block, const uint8_t *pixels, int line_size, int h)
/* A run time test is not simple. If this file is compiled in
* then we should install the functions
*/
-int mm_flags = FF_MM_IWMMXT; /* multimedia extension flags */
-
void ff_dsputil_init_iwmmxt(DSPContext* c, AVCodecContext *avctx)
{
if (avctx->dsp_mask) {
if (avctx->dsp_mask & FF_MM_FORCE)
- mm_flags |= (avctx->dsp_mask & 0xffff);
+ av_cpu_mm_flags |= (avctx->dsp_mask & 0xffff);
else
- mm_flags &= ~(avctx->dsp_mask & 0xffff);
+ av_cpu_mm_flags &= ~(avctx->dsp_mask & 0xffff);
}
- if (!(mm_flags & FF_MM_IWMMXT)) return;
+ if (!(av_cpu_mm_flags & FF_MM_IWMMXT)) return;
c->add_pixels_clamped = add_pixels_clamped_iwmmxt;
diff --git a/libavcodec/arm/mpegvideo_iwmmxt.c b/libavcodec/arm/mpegvideo_iwmmxt.c
index 9e3878f..ef0daf5 100644
--- a/libavcodec/arm/mpegvideo_iwmmxt.c
+++ b/libavcodec/arm/mpegvideo_iwmmxt.c
@@ -111,7 +111,7 @@ static void dct_unquantize_h263_inter_iwmmxt(MpegEncContext *s,
void MPV_common_init_iwmmxt(MpegEncContext *s)
{
- if (!(mm_flags & FF_MM_IWMMXT)) return;
+ if (!(av_cpu_mm_flags & FF_MM_IWMMXT)) return;
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_iwmmxt;
#if 0
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 042b5c9..a383761 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -1649,13 +1649,13 @@ typedef struct AVCodecContext {
* result into program crash.)
*/
unsigned dsp_mask;
+
+#if LIBAVCODEC_VERSION_MAJOR < 53
#define FF_MM_FORCE 0x80000000 /* Force usage of selected flags (OR) */
/* lower 16 bits - CPU features */
#define FF_MM_MMX 0x0001 ///< standard MMX
#define FF_MM_3DNOW 0x0004 ///< AMD 3DNOW
-#if LIBAVCODEC_VERSION_MAJOR < 53
#define FF_MM_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
-#endif
#define FF_MM_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
#define FF_MM_SSE 0x0008 ///< SSE functions
#define FF_MM_SSE2 0x0010 ///< PIV SSE2 functions
@@ -1670,6 +1670,7 @@ typedef struct AVCodecContext {
#define FF_MM_SSE42 0x0200 ///< Nehalem SSE4.2 functions
#define FF_MM_IWMMXT 0x0100 ///< XScale IWMMXT
#define FF_MM_ALTIVEC 0x0001 ///< standard AltiVec
+#endif
/**
* bits per sample/pixel from the demuxer (needed for huffyuv).
diff --git a/libavcodec/dct-test.c b/libavcodec/dct-test.c
index 4f0a0c6..f43c5c6 100644
--- a/libavcodec/dct-test.c
+++ b/libavcodec/dct-test.c
@@ -33,6 +33,7 @@
#include <math.h>
#include "libavutil/common.h"
+#include "libavutil/cpuid.h"
#include "libavutil/lfg.h"
#include "simple_idct.h"
@@ -72,7 +73,7 @@ struct algo {
void (* func) (DCTELEM *block);
void (* ref) (DCTELEM *block);
enum formattag { NO_PERM,MMX_PERM, MMX_SIMPLE_PERM, SCALE_PERM, SSE2_PERM, PARTTRANS_PERM } format;
- int mm_support;
+ int mm_flags;
};
#ifndef FAAN_POSTSCALE
@@ -554,7 +555,7 @@ int main(int argc, char **argv)
int test_idct = 0, test_248_dct = 0;
int c,i;
int test=1;
- cpu_flags = mm_support();
+ cpu_flags = av_get_cpu_mm_flags();
ff_ref_dct_init();
idct_mmx_init();
@@ -591,7 +592,7 @@ int main(int argc, char **argv)
idct248_error("SIMPLE-C", ff_simple_idct248_put);
} else {
for (i=0;algos[i].name;i++)
- if (algos[i].is_idct == test_idct && !(~cpu_flags & algos[i].mm_support)) {
+ if (algos[i].is_idct == test_idct && !(~cpu_flags & algos[i].mm_flags)) {
dct_error (algos[i].name, algos[i].is_idct, algos[i].func, algos[i].ref, algos[i].format, test);
}
}
diff --git a/libavcodec/dsputil.h b/libavcodec/dsputil.h
index 9ef0270..f9585ef 100644
--- a/libavcodec/dsputil.h
+++ b/libavcodec/dsputil.h
@@ -30,6 +30,7 @@
#ifndef AVCODEC_DSPUTIL_H
#define AVCODEC_DSPUTIL_H
+#include "libavutil/cpuid.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
@@ -615,11 +616,6 @@ static inline int get_penalty_factor(int lambda, int lambda2, int type){
*/
#define emms_c()
-/* should be defined by architectures supporting
- one or more MultiMedia extension */
-int mm_support(void);
-extern int mm_flags;
-
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
@@ -650,7 +646,7 @@ static inline void emms(void)
#define emms_c() \
{\
- if (mm_flags & FF_MM_MMX)\
+ if (av_cpu_mm_flags & FF_MM_MMX)\
emms();\
}
@@ -668,11 +664,6 @@ static inline void emms(void)
#define STRIDE_ALIGN 16
-#else
-
-#define mm_flags 0
-#define mm_support() 0
-
#endif
#ifndef STRIDE_ALIGN
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index d9d6f7e..7439879 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -553,7 +553,7 @@ retry:
#endif
#if HAVE_MMX
- if(s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (mm_flags & FF_MM_MMX)){
+ if (s->codec_id == CODEC_ID_MPEG4 && s->xvid_build>=0 && avctx->idct_algo == FF_IDCT_AUTO && (av_cpu_mm_flags & FF_MM_MMX)) {
avctx->idct_algo= FF_IDCT_XVIDMMX;
avctx->coded_width= 0; // force reinit
// dsputil_init(&s->dsp, avctx);
diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c
index 229ca09..f5714c3 100644
--- a/libavcodec/ppc/dsputil_ppc.c
+++ b/libavcodec/ppc/dsputil_ppc.c
@@ -23,19 +23,6 @@
#include "libavcodec/dsputil.h"
#include "dsputil_altivec.h"
-int mm_flags = 0;
-
-int mm_support(void)
-{
- int result = 0;
-#if HAVE_ALTIVEC
- if (has_altivec()) {
- result |= FF_MM_ALTIVEC;
- }
-#endif /* result */
- return result;
-}
-
/* ***** WARNING ***** WARNING ***** WARNING ***** */
/*
clear_blocks_dcbz32_ppc will not work properly on PowerPC processors with a
@@ -182,7 +169,7 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
if(CONFIG_H264_DECODER) dsputil_h264_init_ppc(c, avctx);
if (has_altivec()) {
- mm_flags |= FF_MM_ALTIVEC;
+ av_cpu_mm_flags |= FF_MM_ALTIVEC;
dsputil_init_altivec(c, avctx);
if(CONFIG_VC1_DECODER)
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
index 0126b7f..03d2dd7 100644
--- a/libavcodec/ppc/mpegvideo_altivec.c
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -570,7 +570,7 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s,
void MPV_common_init_altivec(MpegEncContext *s)
{
- if ((mm_flags & FF_MM_ALTIVEC) == 0) return;
+ if ((av_cpu_mm_flags & FF_MM_ALTIVEC) == 0) return;
if (s->avctx->lowres==0) {
if ((s->avctx->idct_algo == FF_IDCT_AUTO) ||
diff --git a/libavcodec/x86/Makefile b/libavcodec/x86/Makefile
index 6397378..2822435 100644
--- a/libavcodec/x86/Makefile
+++ b/libavcodec/x86/Makefile
@@ -42,8 +42,7 @@ MMX-OBJS-$(HAVE_YASM) += x86/dsputil_yasm.o \
MMX-OBJS-$(CONFIG_FFT) += x86/fft.o
-OBJS-$(HAVE_MMX) += x86/cpuid.o \
- x86/dnxhd_mmx.o \
+OBJS-$(HAVE_MMX) += x86/dnxhd_mmx.o \
x86/dsputil_mmx.o \
x86/fdct_mmx.o \
x86/idct_mmx_xvid.o \
diff --git a/libavcodec/x86/cavsdsp_mmx.c b/libavcodec/x86/cavsdsp_mmx.c
index 7872b3c..7024235 100644
--- a/libavcodec/x86/cavsdsp_mmx.c
+++ b/libavcodec/x86/cavsdsp_mmx.c
@@ -472,7 +472,7 @@ static void ff_cavsdsp_init_3dnow(CAVSDSPContext* c, AVCodecContext *avctx) {
void ff_cavsdsp_init_mmx(CAVSDSPContext *c, AVCodecContext *avctx)
{
- int mm_flags = mm_support();
+ int mm_flags = av_get_cpu_mm_flags();
if (mm_flags & FF_MM_MMX2) ff_cavsdsp_init_mmx2 (c, avctx);
if (mm_flags & FF_MM_3DNOW) ff_cavsdsp_init_3dnow(c, avctx);
diff --git a/libavcodec/x86/dnxhd_mmx.c b/libavcodec/x86/dnxhd_mmx.c
index 59bcb39..b77d37d 100644
--- a/libavcodec/x86/dnxhd_mmx.c
+++ b/libavcodec/x86/dnxhd_mmx.c
@@ -52,7 +52,7 @@ static void get_pixels_8x4_sym_sse2(DCTELEM *block, const uint8_t *pixels, int l
void ff_dnxhd_init_mmx(DNXHDEncContext *ctx)
{
- if (mm_flags & FF_MM_SSE2) {
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
ctx->get_pixels_8x4_sym = get_pixels_8x4_sym_sse2;
}
}
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index 2b96be3..403e88f 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -22,6 +22,7 @@
* MMX optimization by Nick Kurshev <nickols_k at mail.ru>
*/
+#include "libavutil/cpuid.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
@@ -37,8 +38,6 @@
//#undef NDEBUG
//#include <assert.h>
-int mm_flags; /* multimedia extension flags */
-
/* pixel operations */
DECLARE_ALIGNED(8, const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
@@ -2504,31 +2503,31 @@ float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
- mm_flags = mm_support();
+ av_cpu_mm_flags = av_get_cpu_mm_flags();
if (avctx->dsp_mask) {
if (avctx->dsp_mask & FF_MM_FORCE)
- mm_flags |= (avctx->dsp_mask & 0xffff);
+ av_cpu_mm_flags |= (avctx->dsp_mask & 0xffff);
else
- mm_flags &= ~(avctx->dsp_mask & 0xffff);
+ av_cpu_mm_flags &= ~(avctx->dsp_mask & 0xffff);
}
#if 0
av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
- if (mm_flags & FF_MM_MMX)
+ if (av_cpu_mm_flags & FF_MM_MMX)
av_log(avctx, AV_LOG_INFO, " mmx");
- if (mm_flags & FF_MM_MMX2)
+ if (av_cpu_mm_flags & FF_MM_MMX2)
av_log(avctx, AV_LOG_INFO, " mmx2");
- if (mm_flags & FF_MM_3DNOW)
+ if (av_cpu_mm_flags & FF_MM_3DNOW)
av_log(avctx, AV_LOG_INFO, " 3dnow");
- if (mm_flags & FF_MM_SSE)
+ if (av_cpu_mm_flags & FF_MM_SSE)
av_log(avctx, AV_LOG_INFO, " sse");
- if (mm_flags & FF_MM_SSE2)
+ if (av_cpu_mm_flags & FF_MM_SSE2)
av_log(avctx, AV_LOG_INFO, " sse2");
av_log(avctx, AV_LOG_INFO, "\n");
#endif
- if (mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_MMX) {
const int idct_algo= avctx->idct_algo;
if(avctx->lowres==0){
@@ -2539,7 +2538,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
}else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
- if(mm_flags & FF_MM_MMX2){
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
c->idct_put= ff_libmpeg2mmx2_idct_put;
c->idct_add= ff_libmpeg2mmx2_idct_add;
c->idct = ff_mmxext_idct;
@@ -2552,7 +2551,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
#endif
}else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
idct_algo==FF_IDCT_VP3){
- if(mm_flags & FF_MM_SSE2){
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
c->idct_put= ff_vp3_idct_put_sse2;
c->idct_add= ff_vp3_idct_add_sse2;
c->idct = ff_vp3_idct_sse2;
@@ -2566,12 +2565,12 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}else if(idct_algo==FF_IDCT_CAVS){
c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
}else if(idct_algo==FF_IDCT_XVIDMMX){
- if(mm_flags & FF_MM_SSE2){
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
c->idct_put= ff_idct_xvid_sse2_put;
c->idct_add= ff_idct_xvid_sse2_add;
c->idct = ff_idct_xvid_sse2;
c->idct_permutation_type= FF_SSE2_IDCT_PERM;
- }else if(mm_flags & FF_MM_MMX2){
+ } else if (av_cpu_mm_flags & FF_MM_MMX2) {
c->idct_put= ff_idct_xvid_mmx2_put;
c->idct_add= ff_idct_xvid_mmx2_add;
c->idct = ff_idct_xvid_mmx2;
@@ -2588,7 +2587,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->add_pixels_clamped = add_pixels_clamped_mmx;
c->clear_block = clear_block_mmx;
c->clear_blocks = clear_blocks_mmx;
- if ((mm_flags & FF_MM_SSE) &&
+ if ((av_cpu_mm_flags & FF_MM_SSE) &&
!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
/* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
c->clear_block = clear_block_sse;
@@ -2632,7 +2631,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
}
- if (mm_flags & FF_MM_MMX2) {
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
c->prefetch = prefetch_mmx2;
c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
@@ -2723,7 +2722,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
- if( mm_flags&FF_MM_3DNOW )
+ if (av_cpu_mm_flags&FF_MM_3DNOW )
c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif
@@ -2731,7 +2730,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
ff_vc1dsp_init_mmx(c, avctx);
c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
- } else if (mm_flags & FF_MM_3DNOW) {
+ } else if (av_cpu_mm_flags & FF_MM_3DNOW) {
c->prefetch = prefetch_3dnow;
c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
@@ -2795,13 +2794,13 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
- if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
+ if ((av_cpu_mm_flags & FF_MM_SSE2) && !(av_cpu_mm_flags & FF_MM_3DNOW)) {
// these functions are slower than mmx on AMD, but faster on Intel
c->put_pixels_tab[0][0] = put_pixels16_sse2;
c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
H264_QPEL_FUNCS(0, 0, sse2);
}
- if(mm_flags & FF_MM_SSE2){
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
H264_QPEL_FUNCS(0, 1, sse2);
H264_QPEL_FUNCS(0, 2, sse2);
H264_QPEL_FUNCS(0, 3, sse2);
@@ -2820,7 +2819,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
}
#if HAVE_SSSE3
- if(mm_flags & FF_MM_SSSE3){
+ if (av_cpu_mm_flags & FF_MM_SSSE3) {
H264_QPEL_FUNCS(1, 0, ssse3);
H264_QPEL_FUNCS(1, 1, ssse3);
H264_QPEL_FUNCS(1, 2, ssse3);
@@ -2842,13 +2841,13 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
#if HAVE_YASM
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
- if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe
+ if (av_cpu_mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe
c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
#endif
}
#endif
- if(mm_flags & FF_MM_3DNOW){
+ if (av_cpu_mm_flags & FF_MM_3DNOW) {
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
c->vector_fmul = vector_fmul_3dnow;
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
@@ -2856,20 +2855,20 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
}
}
- if(mm_flags & FF_MM_3DNOWEXT){
+ if (av_cpu_mm_flags & FF_MM_3DNOWEXT) {
c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
c->vector_fmul_window = vector_fmul_window_3dnow2;
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
}
}
- if(mm_flags & FF_MM_MMX2){
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
#if HAVE_YASM
c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
#endif
}
- if(mm_flags & FF_MM_SSE){
+ if (av_cpu_mm_flags & FF_MM_SSE) {
c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
c->ac3_downmix = ac3_downmix_sse;
c->vector_fmul = vector_fmul_sse;
@@ -2884,9 +2883,9 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->scalarproduct_float = ff_scalarproduct_float_sse;
#endif
}
- if(mm_flags & FF_MM_3DNOW)
+ if (av_cpu_mm_flags & FF_MM_3DNOW)
c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
- if(mm_flags & FF_MM_SSE2){
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
c->float_to_int16 = float_to_int16_sse2;
c->float_to_int16_interleave = float_to_int16_interleave_sse2;
@@ -2895,7 +2894,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
#endif
}
- if((mm_flags & FF_MM_SSSE3) && !(mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit
+ if ((av_cpu_mm_flags & FF_MM_SSSE3) && !(av_cpu_mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
}
@@ -2941,9 +2940,9 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
#if CONFIG_H264DSP
void ff_h264dsp_init_x86(H264DSPContext *c)
{
- mm_flags = mm_support();
+ av_cpu_mm_flags = av_get_cpu_mm_flags();
- if (mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_MMX) {
c->h264_idct_dc_add=
c->h264_idct_add= ff_h264_idct_add_mmx;
c->h264_idct8_dc_add=
@@ -2954,7 +2953,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c)
c->h264_idct_add8 = ff_h264_idct_add8_mmx;
c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
- if (mm_flags & FF_MM_MMX2) {
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
c->h264_idct_add16 = ff_h264_idct_add16_mmx2;
@@ -2988,18 +2987,18 @@ void ff_h264dsp_init_x86(H264DSPContext *c)
c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
}
- if(mm_flags & FF_MM_SSE2){
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
c->h264_idct8_add = ff_h264_idct8_add_sse2;
c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
}
#if HAVE_YASM
- if (mm_flags & FF_MM_MMX2){
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
#if ARCH_X86_32
c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
- if( mm_flags&FF_MM_SSE2 ){
+ if (av_cpu_mm_flags&FF_MM_SSE2) {
c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_sse2;
c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_sse2;
#if ARCH_X86_64 || !defined(__ICC) || __ICC > 1110
@@ -3014,7 +3013,7 @@ void ff_h264dsp_init_x86(H264DSPContext *c)
c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
#endif
}
- if ( mm_flags&FF_MM_SSSE3 ){
+ if (av_cpu_mm_flags&FF_MM_SSSE3) {
c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_ssse3;
c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_ssse3;
}
diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index d3e412a..b9fb0ce 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -1350,12 +1350,12 @@ static int ssd_int8_vs_int16_mmx(const int8_t *pix1, const int16_t *pix2, int si
void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
- if (mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_MMX) {
const int dct_algo = avctx->dct_algo;
if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
- if(mm_flags & FF_MM_SSE2){
+ if (av_cpu_mm_flags & FF_MM_SSE2){
c->fdct = ff_fdct_sse2;
- }else if(mm_flags & FF_MM_MMX2){
+ } else if (av_cpu_mm_flags & FF_MM_MMX2){
c->fdct = ff_fdct_mmx2;
}else{
c->fdct = ff_fdct_mmx;
@@ -1373,7 +1373,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->hadamard8_diff[1]= hadamard8_diff_mmx;
c->pix_norm1 = pix_norm1_mmx;
- c->sse[0] = (mm_flags & FF_MM_SSE2) ? sse16_sse2 : sse16_mmx;
+ c->sse[0] = (av_cpu_mm_flags & FF_MM_SSE2) ? sse16_sse2 : sse16_mmx;
c->sse[1] = sse8_mmx;
c->vsad[4]= vsad_intra16_mmx;
@@ -1391,7 +1391,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->ssd_int8_vs_int16 = ssd_int8_vs_int16_mmx;
- if (mm_flags & FF_MM_MMX2) {
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
c->sum_abs_dctelem= sum_abs_dctelem_mmx2;
c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
c->hadamard8_diff[1]= hadamard8_diff_mmx2;
@@ -1404,19 +1404,19 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
}
- if(mm_flags & FF_MM_SSE2){
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
c->get_pixels = get_pixels_sse2;
c->sum_abs_dctelem= sum_abs_dctelem_sse2;
c->hadamard8_diff[0]= hadamard8_diff16_sse2;
c->hadamard8_diff[1]= hadamard8_diff_sse2;
}
- if (CONFIG_LPC && mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) {
+ if (CONFIG_LPC && av_cpu_mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) {
c->lpc_compute_autocorr = ff_lpc_compute_autocorr_sse2;
}
#if HAVE_SSSE3
- if(mm_flags & FF_MM_SSSE3){
+ if (av_cpu_mm_flags & FF_MM_SSSE3) {
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->try_8x8basis= try_8x8basis_ssse3;
}
@@ -1427,7 +1427,7 @@ void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx)
}
#endif
- if(mm_flags & FF_MM_3DNOW){
+ if (av_cpu_mm_flags & FF_MM_3DNOW) {
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->try_8x8basis= try_8x8basis_3dnow;
}
diff --git a/libavcodec/x86/fft.c b/libavcodec/x86/fft.c
index eb5c65e..3c2f222 100644
--- a/libavcodec/x86/fft.c
+++ b/libavcodec/x86/fft.c
@@ -22,7 +22,7 @@
av_cold void ff_fft_init_mmx(FFTContext *s)
{
#if HAVE_YASM
- int has_vectors = mm_support();
+ int has_vectors = av_get_cpu_mm_flags();
if (has_vectors & FF_MM_SSE && HAVE_SSE) {
/* SSE for P3/P4/K8 */
s->imdct_calc = ff_imdct_calc_sse;
@@ -46,7 +46,7 @@ av_cold void ff_fft_init_mmx(FFTContext *s)
#if CONFIG_DCT
av_cold void ff_dct_init_mmx(DCTContext *s)
{
- int has_vectors = mm_support();
+ int has_vectors = av_get_cpu_mm_flags();
if (has_vectors & FF_MM_SSE && HAVE_SSE)
s->dct32 = ff_dct32_float_sse;
}
diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
index ebd8751..dff284b 100644
--- a/libavcodec/x86/h264dsp_mmx.c
+++ b/libavcodec/x86/h264dsp_mmx.c
@@ -2368,10 +2368,10 @@ void ff_pred4x4_vertical_vp8_mmxext(uint8_t *src, const uint8_t *topright, int s
#if CONFIG_H264PRED
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
{
- mm_flags = mm_support();
+ av_cpu_mm_flags = av_get_cpu_mm_flags();
#if HAVE_YASM
- if (mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_MMX) {
h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_mmx;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmx;
h->pred8x8 [VERT_PRED8x8] = ff_pred8x8_vertical_mmx;
@@ -2383,7 +2383,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
}
}
- if (mm_flags & FF_MM_MMX2) {
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_mmxext;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_mmxext;
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_mmxext;
@@ -2397,11 +2397,11 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
}
}
- if (mm_flags & FF_MM_SSE) {
+ if (av_cpu_mm_flags & FF_MM_SSE) {
h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_sse;
}
- if (mm_flags & FF_MM_SSE2) {
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_sse2;
if (codec_id == CODEC_ID_VP8) {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_tm_vp8_sse2;
@@ -2409,7 +2409,7 @@ void ff_h264_pred_init_x86(H264PredContext *h, int codec_id)
}
}
- if (mm_flags & FF_MM_SSSE3) {
+ if (av_cpu_mm_flags & FF_MM_SSSE3) {
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_ssse3;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_ssse3;
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_ssse3;
diff --git a/libavcodec/x86/motion_est_mmx.c b/libavcodec/x86/motion_est_mmx.c
index 0272410..a893faf 100644
--- a/libavcodec/x86/motion_est_mmx.c
+++ b/libavcodec/x86/motion_est_mmx.c
@@ -427,7 +427,7 @@ PIX_SAD(mmx2)
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
- if (mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_MMX) {
c->pix_abs[0][0] = sad16_mmx;
c->pix_abs[0][1] = sad16_x2_mmx;
c->pix_abs[0][2] = sad16_y2_mmx;
@@ -440,7 +440,7 @@ void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
c->sad[0]= sad16_mmx;
c->sad[1]= sad8_mmx;
}
- if (mm_flags & FF_MM_MMX2) {
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
c->pix_abs[0][0] = sad16_mmx2;
c->pix_abs[1][0] = sad8_mmx2;
@@ -456,7 +456,7 @@ void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
c->pix_abs[1][3] = sad8_xy2_mmx2;
}
}
- if ((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) {
+ if ((av_cpu_mm_flags & FF_MM_SSE2) && !(av_cpu_mm_flags & FF_MM_3DNOW) && avctx->codec_id != CODEC_ID_SNOW) {
c->sad[0]= sad16_sse2;
}
}
diff --git a/libavcodec/x86/mpegaudiodec_mmx.c b/libavcodec/x86/mpegaudiodec_mmx.c
index e7e1150..0de6554 100644
--- a/libavcodec/x86/mpegaudiodec_mmx.c
+++ b/libavcodec/x86/mpegaudiodec_mmx.c
@@ -149,9 +149,9 @@ static void apply_window_mp3(float *in, float *win, int *unused, float *out,
void ff_mpegaudiodec_init_mmx(MPADecodeContext *s)
{
- mm_flags = mm_support();
+ av_cpu_mm_flags = av_get_cpu_mm_flags();
- if (mm_flags & FF_MM_SSE2) {
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
s->apply_window_mp3 = apply_window_mp3;
}
}
diff --git a/libavcodec/x86/mpegvideo_mmx.c b/libavcodec/x86/mpegvideo_mmx.c
index 5deb68d..7419a7d 100644
--- a/libavcodec/x86/mpegvideo_mmx.c
+++ b/libavcodec/x86/mpegvideo_mmx.c
@@ -625,7 +625,7 @@ static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){
void MPV_common_init_mmx(MpegEncContext *s)
{
- if (mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_MMX) {
const int dct_algo = s->avctx->dct_algo;
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx;
@@ -636,7 +636,7 @@ void MPV_common_init_mmx(MpegEncContext *s)
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx;
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx;
- if (mm_flags & FF_MM_SSE2) {
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
s->denoise_dct= denoise_dct_sse2;
} else {
s->denoise_dct= denoise_dct_mmx;
@@ -644,13 +644,13 @@ void MPV_common_init_mmx(MpegEncContext *s)
if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
#if HAVE_SSSE3
- if(mm_flags & FF_MM_SSSE3){
+ if (av_cpu_mm_flags & FF_MM_SSSE3) {
s->dct_quantize= dct_quantize_SSSE3;
} else
#endif
- if(mm_flags & FF_MM_SSE2){
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
s->dct_quantize= dct_quantize_SSE2;
- } else if(mm_flags & FF_MM_MMX2){
+ } else if (av_cpu_mm_flags & FF_MM_MMX2) {
s->dct_quantize= dct_quantize_MMX2;
} else {
s->dct_quantize= dct_quantize_MMX;
diff --git a/libavcodec/x86/snowdsp_mmx.c b/libavcodec/x86/snowdsp_mmx.c
index 263f0bb..eed553f 100644
--- a/libavcodec/x86/snowdsp_mmx.c
+++ b/libavcodec/x86/snowdsp_mmx.c
@@ -874,10 +874,10 @@ static void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_str
void ff_dwt_init_x86(DWTContext *c)
{
- mm_flags = mm_support();
+ av_cpu_mm_flags = av_get_cpu_mm_flags();
- if (mm_flags & FF_MM_MMX) {
- if(mm_flags & FF_MM_SSE2 & 0){
+ if (av_cpu_mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_SSE2 & 0) {
c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#if HAVE_7REGS
c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
@@ -885,7 +885,7 @@ void ff_dwt_init_x86(DWTContext *c)
c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
}
else{
- if(mm_flags & FF_MM_MMX2){
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#if HAVE_7REGS
c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c
index 3ce0978..fc38eae 100644
--- a/libavcodec/x86/vc1dsp_mmx.c
+++ b/libavcodec/x86/vc1dsp_mmx.c
@@ -714,7 +714,7 @@ static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
#endif
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
- mm_flags = mm_support();
+ av_cpu_mm_flags = av_get_cpu_mm_flags();
dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
@@ -736,7 +736,7 @@ void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;
- if (mm_flags & FF_MM_MMX2){
+ if (av_cpu_mm_flags & FF_MM_MMX2){
dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
@@ -772,23 +772,23 @@ void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT
#if HAVE_YASM
- if (mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_MMX) {
ASSIGN_LF(mmx);
}
return;
- if (mm_flags & FF_MM_MMX2) {
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
ASSIGN_LF(mmx2);
}
- if (mm_flags & FF_MM_SSE2) {
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
dsp->vc1_v_loop_filter8 = ff_vc1_v_loop_filter8_sse2;
dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_sse2;
dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_sse2;
dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse2;
}
- if (mm_flags & FF_MM_SSSE3) {
+ if (av_cpu_mm_flags & FF_MM_SSSE3) {
ASSIGN_LF(ssse3);
}
- if (mm_flags & FF_MM_SSE4) {
+ if (av_cpu_mm_flags & FF_MM_SSE4) {
dsp->vc1_h_loop_filter8 = ff_vc1_h_loop_filter8_sse4;
dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
}
diff --git a/libavcodec/x86/vp8dsp-init.c b/libavcodec/x86/vp8dsp-init.c
index aceec6a..64373d0 100644
--- a/libavcodec/x86/vp8dsp-init.c
+++ b/libavcodec/x86/vp8dsp-init.c
@@ -282,10 +282,10 @@ DECLARE_LOOP_FILTER(sse4)
av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
{
- mm_flags = mm_support();
+ av_cpu_mm_flags = av_get_cpu_mm_flags();
#if HAVE_YASM
- if (mm_flags & FF_MM_MMX) {
+ if (av_cpu_mm_flags & FF_MM_MMX) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_mmx;
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_mmx;
c->vp8_idct_dc_add4uv = ff_vp8_idct_dc_add4uv_mmx;
@@ -312,7 +312,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
/* note that 4-tap width=16 functions are missing because w=16
* is only used for luma, and luma is always a copy or sixtap. */
- if (mm_flags & FF_MM_MMX2) {
+ if (av_cpu_mm_flags & FF_MM_MMX2) {
VP8_LUMA_MC_FUNC(0, 16, mmxext);
VP8_MC_FUNC(1, 8, mmxext);
VP8_MC_FUNC(2, 4, mmxext);
@@ -334,14 +334,14 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_mmxext;
}
- if (mm_flags & FF_MM_SSE) {
+ if (av_cpu_mm_flags & FF_MM_SSE) {
c->vp8_idct_add = ff_vp8_idct_add_sse;
c->vp8_luma_dc_wht = ff_vp8_luma_dc_wht_sse;
c->put_vp8_epel_pixels_tab[0][0][0] =
c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_sse;
}
- if (mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) {
+ if (av_cpu_mm_flags & (FF_MM_SSE2|FF_MM_SSE2SLOW)) {
VP8_LUMA_MC_FUNC(0, 16, sse2);
VP8_MC_FUNC(1, 8, sse2);
VP8_BILINEAR_MC_FUNC(0, 16, sse2);
@@ -356,7 +356,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_mbedge_sse2;
}
- if (mm_flags & FF_MM_SSE2) {
+ if (av_cpu_mm_flags & FF_MM_SSE2) {
c->vp8_idct_dc_add4y = ff_vp8_idct_dc_add4y_sse2;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse2;
@@ -368,7 +368,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_sse2;
}
- if (mm_flags & FF_MM_SSSE3) {
+ if (av_cpu_mm_flags & FF_MM_SSSE3) {
VP8_LUMA_MC_FUNC(0, 16, ssse3);
VP8_MC_FUNC(1, 8, ssse3);
VP8_MC_FUNC(2, 4, ssse3);
@@ -390,7 +390,7 @@ av_cold void ff_vp8dsp_init_x86(VP8DSPContext* c)
c->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_mbedge_ssse3;
}
- if (mm_flags & FF_MM_SSE4) {
+ if (av_cpu_mm_flags & FF_MM_SSE4) {
c->vp8_idct_dc_add = ff_vp8_idct_dc_add_sse4;
c->vp8_h_loop_filter_simple = ff_vp8_h_loop_filter_simple_sse4;
diff --git a/libavutil/Makefile b/libavutil/Makefile
index 0c25ce1..c70afd3 100644
--- a/libavutil/Makefile
+++ b/libavutil/Makefile
@@ -9,6 +9,7 @@ HEADERS = adler32.h \
base64.h \
bswap.h \
common.h \
+ cpuid.h \
crc.h \
error.h \
eval.h \
@@ -33,6 +34,7 @@ OBJS = adler32.o \
aes.o \
avstring.o \
base64.o \
+ cpuid.o \
crc.o \
des.o \
error.o \
@@ -55,7 +57,7 @@ OBJS = adler32.o \
tree.o \
utils.o \
-TESTPROGS = adler32 aes base64 crc des lls md5 pca sha softfloat tree
+TESTPROGS = adler32 aes base64 cpuid crc des lls md5 pca sha softfloat tree
TESTPROGS-$(HAVE_LZO1X_999_COMPRESS) += lzo
DIRS = arm bfin sh4 x86
diff --git a/libavcodec/x86/cpuid.c b/libavutil/cpuid.c
similarity index 88%
rename from libavcodec/x86/cpuid.c
rename to libavutil/cpuid.c
index f9afd6e..943ebe0 100644
--- a/libavcodec/x86/cpuid.c
+++ b/libavutil/cpuid.c
@@ -20,12 +20,12 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <stdlib.h>
-#include "libavutil/x86_cpu.h"
-#include "libavcodec/dsputil.h"
+#include "cpuid.h"
+#include "x86_cpu.h"
-#undef printf
+int av_cpu_mm_flags = 0;
+#if ARCH_X86
/* ebx saving is necessary for PIC. gcc seems unable to see it alone */
#define cpuid(index,eax,ebx,ecx,edx)\
__asm__ volatile\
@@ -37,7 +37,7 @@
: "0" (index));
/* Function to test if multimedia instructions are supported... */
-int mm_support(void)
+int av_get_cpu_mm_flags(void)
{
int rval = 0;
int eax, ebx, ecx, edx;
@@ -138,13 +138,42 @@ int mm_support(void)
#endif
return rval;
}
+#elif ARCH_PPC
+
+int av_get_cpu_mm_flags(void)
+{
+ int result = 0;
+#if HAVE_ALTIVEC
+ if (has_altivec()) {
+ result |= FF_MM_ALTIVEC;
+ }
+#endif /* result */
+ return result;
+}
+#elif ARCH_ARM
+
+int av_get_cpu_mm_flags(void)
+{
+ return HAVE_IWMMXT * FF_MM_IWMMXT;
+}
+
+#else
+int av_get_cpu_mm_flags(void)
+{
+ return 0;
+}
+#endif
#ifdef TEST
-int main ( void )
+
+#undef printf
+
+int main(void)
{
int mm_flags;
- mm_flags = mm_support();
- printf("mm_support = 0x%08X\n",mm_flags);
+ mm_flags = av_get_cpu_mm_flags();
+ printf("mm_flags = 0x%08X\n", mm_flags);
return 0;
}
#endif
+
diff --git a/libavutil/cpuid.h b/libavutil/cpuid.h
new file mode 100644
index 0000000..c259144
--- /dev/null
+++ b/libavutil/cpuid.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPUID_H
+#define AVUTIL_CPUID_H
+
+#include "avutil.h"
+
+#define FF_MM_FORCE 0x80000000 /* Force usage of selected flags (OR) */
+
+ /* lower 16 bits - CPU features */
+#define FF_MM_MMX 0x0001 ///< standard MMX
+#define FF_MM_3DNOW 0x0004 ///< AMD 3DNOW
+#define FF_MM_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#define FF_MM_SSE 0x0008 ///< SSE functions
+#define FF_MM_SSE2 0x0010 ///< PIV SSE2 functions
+#define FF_MM_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
+#define FF_MM_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define FF_MM_SSE3 0x0040 ///< Prescott SSE3 functions
+#define FF_MM_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster than regular MMX/SSE (e.g. Core1)
+#define FF_MM_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define FF_MM_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define FF_MM_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define FF_MM_IWMMXT 0x0100 ///< XScale IWMMXT
+#define FF_MM_ALTIVEC 0x0001 ///< standard AltiVec
+
+extern int av_cpu_mm_flags;
+
+/**
+ * Return the flags specifying which CPU multimedia extensions
+ * are supported.
+ */
+int av_get_cpu_mm_flags(void);
+
+#endif /* AVUTIL_CPUID_H */
--
1.7.1