[FFmpeg-cvslog] Merge commit '10f4511f14a4e830c0ed471df4cd1cc2a18a481a'
James Almer
git at videolan.org
Thu Oct 26 20:09:09 EEST 2017
ffmpeg | branch: master | James Almer <jamrial at gmail.com> | Thu Oct 26 14:06:34 2017 -0300| [5de85c1029881970dc69e529dc48e277f3b5336c] | committer: James Almer
Merge commit '10f4511f14a4e830c0ed471df4cd1cc2a18a481a'
* commit '10f4511f14a4e830c0ed471df4cd1cc2a18a481a':
libavutil: Make LOCAL_ALIGNED(xx be equal to LOCAL_ALIGNED_xx(
Also added LOCAL_ALIGNED_4 as it's used in the vp8 decoder, and
simplified the configure defines.
Merged-by: James Almer <jamrial at gmail.com>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=5de85c1029881970dc69e529dc48e277f3b5336c
---
configure | 10 ++++------
libavutil/internal.h | 21 ++++++++++++++-------
2 files changed, 18 insertions(+), 13 deletions(-)
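
[Editor's note, not part of the commit: a minimal standalone C sketch of the dispatch idea, where LOCAL_ALIGNED(a, ...) now simply token-pastes to the matching LOCAL_ALIGNED_a(...) macro. The SKETCH_* names are hypothetical stand-ins, not the real libavutil macros, and the alignment attribute is the GCC/Clang extension.]

/* Sketch: LOCAL_ALIGNED(a, ...) becoming an alias for LOCAL_ALIGNED_a(...). */
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the per-alignment macros (GCC/Clang attribute). */
#define SKETCH_ALIGNED_8(t, v, n)  t __attribute__((aligned(8)))  v[n]
#define SKETCH_ALIGNED_16(t, v, n) t __attribute__((aligned(16))) v[n]

/* The dispatching macro: SKETCH_ALIGNED(16, ...) token-pastes to SKETCH_ALIGNED_16(...). */
#define SKETCH_ALIGNED(a, t, v, n) SKETCH_ALIGNED_##a(t, v, n)

int main(void)
{
    SKETCH_ALIGNED(16, uint8_t, block, 64);   /* expands to SKETCH_ALIGNED_16(uint8_t, block, 64) */
    printf("16-byte aligned: %d\n", (uintptr_t)block % 16 == 0);
    return 0;
}
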
diff --git a/configure b/configure
index c86e5788fa..7767fb2596 100755
--- a/configure
+++ b/configure
@@ -1881,9 +1881,7 @@ ARCH_FEATURES="
fast_64bit
fast_clz
fast_cmov
- local_aligned_8
- local_aligned_16
- local_aligned_32
+ local_aligned
simd_align_16
simd_align_32
"
@@ -4682,7 +4680,7 @@ elif enabled mips; then
loongson*)
enable loongson2
enable loongson3
- enable local_aligned_8 local_aligned_16 local_aligned_32
+ enable local_aligned
enable simd_align_16
enable fast_64bit
enable fast_clz
@@ -5574,7 +5572,7 @@ elif enabled parisc; then
elif enabled ppc; then
- enable local_aligned_8 local_aligned_16 local_aligned_32
+ enable local_aligned
check_inline_asm dcbzl '"dcbzl 0, %0" :: "r"(0)'
check_inline_asm ibm_asm '"add 0, 0, 0"'
@@ -5615,7 +5613,7 @@ elif enabled x86; then
check_builtin rdtsc intrin.h "__rdtsc()"
check_builtin mm_empty mmintrin.h "_mm_empty()"
- enable local_aligned_8 local_aligned_16 local_aligned_32
+ enable local_aligned
# check whether EBP is available on x86
# As 'i' is stored on the stack, this program will crash
diff --git a/libavutil/internal.h b/libavutil/internal.h
index a2d73e3cc6..6f92f71e8e 100644
--- a/libavutil/internal.h
+++ b/libavutil/internal.h
@@ -43,6 +43,7 @@
#include "cpu.h"
#include "dict.h"
#include "macros.h"
+#include "mem.h"
#include "pixfmt.h"
#include "version.h"
@@ -110,24 +111,30 @@
DECLARE_ALIGNED(a, t, la_##v) s o; \
t (*v) o = la_##v
-#define LOCAL_ALIGNED(a, t, v, ...) E1(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))
+#define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
-#if HAVE_LOCAL_ALIGNED_8
+#if HAVE_LOCAL_ALIGNED
+# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
+#else
+# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
+#endif
+
+#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
#else
-# define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
+# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
#endif
-#if HAVE_LOCAL_ALIGNED_16
+#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
#else
-# define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
+# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
#endif
-#if HAVE_LOCAL_ALIGNED_32
+#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
#else
-# define LOCAL_ALIGNED_32(t, v, ...) LOCAL_ALIGNED(32, t, v, __VA_ARGS__)
+# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
#endif
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)\
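
[Editor's note, again not part of the commit: the single HAVE_LOCAL_ALIGNED toggle now selects between two fallback strategies, sketched below with hypothetical SKETCH_* macros rather than the real LOCAL_ALIGNED_D/LOCAL_ALIGNED_A definitions. The "D" path relies on the compiler to align the stack object itself, while the "A" path over-allocates a plain byte buffer and rounds the pointer up by hand.]

/* Sketch of the two fallback strategies behind the HAVE_LOCAL_ALIGNED switch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~(uintptr_t)((a) - 1))

/* "D"-style: the compiler guarantees the alignment of the array itself. */
#define SKETCH_LOCAL_D(a, t, v, n) \
    t __attribute__((aligned(a))) v[n]

/* "A"-style: reserve extra bytes, then align the pointer manually. */
#define SKETCH_LOCAL_A(a, t, v, n)                        \
    uint8_t la_##v[sizeof(t) * (n) + (a) - 1];            \
    t *v = (t *)ALIGN_UP((uintptr_t)la_##v, a)

int main(void)
{
    SKETCH_LOCAL_D(16, int16_t, blockd, 64);
    SKETCH_LOCAL_A(16, int16_t, blocka, 64);
    memset(blockd, 0, sizeof(blockd));
    memset(blocka, 0, 64 * sizeof(*blocka));
    printf("d aligned: %d, a aligned: %d\n",
           (uintptr_t)blockd % 16 == 0, (uintptr_t)blocka % 16 == 0);
    return 0;
}
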
======================================================================
diff --cc configure
index c86e5788fa,f1488cf494..7767fb2596
--- a/configure
+++ b/configure
@@@ -1881,9 -1517,9 +1881,7 @@@ ARCH_FEATURES=
fast_64bit
fast_clz
fast_cmov
-- local_aligned_8
-- local_aligned_16
-- local_aligned_32
++ local_aligned
simd_align_16
simd_align_32
"
@@@ -4661,98 -3704,6 +4659,98 @@@ elif enabled mips; the
cpuflags="-march=$cpu"
+ if [ "$cpu" != "generic" ]; then
+ disable mips32r2
+ disable mips32r5
+ disable mips64r2
+ disable mips32r6
+ disable mips64r6
+ disable loongson2
+ disable loongson3
+
+ case $cpu in
+ 24kc|24kf*|24kec|34kc|1004kc|24kef*|34kf*|1004kf*|74kc|74kf)
+ enable mips32r2
+ disable msa
+ ;;
+ p5600|i6400|p6600)
+ disable mipsdsp
+ disable mipsdspr2
+ ;;
+ loongson*)
+ enable loongson2
+ enable loongson3
- enable local_aligned_8 local_aligned_16 local_aligned_32
++ enable local_aligned
+ enable simd_align_16
+ enable fast_64bit
+ enable fast_clz
+ enable fast_cmov
+ enable fast_unaligned
+ disable aligned_stack
+ disable mipsfpu
+ disable mipsdsp
+ disable mipsdspr2
+ case $cpu in
+ loongson3*)
+ cpuflags="-march=loongson3a -mhard-float -fno-expensive-optimizations"
+ ;;
+ loongson2e)
+ cpuflags="-march=loongson2e -mhard-float -fno-expensive-optimizations"
+ ;;
+ loongson2f)
+ cpuflags="-march=loongson2f -mhard-float -fno-expensive-optimizations"
+ ;;
+ esac
+ ;;
+ *)
+ # Unknown CPU. Disable everything.
+ warn "unknown CPU. Disabling all MIPS optimizations."
+ disable mipsfpu
+ disable mipsdsp
+ disable mipsdspr2
+ disable msa
+ disable mmi
+ ;;
+ esac
+
+ case $cpu in
+ 24kc)
+ disable mipsfpu
+ disable mipsdsp
+ disable mipsdspr2
+ ;;
+ 24kf*)
+ disable mipsdsp
+ disable mipsdspr2
+ ;;
+ 24kec|34kc|1004kc)
+ disable mipsfpu
+ disable mipsdspr2
+ ;;
+ 24kef*|34kf*|1004kf*)
+ disable mipsdspr2
+ ;;
+ 74kc)
+ disable mipsfpu
+ ;;
+ p5600)
+ enable mips32r5
+ check_cflags "-mtune=p5600" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops"
+ ;;
+ i6400)
+ enable mips64r6
+ check_cflags "-mtune=i6400 -mabi=64" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops" && check_ldflags "-mabi=64"
+ ;;
+ p6600)
+ enable mips64r6
+ check_cflags "-mtune=p6600 -mabi=64" && check_cflags "-msched-weight -mload-store-pairs -funroll-loops" && check_ldflags "-mabi=64"
+ ;;
+ esac
+ else
+ # We do not disable anything. Is up to the user to disable the unwanted features.
+ warn 'generic cpu selected'
+ fi
+
elif enabled ppc; then
disable ldbrx
@@@ -5574,7 -4413,7 +5572,7 @@@ elif enabled parisc; the
elif enabled ppc; then
-- enable local_aligned_8 local_aligned_16 local_aligned_32
++ enable local_aligned
check_inline_asm dcbzl '"dcbzl 0, %0" :: "r"(0)'
check_inline_asm ibm_asm '"add 0, 0, 0"'
@@@ -5615,7 -4454,7 +5613,7 @@@ elif enabled x86; the
check_builtin rdtsc intrin.h "__rdtsc()"
check_builtin mm_empty mmintrin.h "_mm_empty()"
-- enable local_aligned_8 local_aligned_16 local_aligned_32
++ enable local_aligned
# check whether EBP is available on x86
# As 'i' is stored on the stack, this program will crash
diff --cc libavutil/internal.h
index a2d73e3cc6,8a0076f985..6f92f71e8e
--- a/libavutil/internal.h
+++ b/libavutil/internal.h
@@@ -39,12 -36,10 +39,13 @@@
#include <assert.h>
#include "config.h"
#include "attributes.h"
+#include "timer.h"
+#include "cpu.h"
#include "dict.h"
#include "macros.h"
+ #include "mem.h"
#include "pixfmt.h"
+#include "version.h"
#if ARCH_X86
# include "x86/emms.h"
@@@ -110,24 -98,24 +111,30 @@@
DECLARE_ALIGNED(a, t, la_##v) s o; \
t (*v) o = la_##v
- #define LOCAL_ALIGNED(a, t, v, ...) E1(LOCAL_ALIGNED_A(a, t, v, __VA_ARGS__,,))
+ #define LOCAL_ALIGNED(a, t, v, ...) LOCAL_ALIGNED_##a(t, v, __VA_ARGS__)
--#if HAVE_LOCAL_ALIGNED_8
++#if HAVE_LOCAL_ALIGNED
++# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_D(4, t, v, __VA_ARGS__,,))
++#else
++# define LOCAL_ALIGNED_4(t, v, ...) E1(LOCAL_ALIGNED_A(4, t, v, __VA_ARGS__,,))
++#endif
++
++#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_D(8, t, v, __VA_ARGS__,,))
#else
- # define LOCAL_ALIGNED_8(t, v, ...) LOCAL_ALIGNED(8, t, v, __VA_ARGS__)
+ # define LOCAL_ALIGNED_8(t, v, ...) E1(LOCAL_ALIGNED_A(8, t, v, __VA_ARGS__,,))
#endif
--#if HAVE_LOCAL_ALIGNED_16
++#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_D(16, t, v, __VA_ARGS__,,))
#else
- # define LOCAL_ALIGNED_16(t, v, ...) LOCAL_ALIGNED(16, t, v, __VA_ARGS__)
+ # define LOCAL_ALIGNED_16(t, v, ...) E1(LOCAL_ALIGNED_A(16, t, v, __VA_ARGS__,,))
#endif
--#if HAVE_LOCAL_ALIGNED_32
++#if HAVE_LOCAL_ALIGNED
# define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_D(32, t, v, __VA_ARGS__,,))
#else
- # define LOCAL_ALIGNED_32(t, v, ...) LOCAL_ALIGNED(32, t, v, __VA_ARGS__)
+ # define LOCAL_ALIGNED_32(t, v, ...) E1(LOCAL_ALIGNED_A(32, t, v, __VA_ARGS__,,))
#endif
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)\