From 17d0933e5710a96c9a182f764e353a930721f104 Mon Sep 17 00:00:00 2001
From: diego
Date: Sun, 15 Mar 2009 10:01:02 +0000
Subject: [PATCH] Get rid of pointless preprocessor condition indirection and
 use ARCH_X86 directly instead of CAN_COMPILE_X86_ASM.

git-svn-id: svn://svn.mplayerhq.hu/mplayer/trunk@28956 b3059339-0415-0410-9bf9-f77b7e298cf2
---
 libvo/aclib.c | 15 ++++++---------
 libvo/osd.c   | 25 +++++++++++--------------
 2 files changed, 17 insertions(+), 23 deletions(-)

diff --git a/libvo/aclib.c b/libvo/aclib.c
index cae86dd3d4..47c9dead6b 100644
--- a/libvo/aclib.c
+++ b/libvo/aclib.c
@@ -33,9 +33,6 @@
 //Feel free to fine-tune the above 2, it might be possible to get some speedup with them :)
 
 //#define STATISTICS
-#if ARCH_X86
-#define CAN_COMPILE_X86_ASM
-#endif
 
 //Note: we have MMX, MMX2, 3DNOW version there is no 3DNOW+MMX2 one
 //Plain C versions
@@ -43,7 +40,7 @@
 //#define COMPILE_C
 //#endif
 
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 
 #if (HAVE_MMX && !HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_MMX
@@ -155,14 +152,14 @@
 #include "aclib_template.c"
 #endif
 
-#endif // CAN_COMPILE_X86_ASM
+#endif /* ARCH_X86 */
 
 
 #undef fast_memcpy
 void * fast_memcpy(void * to, const void * from, size_t len)
 {
 #ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 	// ordered per speed fasterst first
 	if(gCpuCaps.hasSSE2)
 		fast_memcpy_SSE(to, from, len);
@@ -173,7 +170,7 @@ void * fast_memcpy(void * to, const void * from, size_t len)
 	else if(gCpuCaps.hasMMX)
 		fast_memcpy_MMX(to, from, len);
 	else
-#endif //CAN_COMPILE_X86_ASM
+#endif
 		memcpy(to, from, len); // prior to mmx we use the standart memcpy
 #else
 #if HAVE_SSE2
@@ -196,7 +193,7 @@ void * fast_memcpy(void * to, const void * from, size_t len)
 void * mem2agpcpy(void * to, const void * from, size_t len)
 {
 #ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 	// ordered per speed fasterst first
 	if(gCpuCaps.hasSSE2)
 		mem2agpcpy_SSE(to, from, len);
@@ -207,7 +204,7 @@ void * mem2agpcpy(void * to, const void * from, size_t len)
 	else if(gCpuCaps.hasMMX)
 		mem2agpcpy_MMX(to, from, len);
 	else
-#endif //CAN_COMPILE_X86_ASM
+#endif
 		memcpy(to, from, len); // prior to mmx we use the standart memcpy
 #else
 #if HAVE_SSE2
diff --git a/libvo/osd.c b/libvo/osd.c
index 6660500ecd..42dd4fcc69 100644
--- a/libvo/osd.c
+++ b/libvo/osd.c
@@ -30,10 +30,6 @@
 #include "cpudetect.h"
 
 #if ARCH_X86
-#define CAN_COMPILE_X86_ASM
-#endif
-
-#ifdef CAN_COMPILE_X86_ASM
 static const uint64_t bFF __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
 static const unsigned long long mask24lh __attribute__((aligned(8))) = 0xFFFF000000000000ULL;
 static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FFFFFFFFFFFFULL;
@@ -45,7 +41,7 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
 #define COMPILE_C
 #endif
 
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 
 #if (HAVE_MMX && !HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_MMX
@@ -58,7 +54,8 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
 #if (HAVE_AMD3DNOW && !HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
 #define COMPILE_3DNOW
 #endif
-#endif //CAN_COMPILE_X86_ASM
+
+#endif /* ARCH_X86 */
 
 #undef HAVE_MMX
 #undef HAVE_MMX2
@@ -67,7 +64,7 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
 #define HAVE_MMX2 0
 #define HAVE_AMD3DNOW 0
 
-#ifndef CAN_COMPILE_X86_ASM
+#if !ARCH_X86
 
 #ifdef COMPILE_C
 #undef HAVE_MMX
@@ -134,11 +131,11 @@ static const unsigned long long mask24hl __attribute__((aligned(8))) = 0x0000FF
 #include "osd_template.c"
 #endif
 
-#endif //CAN_COMPILE_X86_ASM
+#endif /* ARCH_X86 */
 
 void vo_draw_alpha_yv12(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
 #ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 	// ordered by speed / fastest first
 	if(gCpuCaps.hasMMX2)
 		vo_draw_alpha_yv12_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
@@ -168,7 +165,7 @@ void vo_draw_alpha_yv12(int w,int h, unsigned char* src, unsigned char *srca, in
 
 void vo_draw_alpha_yuy2(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
 #ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 	// ordered by speed / fastest first
 	if(gCpuCaps.hasMMX2)
 		vo_draw_alpha_yuy2_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
@@ -198,7 +195,7 @@ void vo_draw_alpha_yuy2(int w,int h, unsigned char* src, unsigned char *srca, in
 
 void vo_draw_alpha_uyvy(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
 #ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 	// ordered by speed / fastest first
 	if(gCpuCaps.hasMMX2)
 		vo_draw_alpha_uyvy_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
@@ -228,7 +225,7 @@ void vo_draw_alpha_uyvy(int w,int h, unsigned char* src, unsigned char *srca, in
 
 void vo_draw_alpha_rgb24(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
 #ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 	// ordered by speed / fastest first
 	if(gCpuCaps.hasMMX2)
 		vo_draw_alpha_rgb24_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
@@ -258,7 +255,7 @@ void vo_draw_alpha_rgb24(int w,int h, unsigned char* src, unsigned char *srca, i
 
 void vo_draw_alpha_rgb32(int w,int h, unsigned char* src, unsigned char *srca, int srcstride, unsigned char* dstbase,int dststride){
 #ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 	// ordered by speed / fastest first
 	if(gCpuCaps.hasMMX2)
 		vo_draw_alpha_rgb32_MMX2(w, h, src, srca, srcstride, dstbase, dststride);
@@ -303,7 +300,7 @@ void vo_draw_alpha_init(void){
 
 	if( mp_msg_test(MSGT_OSD,MSGL_V) ) {
 #ifdef RUNTIME_CPUDETECT
-#ifdef CAN_COMPILE_X86_ASM
+#if ARCH_X86
 	// ordered per speed fasterst first
 	if(gCpuCaps.hasMMX2)
 		mp_msg(MSGT_OSD,MSGL_INFO,"Using MMX (with tiny bit MMX2) Optimized OnScreenDisplay\n");
-- 
2.11.4.GIT
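
A note on the pattern for readers outside the MPlayer tree: the alias macro
existed only so the condition could be tested with #ifdef, and testing
ARCH_X86 with #if directly drops one macro and one place for the two
spellings to drift apart. In C, an identifier that is undefined evaluates
to 0 inside a #if expression, so the direct test stays well defined even on
targets where the build system never defines ARCH_X86. Below is a minimal,
compilable sketch of the resulting dispatch shape; copy_dispatch is a
placeholder name, not an MPlayer symbol, and plain memcpy stands in for the
MMX/SSE routines that the real fast_memcpy() selects at runtime via gCpuCaps.

#include <stdio.h>
#include <string.h>

/* Stand-in for config.h: assume the build defines ARCH_X86 to 1 on x86
   targets. The explicit 0 default merely silences -Wundef; #if would
   treat an undefined ARCH_X86 as 0 anyway. */
#ifndef ARCH_X86
#define ARCH_X86 0
#endif

static void *copy_dispatch(void *to, const void *from, size_t len)
{
#if ARCH_X86
    /* x86 build: the real code would pick an MMX/SSE variant at runtime;
       memcpy stands in for those routines here. */
    return memcpy(to, from, len);
#else
    /* portable fallback for every non-x86 architecture */
    return memcpy(to, from, len);
#endif
}

int main(void)
{
    char src[] = "hello", dst[sizeof src];
    copy_dispatch(dst, src, sizeof src);
    puts(dst); /* prints "hello" */
    return 0;
}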