author     Sebastian Dröge <slomo@circular-chaos.org>   2008-07-13 10:52:03 +0000
committer  Sebastian Dröge <slomo@circular-chaos.org>   2008-07-13 10:52:03 +0000
commit     856a1e45eed1c576313f39485d59cee510bdf8ef (patch)
tree       5442e76d4793f191ac997015de270c58377b77f6 /gst/deinterlace2/tvtime
parent     9a392ef442ef284f63c36ef3690445e5938a046a (diff)
download   gst-plugins-bad-856a1e45eed1c576313f39485d59cee510bdf8ef.tar.gz
           gst-plugins-bad-856a1e45eed1c576313f39485d59cee510bdf8ef.tar.bz2
           gst-plugins-bad-856a1e45eed1c576313f39485d59cee510bdf8ef.zip
gst/deinterlace2/tvtime/: Some cleanup, use 3DNOW instead of TDNOW in macros.
Original commit message from CVS:
* gst/deinterlace2/tvtime/greedyh.asm:
* gst/deinterlace2/tvtime/greedyh.c:
* gst/deinterlace2/tvtime/greedyhmacros.h:
Some cleanup, use 3DNOW instead of TDNOW in macros.
* gst/deinterlace2/tvtime/tomsmocomp.c:
(gst_deinterlace_method_tomsmocomp_class_init):
* gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc:
* gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h:
The SSE method in fact only needs MMXEXT, declare it as such.
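
Note on the macros being renamed: these headers pick the per-CPU instruction via token pasting, so SIMD_TYPE has to expand to its value (MMXEXT, MMX or 3DNOW) before it is glued onto a V_PAVGB_/V_PMAXUB_/... prefix; that is what the extra V_*2/V_*3 hop is for. The following standalone C sketch is not part of the commit and the OP* names are invented for illustration; it only demonstrates the same two-step expansion:

/* Standalone illustration (not part of the commit) of the dispatch
 * pattern behind V_PAVGB/V_PAVGB2/V_PAVGB3 and friends: the extra
 * OP2 hop lets SIMD_TYPE expand to its value (MMXEXT here) before
 * OP3 token-pastes it onto the OP_ prefix.  Pasting directly in
 * OP() would glue the literal token SIMD_TYPE instead. */
#include <stdio.h>

#define OP_MMX(x)    ("mmx: " x)
#define OP_MMXEXT(x) ("mmxext: " x)
#define OP_3DNOW(x)  ("3dnow: " x)

#define SIMD_TYPE MMXEXT

#define OP(x)             OP2(x, SIMD_TYPE)
#define OP2(x, simd_type) OP3(x, simd_type)
#define OP3(x, simd_type) OP_##simd_type(x)

int main (void)
{
  puts (OP ("pavgb"));   /* prints "mmxext: pavgb" */
  return 0;
}

Defining OP(x) directly as OP_##SIMD_TYPE(x) would paste the literal token SIMD_TYPE rather than its value, which is why both greedyhmacros.h and tomsmocompmacros.h route every dispatch through two helper macros.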
Diffstat (limited to 'gst/deinterlace2/tvtime')
-rw-r--r--  gst/deinterlace2/tvtime/greedyh.asm                    |  2
-rw-r--r--  gst/deinterlace2/tvtime/greedyh.c                      |  6
-rw-r--r--  gst/deinterlace2/tvtime/greedyhmacros.h                | 27
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp.c                   | 22
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc   | 10
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h  | 36
6 files changed, 51 insertions, 52 deletions
diff --git a/gst/deinterlace2/tvtime/greedyh.asm b/gst/deinterlace2/tvtime/greedyh.asm
index 3cc88945..9272bc30 100644
--- a/gst/deinterlace2/tvtime/greedyh.asm
+++ b/gst/deinterlace2/tvtime/greedyh.asm
@@ -184,7 +184,7 @@ FUNCT_NAME (GstDeinterlaceMethodGreedyH *self, uint8_t * L1, uint8_t * L2, uint8
     "psubusb %[MotionThreshold], %%mm0\n\t"    // test Threshold, clear chroma change >>>??
     "pmullw %[MotionSense], %%mm0\n\t"         // mul by user factor, keep low 16 bits
     "movq %[QW256], %%mm7\n\t"
-#if SIMD_TYPE == MMXEXT
+#ifdef IS_MMXEXT
     "pminsw %%mm7, %%mm0\n\t"                  // max = 256
 #else
     "paddusw %[QW256B], %%mm0\n\t"             // add, may sat at fff..
diff --git a/gst/deinterlace2/tvtime/greedyh.c b/gst/deinterlace2/tvtime/greedyh.c
index 530d6289..7f26e048 100644
--- a/gst/deinterlace2/tvtime/greedyh.c
+++ b/gst/deinterlace2/tvtime/greedyh.c
@@ -215,12 +215,12 @@ greedyDScaler_C (GstDeinterlaceMethodGreedyH * self, uint8_t * L1, uint8_t * L2,
 #undef IS_MMXEXT
 #undef FUNCT_NAME
 
-#define IS_TDNOW
-#define SIMD_TYPE TDNOW
+#define IS_3DNOW
+#define SIMD_TYPE 3DNOW
 #define FUNCT_NAME greedyDScaler_3DNOW
 #include "greedyh.asm"
 #undef SIMD_TYPE
-#undef IS_TDNOW
+#undef IS_3DNOW
 #undef FUNCT_NAME
 
 #define IS_MMX
diff --git a/gst/deinterlace2/tvtime/greedyhmacros.h b/gst/deinterlace2/tvtime/greedyhmacros.h
index 3f1c72c9..0386c28e 100644
--- a/gst/deinterlace2/tvtime/greedyhmacros.h
+++ b/gst/deinterlace2/tvtime/greedyhmacros.h
@@ -21,7 +21,7 @@
 
 // BEFORE USING THESE YOU MUST SET:
 
-// #define SIMD_TYPE MMXEXT (or MMX or TDNOW)
+// #define SIMD_TYPE MMXEXT (or MMX or 3DNOW)
 
 // some macros for pavgb instruction
 // V_PAVGB(mmr1, mmr2, mmr work register, smask) mmr2 may = mmrw if you can trash it
@@ -34,20 +34,20 @@
     "psrlw $1, "mmr1"\n\t" \
     "paddusb "mmrw", "mmr1"\n\t"
 #define V_PAVGB_MMXEXT(mmr1, mmr2, mmrw, smask) "pavgb "mmr2", "mmr1"\n\t"
-#define V_PAVGB_TDNOW(mmr1, mmr2, mmrw, smask) "pavgusb "mmr2", "mmr1"\n\t"
+#define V_PAVGB_3DNOW(mmr1, mmr2, mmrw, smask) "pavgusb "mmr2", "mmr1"\n\t"
 #define V_PAVGB(mmr1, mmr2, mmrw, smask) V_PAVGB2(mmr1, mmr2, mmrw, smask, SIMD_TYPE)
-#define V_PAVGB2(mmr1, mmr2, mmrw, smask, simdtype) V_PAVGB3(mmr1, mmr2, mmrw, smask, simdtype)
-#define V_PAVGB3(mmr1, mmr2, mmrw, smask, simdtype) V_PAVGB_##simdtype(mmr1, mmr2, mmrw, smask)
+#define V_PAVGB2(mmr1, mmr2, mmrw, smask, simd_type) V_PAVGB3(mmr1, mmr2, mmrw, smask, simd_type)
+#define V_PAVGB3(mmr1, mmr2, mmrw, smask, simd_type) V_PAVGB_##simd_type(mmr1, mmr2, mmrw, smask)
 
 // some macros for pmaxub instruction
 #define V_PMAXUB_MMX(mmr1, mmr2) \
     "psubusb "mmr2", "mmr1"\n\t" \
     "paddusb "mmr2", "mmr1"\n\t"
 #define V_PMAXUB_MMXEXT(mmr1, mmr2) "pmaxub "mmr2", "mmr1"\n\t"
-#define V_PMAXUB_TDNOW(mmr1, mmr2) V_PMAXUB_MMX(mmr1, mmr2) // use MMX version
+#define V_PMAXUB_3DNOW(mmr1, mmr2) V_PMAXUB_MMX(mmr1, mmr2) // use MMX version
 #define V_PMAXUB(mmr1, mmr2) V_PMAXUB2(mmr1, mmr2, SIMD_TYPE)
-#define V_PMAXUB2(mmr1, mmr2, simdtype) V_PMAXUB3(mmr1, mmr2, simdtype)
-#define V_PMAXUB3(mmr1, mmr2, simdtype) V_PMAXUB_##simdtype(mmr1, mmr2)
+#define V_PMAXUB2(mmr1, mmr2, simd_type) V_PMAXUB3(mmr1, mmr2, simd_type)
+#define V_PMAXUB3(mmr1, mmr2, simd_type) V_PMAXUB_##simd_type(mmr1, mmr2)
 
 // some macros for pminub instruction
 // V_PMINUB(mmr1, mmr2, mmr work register) mmr2 may NOT = mmrw
@@ -57,18 +57,19 @@
     "paddusb "mmrw", "mmr1"\n\t" \
     "psubusb "mmrw", "mmr1"\n\t"
 #define V_PMINUB_MMXEXT(mmr1, mmr2, mmrw) "pminub "mmr2", "mmr1"\n\t"
-#define V_PMINUB_TDNOW(mmr1, mmr2, mmrw) V_PMINUB_MMX(mmr1, mmr2, mmrw) // use MMX version
+#define V_PMINUB_3DNOW(mmr1, mmr2, mmrw) V_PMINUB_MMX(mmr1, mmr2, mmrw) // use MMX version
 #define V_PMINUB(mmr1, mmr2, mmrw) V_PMINUB2(mmr1, mmr2, mmrw, SIMD_TYPE)
-#define V_PMINUB2(mmr1, mmr2, mmrw, simdtype) V_PMINUB3(mmr1, mmr2, mmrw, simdtype)
-#define V_PMINUB3(mmr1, mmr2, mmrw, simdtype) V_PMINUB_##simdtype(mmr1, mmr2, mmrw)
+#define V_PMINUB2(mmr1, mmr2, mmrw, simd_type) V_PMINUB3(mmr1, mmr2, mmrw, simd_type)
+#define V_PMINUB3(mmr1, mmr2, mmrw, simd_type) V_PMINUB_##simd_type(mmr1, mmr2, mmrw)
 
 // some macros for movntq instruction
 // V_MOVNTQ(mmr1, mmr2)
 #define V_MOVNTQ_MMX(mmr1, mmr2) "movq "mmr2", "mmr1"\n\t"
-#define V_MOVNTQ_TDNOW(mmr1, mmr2) "movq "mmr2", "mmr1"\n\t"
+#define V_MOVNTQ_3DNOW(mmr1, mmr2) "movq "mmr2", "mmr1"\n\t"
 #define V_MOVNTQ_MMXEXT(mmr1, mmr2) "movntq "mmr2", "mmr1"\n\t"
 #define V_MOVNTQ(mmr1, mmr2) V_MOVNTQ2(mmr1, mmr2, SIMD_TYPE)
-#define V_MOVNTQ2(mmr1, mmr2, simdtype) V_MOVNTQ3(mmr1, mmr2, simdtype)
-#define V_MOVNTQ3(mmr1, mmr2, simdtype) V_MOVNTQ_##simdtype(mmr1, mmr2)
+#define V_MOVNTQ2(mmr1, mmr2, simd_type) V_MOVNTQ3(mmr1, mmr2, simd_type)
+#define V_MOVNTQ3(mmr1, mmr2, simd_type) V_MOVNTQ_##simd_type(mmr1, mmr2)
 
 // end of macros
+
diff --git a/gst/deinterlace2/tvtime/tomsmocomp.c b/gst/deinterlace2/tvtime/tomsmocomp.c
index 7a0ba6b3..3e2fb541 100644
--- a/gst/deinterlace2/tvtime/tomsmocomp.c
+++ b/gst/deinterlace2/tvtime/tomsmocomp.c
@@ -74,27 +74,27 @@ Fieldcopy (void *dest, const void *src, size_t count,
 
 
 #define IS_MMX
-#define SSE_TYPE MMX
+#define SIMD_TYPE MMX
 #define FUNCT_NAME tomsmocompDScaler_MMX
 #include "tomsmocomp/TomsMoCompAll.inc"
 #undef IS_MMX
-#undef SSE_TYPE
+#undef SIMD_TYPE
 #undef FUNCT_NAME
 
 #define IS_3DNOW
-#define SSE_TYPE 3DNOW
+#define SIMD_TYPE 3DNOW
 #define FUNCT_NAME tomsmocompDScaler_3DNOW
 #include "tomsmocomp/TomsMoCompAll.inc"
 #undef IS_3DNOW
-#undef SSE_TYPE
+#undef SIMD_TYPE
 #undef FUNCT_NAME
 
-#define IS_SSE
-#define SSE_TYPE SSE
-#define FUNCT_NAME tomsmocompDScaler_SSE
+#define IS_MMXEXT
+#define SIMD_TYPE MMXEXT
+#define FUNCT_NAME tomsmocompDScaler_MMXEXT
 #include "tomsmocomp/TomsMoCompAll.inc"
-#undef IS_SSE
-#undef SSE_TYPE
+#undef IS_MMXEXT
+#undef SIMD_TYPE
 #undef FUNCT_NAME
 
 G_DEFINE_TYPE (GstDeinterlaceMethodTomsMoComp,
@@ -173,8 +173,8 @@ static void
   dim_class->nick = "tomsmocomp";
   dim_class->latency = 1;
 
-  if (cpu_flags & OIL_IMPL_FLAG_SSE) {
-    dim_class->deinterlace_frame = tomsmocompDScaler_SSE;
+  if (cpu_flags & OIL_IMPL_FLAG_MMXEXT) {
+    dim_class->deinterlace_frame = tomsmocompDScaler_MMXEXT;
   } else if (cpu_flags & OIL_IMPL_FLAG_3DNOW) {
     dim_class->deinterlace_frame = tomsmocompDScaler_3DNOW;
   } else if (cpu_flags & OIL_IMPL_FLAG_MMX) {
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc b/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc
index 743b6e8d..daa38099 100644
--- a/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc
+++ b/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc
@@ -32,8 +32,8 @@
 #undef SEFUNC
 #endif
 
-#if defined(IS_SSE)
-#define SEFUNC(x) Search_Effort_SSE_##x(int src_pitch, int dst_pitch, int rowsize, const unsigned char *pWeaveSrc, const unsigned char *pWeaveSrcP, unsigned char *pWeaveDest, int IsOdd, const unsigned char *pCopySrc, const unsigned char *pCopySrcP, int FldHeight)
+#if defined(IS_MMXEXT)
+#define SEFUNC(x) Search_Effort_MMXEXT_##x(int src_pitch, int dst_pitch, int rowsize, const unsigned char *pWeaveSrc, const unsigned char *pWeaveSrcP, unsigned char *pWeaveDest, int IsOdd, const unsigned char *pCopySrc, const unsigned char *pCopySrcP, int FldHeight)
 #elif defined(IS_3DNOW)
 #define SEFUNC(x) Search_Effort_3DNOW_##x(int src_pitch, int dst_pitch, int rowsize, const unsigned char *pWeaveSrc, const unsigned char *pWeaveSrcP, unsigned char *pWeaveDest, int IsOdd, const unsigned char *pCopySrc, const unsigned char *pCopySrcP, int FldHeight)
 #else
@@ -49,8 +49,8 @@
 #undef USE_STRANGE_BOB
 #undef SEFUNC
 
-#if defined(IS_SSE)
-#define SEFUNC(x) Search_Effort_SSE_##x(src_pitch, dst_pitch, rowsize, pWeaveSrc, pWeaveSrcP, pWeaveDest, IsOdd, pCopySrc, pCopySrcP, FldHeight)
+#if defined(IS_MMXEXT)
+#define SEFUNC(x) Search_Effort_MMXEXT_##x(src_pitch, dst_pitch, rowsize, pWeaveSrc, pWeaveSrcP, pWeaveDest, IsOdd, pCopySrc, pCopySrcP, FldHeight)
 #elif defined(IS_3DNOW)
 #define SEFUNC(x) Search_Effort_3DNOW_##x(src_pitch, dst_pitch, rowsize, pWeaveSrc, pWeaveSrcP, pWeaveDest, IsOdd, pCopySrc, pCopySrcP, FldHeight)
 #else
@@ -231,7 +231,7 @@ void FUNCT_NAME(GstDeinterlaceMethod *d_method, GstDeinterlace2* object)
     }
   }
 
-#ifdef ARCH_386
+#ifdef HAVE_CPU_I386
   __asm__ __volatile__("emms");
 #endif
 }
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h b/gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h
index 24b0906f..156be892 100644
--- a/gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h
+++ b/gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h
@@ -3,15 +3,13 @@
 
 #define USE_FOR_DSCALER
 
-#define MyMemCopy pMyMemcpy
-
 // Define a few macros for CPU dependent instructions.
 // I suspect I don't really understand how the C macro preprocessor works but
 // this seems to get the job done.          // TRB 7/01
 
 // BEFORE USING THESE YOU MUST SET:
 
-// #define SSE_TYPE SSE (or MMX or 3DNOW)
+// #define SIMD_TYPE MMXEXT (or MMX or 3DNOW)
 
 // some macros for pavgb instruction
 // V_PAVGB(mmr1, mmr2, mmr work register, smask) mmr2 may = mmrw if you can trash it
@@ -23,21 +21,21 @@
     "pand "smask", "mmr1"\n\t" \
     "psrlw $1, "mmr1"\n\t" \
     "paddusb "mmrw", "mmr1"\n\t"
-#define V_PAVGB_SSE(mmr1, mmr2, mmrw, smask) "pavgb "mmr2", "mmr1"\n\t"
+#define V_PAVGB_MMXEXT(mmr1, mmr2, mmrw, smask) "pavgb "mmr2", "mmr1"\n\t"
 #define V_PAVGB_3DNOW(mmr1, mmr2, mmrw, smask) "pavgusb "mmr2", "mmr1"\n\t"
-#define V_PAVGB(mmr1, mmr2, mmrw, smask) V_PAVGB2(mmr1, mmr2, mmrw, smask, SSE_TYPE)
-#define V_PAVGB2(mmr1, mmr2, mmrw, smask, ssetyp) V_PAVGB3(mmr1, mmr2, mmrw, smask, ssetyp)
-#define V_PAVGB3(mmr1, mmr2, mmrw, smask, ssetyp) V_PAVGB_##ssetyp(mmr1, mmr2, mmrw, smask)
+#define V_PAVGB(mmr1, mmr2, mmrw, smask) V_PAVGB2(mmr1, mmr2, mmrw, smask, SIMD_TYPE)
+#define V_PAVGB2(mmr1, mmr2, mmrw, smask, simd_type) V_PAVGB3(mmr1, mmr2, mmrw, smask, simd_type)
+#define V_PAVGB3(mmr1, mmr2, mmrw, smask, simd_type) V_PAVGB_##simd_type(mmr1, mmr2, mmrw, smask)
 
 // some macros for pmaxub instruction
 #define V_PMAXUB_MMX(mmr1, mmr2) \
     "psubusb "mmr2", "mmr1"\n\t" \
     "paddusb "mmr2", "mmr1"\n\t"
-#define V_PMAXUB_SSE(mmr1, mmr2) "pmaxub "mmr2", "mmr1"\n\t"
+#define V_PMAXUB_MMXEXT(mmr1, mmr2) "pmaxub "mmr2", "mmr1"\n\t"
 #define V_PMAXUB_3DNOW(mmr1, mmr2) V_PMAXUB_MMX(mmr1, mmr2) // use MMX version
-#define V_PMAXUB(mmr1, mmr2) V_PMAXUB2(mmr1, mmr2, SSE_TYPE)
-#define V_PMAXUB2(mmr1, mmr2, ssetyp) V_PMAXUB3(mmr1, mmr2, ssetyp)
-#define V_PMAXUB3(mmr1, mmr2, ssetyp) V_PMAXUB_##ssetyp(mmr1, mmr2)
+#define V_PMAXUB(mmr1, mmr2) V_PMAXUB2(mmr1, mmr2, SIMD_TYPE)
+#define V_PMAXUB2(mmr1, mmr2, simd_type) V_PMAXUB3(mmr1, mmr2, simd_type)
+#define V_PMAXUB3(mmr1, mmr2, simd_type) V_PMAXUB_##simd_type(mmr1, mmr2)
 
 // some macros for pminub instruction
 // V_PMINUB(mmr1, mmr2, mmr work register) mmr2 may NOT = mmrw
@@ -46,20 +44,20 @@
     "psubusb "mmr2", "mmrw"\n\t" \
     "paddusb "mmrw", "mmr1"\n\t" \
     "psubusb "mmrw", "mmr1"\n\t"
-#define V_PMINUB_SSE(mmr1, mmr2, mmrw) "pminub "mmr2", "mmr1"\n\t"
+#define V_PMINUB_MMXEXT(mmr1, mmr2, mmrw) "pminub "mmr2", "mmr1"\n\t"
 #define V_PMINUB_3DNOW(mmr1, mmr2, mmrw) V_PMINUB_MMX(mmr1, mmr2, mmrw) // use MMX version
-#define V_PMINUB(mmr1, mmr2, mmrw) V_PMINUB2(mmr1, mmr2, mmrw, SSE_TYPE)
-#define V_PMINUB2(mmr1, mmr2, mmrw, ssetyp) V_PMINUB3(mmr1, mmr2, mmrw, ssetyp)
-#define V_PMINUB3(mmr1, mmr2, mmrw, ssetyp) V_PMINUB_##ssetyp(mmr1, mmr2, mmrw)
+#define V_PMINUB(mmr1, mmr2, mmrw) V_PMINUB2(mmr1, mmr2, mmrw, SIMD_TYPE)
+#define V_PMINUB2(mmr1, mmr2, mmrw, simd_type) V_PMINUB3(mmr1, mmr2, mmrw, simd_type)
+#define V_PMINUB3(mmr1, mmr2, mmrw, simd_type) V_PMINUB_##simd_type(mmr1, mmr2, mmrw)
 
 // some macros for movntq instruction
 // V_MOVNTQ(mmr1, mmr2)
 #define V_MOVNTQ_MMX(mmr1, mmr2) "movq "mmr2", "mmr1"\n\t"
 #define V_MOVNTQ_3DNOW(mmr1, mmr2) "movq "mmr2", "mmr1"\n\t"
-#define V_MOVNTQ_SSE(mmr1, mmr2) "movntq "mmr2", "mmr1"\n\t"
-#define V_MOVNTQ(mmr1, mmr2) V_MOVNTQ2(mmr1, mmr2, SSE_TYPE)
-#define V_MOVNTQ2(mmr1, mmr2, ssetyp) V_MOVNTQ3(mmr1, mmr2, ssetyp)
-#define V_MOVNTQ3(mmr1, mmr2, ssetyp) V_MOVNTQ_##ssetyp(mmr1, mmr2)
+#define V_MOVNTQ_MMXEXT(mmr1, mmr2) "movntq "mmr2", "mmr1"\n\t"
+#define V_MOVNTQ(mmr1, mmr2) V_MOVNTQ2(mmr1, mmr2, SIMD_TYPE)
+#define V_MOVNTQ2(mmr1, mmr2, simd_type) V_MOVNTQ3(mmr1, mmr2, simd_type)
+#define V_MOVNTQ3(mmr1, mmr2, simd_type) V_MOVNTQ_##simd_type(mmr1, mmr2)
 
 // end of macros
 
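
For readers unfamiliar with the build pattern touched above: tomsmocomp.c compiles TomsMoCompAll.inc once per SIMD variant by redefining SIMD_TYPE/FUNCT_NAME around each #include, and class_init then picks the most capable variant the CPU reports (now keyed on MMXEXT rather than SSE). Below is a rough, self-contained C sketch of that runtime-dispatch idea; FLAG_MMX/FLAG_MMXEXT, detect_cpu_flags() and DEFINE_SCALER() are invented for the example (the real code uses liboil's OIL_IMPL_FLAG_* bits and repeated inclusion of the template file), and the 3DNOW branch is omitted for brevity.

#include <stdio.h>

#define FLAG_MMX    (1 << 0)   /* invented feature bits for the sketch */
#define FLAG_MMXEXT (1 << 1)

/* Stand-in for the repeated '#include "tomsmocomp/TomsMoCompAll.inc"'
 * under different FUNCT_NAME defines: one function per SIMD variant. */
#define DEFINE_SCALER(name, label) \
  static void name (void) { puts ("deinterlacing with " label); }

DEFINE_SCALER (scaler_C, "plain C")
DEFINE_SCALER (scaler_MMX, "MMX")
DEFINE_SCALER (scaler_MMXEXT, "MMXEXT")

static unsigned int
detect_cpu_flags (void)
{
  /* Invented stub; the real code reads liboil's CPU flags. */
  return FLAG_MMX | FLAG_MMXEXT;
}

int
main (void)
{
  unsigned int cpu_flags = detect_cpu_flags ();
  void (*deinterlace_frame) (void);

  /* Same preference order as the class_init hunk above: most capable
   * supported variant first, plain C as the fallback. */
  if (cpu_flags & FLAG_MMXEXT)
    deinterlace_frame = scaler_MMXEXT;
  else if (cpu_flags & FLAG_MMX)
    deinterlace_frame = scaler_MMX;
  else
    deinterlace_frame = scaler_C;

  deinterlace_frame ();
  return 0;
}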