author    Dave Robillard <dave@drobilla.net>  2009-05-26 19:10:44 -0400
committer Dave Robillard <dave@drobilla.net>  2009-05-26 19:10:44 -0400
commit    b75a26657febaf86c4137b4d41c068926325e316 (patch)
tree      65c161824169ac09bf8418244937aec6ab77a270 /gst/deinterlace2/tvtime/tomsmocomp
parent    4e1d3bba9c21cb8bbfe70ffed953a8385fb7314d (diff)
parent    8f70498c898a65d0938e3e104e91662ff5b693c3 (diff)
Merge branch 'master' of git://anongit.freedesktop.org/gstreamer/gst-plugins-bad into fdo
Diffstat (limited to 'gst/deinterlace2/tvtime/tomsmocomp')
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoop0A.inc        15
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopBottom.inc   174
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopEdgeA.inc     11
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopEdgeA8.inc    12
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA.inc      10
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA2.inc      5
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA6.inc     11
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddAH.inc     10
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddAH2.inc     5
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopTop.inc      254
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopVA.inc         6
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/SearchLoopVAH.inc        6
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/StrangeBob.inc         435
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc      241
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll2.inc     243
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/WierdBob.inc           286
-rw-r--r--  gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h     164
17 files changed, 0 insertions, 1888 deletions
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoop0A.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoop0A.inc
deleted file mode 100644
index b1d9aeca..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoop0A.inc
+++ /dev/null
@@ -1,15 +0,0 @@
-// -*- c++ -*-
-
-// Searches just the center pixel, in both the old
-// and new fields, but takes averages. This is an even
-// pixel address. Any chroma match will be used. (YUY2)
-// We like finding 0 motion best, so we bias everything found previously
-// up by a little, and adjust later
-
-#ifdef IS_SSE2
- "paddusb "_ONES", %%xmm7\n\t" // bias toward no motion
-#else
- "paddusb "_ONES", %%mm7\n\t" // bias toward no motion
-#endif
-
- MERGE4PIXavg("(%%"XDI", %%"XCX")", "(%%"XSI", %%"XCX")") // center, in old and new
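The paddusb of _ONES above is a saturating per-byte add: every difference score accumulated so far is nudged up by one, so the zero-motion candidate merged next wins any tie. A minimal C sketch of that per-byte saturating add, assuming 8-byte groups and using illustrative names:

#include <stdint.h>

/* C analogue of "paddusb ONES, mm7": add a bias to each of 8 packed bytes,
 * clamping at 255 instead of wrapping around. */
static void
bias_scores_saturating (uint8_t score[8], uint8_t bias)
{
  int i;

  for (i = 0; i < 8; i++) {
    unsigned int s = score[i] + bias;

    score[i] = (s > 255) ? 255 : (uint8_t) s;
  }
}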
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopBottom.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopBottom.inc
deleted file mode 100644
index e1560353..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopBottom.inc
+++ /dev/null
@@ -1,174 +0,0 @@
-// -*- c++ -*-
-
-// Version for non-SSE2
-
-#ifndef IS_C
-
-#ifdef SKIP_SEARCH
-        "movq %%mm6, %%mm0\n\t"            // just use the results of our weird bob
-#else
-
-
- // JA 9/Dec/2002
- // failed experiment
- // but leave in placeholder for me to play about
-#ifdef DONT_USE_STRANGE_BOB
- // Use the best weave if diffs less than 10 as that
- // means the image is still or moving cleanly
- // if there is motion we will clip which will catch anything
- "psubusb "_FOURS", %%mm7\n\t" // sets bits to zero if weave diff < 4
- "pxor %%mm0, %%mm0\n\t"
- "pcmpeqb %%mm0, %%mm7\n\t" // all ff where weave better, else 00
- "pcmpeqb %%mm7, %%mm0\n\t" // all ff where bob better, else 00
- "pand %%mm6, %%mm0\n\t" // use bob for these pixel values
- "pand %%mm5, %%mm7\n\t" // use weave for these
- "por %%mm7, %%mm0\n\t" // combine both
-#else
- // Use the better of bob or weave
- // pminub mm4, TENS // the most we care about
- V_PMINUB ("%%mm4", _TENS, "%%mm0") // the most we care about
-
-        "psubusb %%mm4, %%mm7\n\t"            // forgive that much from the weave estimate
- "psubusb "_FOURS", %%mm7\n\t" // bias it a bit toward weave
- "pxor %%mm0, %%mm0\n\t"
- "pcmpeqb %%mm0, %%mm7\n\t" // all ff where weave better, else 00
- "pcmpeqb %%mm7, %%mm0\n\t" // all ff where bob better, else 00
- "pand %%mm6, %%mm0\n\t" // use bob for these pixel values
- "pand %%mm5, %%mm7\n\t" // use weave for these
- "por %%mm7, %%mm0\n\t" // combine both
-#endif
-
-
- // pminub mm0, Max_Vals // but clip to catch the stray error
- V_PMINUB ("%%mm0", _Max_Vals, "%%mm1") // but clip to catch the stray error
- // pmaxub mm0, Min_Vals
- V_PMAXUB ("%%mm0", _Min_Vals)
-
-#endif
-
-
- MOVX" "_pDest", %%"XAX"\n\t"
-
-#ifdef USE_VERTICAL_FILTER
- "movq %%mm0, %%mm1\n\t"
- // pavgb mm0, qword ptr["XBX"]
- V_PAVGB ("%%mm0", "(%%"XBX")", "%%mm2", _ShiftMask)
- // movntq qword ptr["XAX"+"XDX"], mm0
-        V_MOVNTQ ("(%%"XAX", %%"XDX")", "%%mm0")
- // pavgb mm1, qword ptr["XBX"+"XCX"]
- V_PAVGB ("%%mm1", "(%%"XBX", %%"XCX")", "%%mm2", _ShiftMask)
- //FIXME: XDX or XAX!!
- "addq "_dst_pitchw", %%"XBX
- // movntq qword ptr["XAX"+"XDX"], mm1
- V_MOVNTQ ("(%%"XAX", %%"XDX")", "%%mm1")
-#else
-
- // movntq qword ptr["XAX"+"XDX"], mm0
- V_MOVNTQ ("(%%"XAX", %%"XDX")", "%%mm0")
-#endif
-
- LEAX" 8(%%"XDX"), %%"XDX"\n\t" // bump offset pointer
- CMPX" "_Last8", %%"XDX"\n\t" // done with line?
- "jb 1b\n\t" // y
-
- MOVX" "_oldbx", %%"XBX"\n\t"
-
- : /* no outputs */
-
- : "m"(pBob),
- "m"(src_pitch2),
- "m"(ShiftMask),
- "m"(pDest),
- "m"(dst_pitchw),
- "m"(Last8),
- "m"(pSrc),
- "m"(pSrcP),
- "m"(pBobP),
- "m"(DiffThres),
- "m"(Min_Vals),
- "m"(Max_Vals),
- "m"(FOURS),
- "m"(TENS),
- "m"(ONES),
- "m"(UVMask),
- "m"(Max_Mov),
- "m"(YMask),
- "m"(oldbx)
-
- : XAX, XCX, XDX, XSI, XDI,
- "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
-#ifdef __MMX__
- "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
-#endif
- "memory", "cc"
- );
-
- // adjust for next line
- pSrc += src_pitch2;
- pSrcP += src_pitch2;
- pDest += dst_pitch2;
- pBob += src_pitch2;
- pBobP += src_pitch2;
- }
-
- return 0;
-#else
-#ifdef SKIP_SEARCH
-            out[0] = best[0];     // just use the results of our weird bob
- out[1] = best[1];
-#else
- diff[0] = diff[0] - MIN (diff[0], 10) - 4;
-            diff[1] = diff[1] - MIN (diff[1], 10) - 4;
- if (diff[0] < 0)
- out[0] = weave[0];
- else
- out[0] = best[0];
-
- if (diff[1] < 0)
- out[1] = weave[1];
- else
- out[1] = best[1];
-
-
- out[0] = CLAMP (out[0], MinVals[0], MaxVals[0]);
- out[1] = CLAMP (out[1], MinVals[1], MaxVals[1]);
-#endif
-
-#ifdef USE_VERTICAL_FILTER
- pDest[x] = (out[0] + pBob[0]) / 2;
- pDest[x + dst_pitchw] = (pBob[src_pitch2] + out[0]) / 2;
- pDest[x + 1] = (out[1] + pBob[1]) / 2;
- pDest[x + 1 + dst_pitchw] = (pBob[src_pitch2 + 1] + out[1]) / 2;
-#else
- pDest[x] = out[0];
- pDest[x+1] = out[1];
-#endif
- pBob += 2;
- pBobP += 2;
- pSrc += 2;
- pSrcP += 2;
- }
- // adjust for next line
- pSrc = src_pitch2 * (y+1) + pWeaveSrc;
- pSrcP = src_pitch2 * (y+1) + pWeaveSrcP;
- pDest = dst_pitch2 * (y+1) + pWeaveDest + dst_pitch2;
-
-
- if (TopFirst)
- {
- pBob = pCopySrc + src_pitch2;
- pBobP = pCopySrcP + src_pitch2;
- }
- else
- {
- pBob = pCopySrc;
- pBobP = pCopySrcP;
- }
-
- pBob += src_pitch2 * (y+1);
- pBobP += src_pitch2 * (y+1);
- }
-
- return 0;
-
-#endif
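The psubusb/pcmpeqb sequence above is the usual MMX way to build a per-byte "less than or equal" mask without signed compares: an unsigned saturating subtract reaches zero exactly when the left operand is not larger, and comparing that result with zero gives an all-ones or all-zeros byte that pand/por then use to select weave or bob. A hedged C sketch of the same per-byte decision (names are illustrative, and the real code first clamps the forgiveness term to TENS):

#include <stdint.h>

/* Prefer the weave pixel when its difference estimate, after forgiving the
 * bob uncertainty and a fixed FOURS bias, saturates to zero. */
static void
pick_weave_or_bob (const uint8_t weave_diff[8], const uint8_t bob_uncert[8],
    const uint8_t weave[8], const uint8_t bob[8], uint8_t out[8])
{
  int i;

  for (i = 0; i < 8; i++) {
    /* psubusb clamps at 0, so (x -sat y) == 0  <=>  x <= y */
    uint8_t forgiven = (weave_diff[i] > bob_uncert[i]) ?
        (uint8_t) (weave_diff[i] - bob_uncert[i]) : 0;

    out[i] = (forgiven <= 4) ? weave[i] : bob[i];   /* pand/pand/por merge */
  }
}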
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopEdgeA.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopEdgeA.inc
deleted file mode 100644
index 6208fe8c..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopEdgeA.inc
+++ /dev/null
@@ -1,11 +0,0 @@
-// -*- c++ -*-
-
-// Searches 2 pixels to the left and right, in both the old
-// and new fields, but takes averages. These are even
-// pixel addresses. Chroma match will be used. (YUY2)
- MERGE4PIXavg("-4(%%"XDI")", "4(%%"XSI", %%"XCX", 2)") // up left, down right
- MERGE4PIXavg("4(%%"XDI")", "-4(%%"XSI", %%"XCX", 2)") // up right, down left
- MERGE4PIXavg("-4(%%"XDI", %%"XCX")", "4(%%"XSI", %%"XCX")") // left, right
- MERGE4PIXavg("4(%%"XDI", %%"XCX")", "-4(%%"XSI", %%"XCX")") // right, left
- MERGE4PIXavg("-4(%%"XDI", %%"XCX", 2)", "4(%%"XSI")") // down left, up right
- MERGE4PIXavg("4(%%"XDI", %%"XCX", 2)", "-4(%%"XSI")") // down right, up left
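Each MERGE4PIXavg() pairing above scores one motion hypothesis: a sample taken at (+dx, +dy) relative to the missing pixel in the previous field is matched against the mirrored sample at (-dx, -dy) in the next field, so anything moving at constant velocity through that pixel produces a small difference. A rough C sketch of a single candidate, with illustrative names (prev/next are assumed to point at the missing pixel's position in each field, pitch is src_pitch2):

#include <stdint.h>

static void
mirrored_candidate (const uint8_t * prev, const uint8_t * next,
    int dx, int dy, int pitch, uint8_t * cand, uint8_t * diff)
{
  uint8_t a = prev[dy * pitch + dx];
  uint8_t b = next[-dy * pitch - dx];

  *cand = (uint8_t) ((a + b) / 2);              /* pavgb-style average */
  *diff = (uint8_t) ((a > b) ? a - b : b - a);  /* |a - b| rates the match */
}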
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopEdgeA8.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopEdgeA8.inc
deleted file mode 100644
index 2841c3f6..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopEdgeA8.inc
+++ /dev/null
@@ -1,12 +0,0 @@
-// -*- c++ -*-
-
-// Searches 4 pixels to the left and right, in both the old
-// and new fields, but takes averages. These are even
-// pixel addresses. Chroma match will be used. (YUY2)
- MERGE4PIXavg("-8(%%"XDI")", "8(%%"XSI", %%"XCX", 2)") // up left, down right
- MERGE4PIXavg("8(%%"XDI")", "-8(%%"XSI", %%"XCX", 2)") // up right, down left
- MERGE4PIXavg("-8(%%"XDI", %%"XCX")", "8(%%"XSI", %%"XCX")") // left, right
- MERGE4PIXavg("8(%%"XDI", %%"XCX")", "-8(%%"XSI", %%"XCX")") // right, left
- MERGE4PIXavg("-8(%%"XDI", %%"XCX", 2)", "8(%%"XSI")") // down left, up right
- MERGE4PIXavg("8(%%"XDI", %%"XCX", 2)", "-8(%%"XSI")") // down right, up left
-
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA.inc
deleted file mode 100644
index ab5375f4..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA.inc
+++ /dev/null
@@ -1,10 +0,0 @@
-// -*- c++ -*-
-
-// Searches 1 pixel to the left and right, in both the old
-// and new fields, but takes averages. These are odd
-// pixel addresses. Any chroma match will not be used. (YUY2)
- MERGE4PIXavg("-2(%%"XDI")", "2(%%"XSI", %%"XCX", 2)") // up left, down right
- MERGE4PIXavg("2(%%"XDI")", "-2(%%"XSI", %%"XCX", 2)") // up right, down left
- MERGE4PIXavg("-2(%%"XDI", %%"XCX", 2)", "2(%%"XSI")") // down left, up right
- MERGE4PIXavg("2(%%"XDI", %%"XCX", 2)", "-2(%%"XSI")") // down right, up left
-#include "SearchLoopOddA2.inc"
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA2.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA2.inc
deleted file mode 100644
index fd3f6fb0..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA2.inc
+++ /dev/null
@@ -1,5 +0,0 @@
-// Searches 1 pixel to the left and right, in both the old
-// and new fields, but takes averages. These are odd
-// pixel addresses. Any chroma match will not be used. (YUY2)
- MERGE4PIXavg("-2(%%"XDI", %%"XCX")", "2(%%"XSI", %%"XCX")") // left, right
- MERGE4PIXavg("2(%%"XDI", %%"XCX")", "-2(%%"XSI", %%"XCX")") // right, left
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA6.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA6.inc
deleted file mode 100644
index cbae014e..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddA6.inc
+++ /dev/null
@@ -1,11 +0,0 @@
-// -*- c++ -*-
-
-// Searches 3 pixels to the left and right, in both the old
-// and new fields, but takes averages. These are odd
-// pixel addresses. Any chroma match will not be used. (YUY2)
- MERGE4PIXavg("-6(%%"XDI")", "6(%%"XSI", %%"XCX", 2)") // up left, down right
- MERGE4PIXavg("6(%%"XDI")", "-6(%%"XSI", %%"XCX", 2)") // up right, down left
- MERGE4PIXavg("-6(%%"XDI", %%"XCX")", "6(%%"XSI", %%"XCX")") // left, right
- MERGE4PIXavg("6(%%"XDI", %%"XCX")", "-6(%%"XSI", %%"XCX")") // right, left
- MERGE4PIXavg("-6(%%"XDI", %%"XCX", 2)", "6(%%"XSI")") // down left, up right
- MERGE4PIXavg("6(%%"XDI", %%"XCX", 2)", "-6(%%"XSI")") // down right, up left
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddAH.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddAH.inc
deleted file mode 100644
index e59e3c7e..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddAH.inc
+++ /dev/null
@@ -1,10 +0,0 @@
-// Searches 1 pixel to the left and right, in both the old
-// and new fields, but takes v-half pel averages. These are odd
-// pixel addresses. Any chroma match will not be used. (YUY2)
- __asm
- {
- MERGE4PIXavgH("XDI"-2, "XDI"+"XCX"-2, "XSI"+"XCX"+2, "XSI"+2*"XCX"+2) // up left, down right
- MERGE4PIXavgH("XDI"+2, "XDI"+"XCX"+2, "XSI"+"XCX"-2, "XSI"+2*"XCX"-2) // up right, down left
- MERGE4PIXavgH("XDI"+2*"XCX"-2, "XDI"+"XCX"-2, "XSI"+"XCX"+2, "XSI"+2) // down left, up right
- MERGE4PIXavgH("XDI"+2*"XCX"+2, "XDI"+"XCX"+2, "XSI"+"XCX"-2, "XSI"-2) // down right, up left
- }
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddAH2.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddAH2.inc
deleted file mode 100644
index cd7d812a..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopOddAH2.inc
+++ /dev/null
@@ -1,5 +0,0 @@
-// Searches 1 pixel to the left and right, in both the old
-// and new fields, but takes vertical averages. These are odd
-// pixel addresses. Any chroma match will not be used. (YUY2)
- MERGE4PIXavgH("-2(%%"XDI", %%"XCX")", "(%%"XDI", %%"XCX")", "(%%"XSI", %%"XCX")", "2(%%"XSI", %%"XCX")") // left, right
- MERGE4PIXavgH("2(%%"XDI", %%"XCX")", "(%%"XDI", %%"XCX")", "(%%"XSI", %%"XCX")", "-2(%%"XSI", %%"XCX")") // right, left
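MERGE4PIXavgH() differs from the full-pel MERGE4PIXavg() only in that each side of the comparison is first averaged vertically, producing a half-pel candidate. A rough sketch of one pairing, with illustrative names:

#include <stdint.h>

static void
halfpel_candidate (uint8_t prev_a, uint8_t prev_b, uint8_t next_a,
    uint8_t next_b, uint8_t * cand, uint8_t * diff)
{
  uint8_t p = (uint8_t) ((prev_a + prev_b) / 2);  /* half-pel, previous field */
  uint8_t n = (uint8_t) ((next_a + next_b) / 2);  /* half-pel, next field */

  *cand = (uint8_t) ((p + n) / 2);
  *diff = (uint8_t) ((p > n) ? p - n : n - p);
}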
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopTop.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopTop.inc
deleted file mode 100644
index 9d6a490f..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopTop.inc
+++ /dev/null
@@ -1,254 +0,0 @@
-// -*- c++ -*-
-
-unsigned char* pDest;
-const unsigned char* pSrcP;
-const unsigned char* pSrc;
-const unsigned char* pBob;
-const unsigned char* pBobP;
-
-// long is int32 on ARCH_386, int64 on ARCH_AMD64. Declaring it this way
-// saves a lot of xor's to delete 64bit garbage.
-
-#if defined(DBL_RESIZE) || defined(USE_FOR_DSCALER)
-long src_pitch2 = src_pitch; // even & odd lines are not interleaved in DScaler
-#else
-long src_pitch2 = 2 * src_pitch; // even & odd lines are interleaved in Avisynth
-#endif
-
-
-long dst_pitch2 = 2 * dst_pitch;
-long y;
-
-long Last8;
-
- pSrc = pWeaveSrc; // points 1 weave line above
- pSrcP = pWeaveSrcP; // "
-
-#ifdef DBL_RESIZE
-
-#ifdef USE_VERTICAL_FILTER
- pDest = pWeaveDest + dst_pitch2;
-#else
- pDest = pWeaveDest + 3*dst_pitch;
-#endif
-
-#else
-
-#ifdef USE_VERTICAL_FILTER
- pDest = pWeaveDest + dst_pitch;
-#else
- pDest = pWeaveDest + dst_pitch2;
-#endif
-
-#endif
-
- if (TopFirst)
- {
- pBob = pCopySrc + src_pitch2; // remember one weave line just copied previously
- pBobP = pCopySrcP + src_pitch2;
- }
- else
- {
- pBob = pCopySrc;
- pBobP = pCopySrcP;
- }
-
-#ifndef IS_C
-
-#ifndef _pBob
-#define _pBob "%0"
-#define _src_pitch2 "%1"
-#define _ShiftMask "%2"
-#define _pDest "%3"
-#define _dst_pitchw "%4"
-#define _Last8 "%5"
-#define _pSrc "%6"
-#define _pSrcP "%7"
-#define _pBobP "%8"
-#define _DiffThres "%9"
-#define _Min_Vals "%10"
-#define _Max_Vals "%11"
-#define _FOURS "%12"
-#define _TENS "%13"
-#define _ONES "%14"
-#define _UVMask "%15"
-#define _Max_Mov "%16"
-#define _YMask "%17"
-#define _oldbx "%18"
-#endif
- Last8 = (rowsize-8);
-
- for (y=1; y < FldHeight-1; y++)
- {
-        long dst_pitchw = dst_pitch; // local storage so the asm can reference it
- int64_t Max_Mov = 0x0404040404040404ull;
- int64_t DiffThres = 0x0f0f0f0f0f0f0f0full;
- int64_t YMask = 0x00ff00ff00ff00ffull; // keeps only luma
- int64_t UVMask = 0xff00ff00ff00ff00ull; // keeps only chroma
- int64_t TENS = 0x0a0a0a0a0a0a0a0aull;
- int64_t FOURS = 0x0404040404040404ull;
- int64_t ONES = 0x0101010101010101ull;
- int64_t Min_Vals = 0x0000000000000000ull;
- int64_t Max_Vals = 0x0000000000000000ull;
- int64_t ShiftMask = 0xfefffefffefffeffull;
-
- long oldbx;
-
- // pretend it's indented -->>
- __asm__ __volatile__
- (
- // Loop general reg usage
- //
- // XAX - pBobP, then pDest
- // XBX - pBob
- // XCX - src_pitch2
- // XDX - current offset
- // XDI - prev weave pixels, 1 line up
- // XSI - next weave pixels, 1 line up
-
- // Save "XBX" (-fPIC)
- MOVX" %%"XBX", "_oldbx"\n\t"
-
- // simple bob first 8 bytes
- MOVX" "_pBob", %%"XBX"\n\t"
- MOVX" "_src_pitch2", %%"XCX"\n\t"
-
-#ifdef USE_VERTICAL_FILTER
- "movq (%%"XBX"), %%mm0\n\t"
- "movq (%%"XBX", %%"XCX"), %%mm1\n\t" //, qword ptr["XBX"+"XCX"]
- "movq %%mm0, %%mm2\n\t"
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // halfway between
- V_PAVGB ("%%mm0", "%%mm2", "%%mm3", _ShiftMask) // 1/4 way
- V_PAVGB ("%%mm1", "%%mm2", "%%mm3", _ShiftMask) // 3/4 way
- MOVX" "_pDest", %%"XDI"\n\t"
- MOVX" "_dst_pitchw", %%"XAX"\n\t"
- V_MOVNTQ ("(%%"XDI")", "%%mm0")
- V_MOVNTQ ("(%%"XDI", %%"XAX")", "%%mm1") // qword ptr["XDI"+"XAX"], mm1
-
- // simple bob last 8 bytes
- MOVX" "_Last8", %%"XDX"\n\t"
- LEAX" (%%"XBX", %%"XDX"), %%"XSI"\n\t" // ["XBX"+"XDX"]
- "movq (%%"XSI"), %%mm0\n\t"
- "movq (%%"XSI", %%"XCX"), %%mm1\n\t" // qword ptr["XSI"+"XCX"]
- "movq %%mm0, %%mm2\n\t"
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // halfway between
- V_PAVGB ("%%mm0", "%%mm2", "%%mm3", _ShiftMask) // 1/4 way
- V_PAVGB ("%%mm1", "%%mm2", "%%mm3", _ShiftMask) // 3/4 way
- ADDX" %%"XDX", %%"XDI"\n\t" // last 8 bytes of dest
-        V_MOVNTQ ("(%%"XDI")", "%%mm0")
- V_MOVNTQ ("(%%"XDI", %%"XAX")", "%%mm1") // qword ptr["XDI"+"XAX"], mm1)
-
-#else
- "movq (%%"XBX"), %%mm0\n\t"
- // pavgb mm0, qword ptr["XBX"+"XCX"]
- V_PAVGB ("%%mm0", "(%%"XBX", %%"XCX")", "%%mm2", _ShiftMask) // qword ptr["XBX"+"XCX"], mm2, ShiftMask)
- MOVX" "_pDest", %%"XDI"\n\t"
- V_MOVNTQ ("(%%"XDI")", "%%mm0")
-
- // simple bob last 8 bytes
- MOVX" "_Last8", %%"XDX"\n\t"
- LEAX" (%%"XBX", %%"XDX"), %%"XSI"\n\t" //"XSI", ["XBX"+"XDX"]
- "movq (%%"XSI"), %%mm0\n\t"
- // pavgb mm0, qword ptr["XSI"+"XCX"]
- V_PAVGB ("%%mm0", "(%%"XSI", %%"XCX")", "%%mm2", _ShiftMask) // qword ptr["XSI"+"XCX"], mm2, ShiftMask)
- V_MOVNTQ ("(%%"XDI", %%"XDX")", "%%mm0") // qword ptr["XDI"+"XDX"], mm0)
-#endif
- // now loop and get the middle qwords
- MOVX" "_pSrc", %%"XSI"\n\t"
- MOVX" "_pSrcP", %%"XDI"\n\t"
-        MOVX" $8, %%"XDX"\n\t"                // current offset into all lines
-
- "1:\n\t"
- MOVX" "_pBobP", %%"XAX"\n\t"
- ADDX" $8, %%"XDI"\n\t"
- ADDX" $8, %%"XSI"\n\t"
- ADDX" $8, %%"XBX"\n\t"
- ADDX" %%"XDX", %%"XAX"\n\t"
-
-#ifdef USE_STRANGE_BOB
-#include "StrangeBob.inc"
-#else
-#include "WierdBob.inc"
-#endif
-
- // For non-SSE2:
-        // throughout most of the rest of this loop we will maintain
- // mm4 our min bob value
- // mm5 best weave pixels so far
- // mm6 our max Bob value
- // mm7 best weighted pixel ratings so far
-
- // We will keep a slight bias to using the weave pixels
- // from the current location, by rating them by the min distance
- // from the Bob value instead of the avg distance from that value.
- // our best and only rating so far
- "pcmpeqb %%mm7, %%mm7\n\t" // ffff, say we didn't find anything good yet
-
-#else
- Last8 = (rowsize - 4);
-
- for (y=1; y < FldHeight-1; y++)
- {
- #ifdef USE_STRANGE_BOB
- long DiffThres = 0x0f;
- #endif
-
- #ifndef SKIP_SEARCH
- long weave[2], MaxVals[2], MinVals[2];
- #endif
-
- long diff[2], best[2], avg[2], diff2[2], out[2], x;
-
-#ifdef USE_VERTICAL_FILTER
- pDest[0] = (3 * pBob[0] + pBob[src_pitch2]) / 4;
- pDest[1] = (3 * pBob[1] + pBob[src_pitch2 + 1]) / 4;
- pDest[2] = (3 * pBob[2] + pBob[src_pitch2 + 2]) / 4;
- pDest[3] = (3 * pBob[3] + pBob[src_pitch2 + 3]) / 4;
- pDest[dst_pitchw] = (pBob[0] + 3 * pBob[src_pitch2]) / 4;
- pDest[dst_pitchw + 1] = (pBob[1] + 3 * pBob[src_pitch2 + 1]) / 4;
- pDest[dst_pitchw + 2] = (pBob[2] + 3 * pBob[src_pitch2 + 2]) / 4;
- pDest[dst_pitchw + 3] = (pBob[3] + 3 * pBob[src_pitch2 + 3]) / 4;
-
- // simple bob last byte
- pDest[Last8] = (3 * pBob[Last8] + pBob[Last8 + src_pitch2]) / 4;
- pDest[Last8 + 1] = (3 * pBob[Last8 + 1] + pBob[Last8 + src_pitch2 + 1]) / 4;
- pDest[Last8 + 2] = (3 * pBob[Last8 + 2] + pBob[Last8 + src_pitch2 + 2]) / 4;
- pDest[Last8 + 3] = (3 * pBob[Last8 + 3] + pBob[Last8 + src_pitch2 + 3]) / 4;
- pDest[Last8 + src_pitch2] = (pBob[Last8] + 3 * pBob[Last8 + src_pitch2]) / 4;
- pDest[Last8 + src_pitch2 + 1] = (pBob[Last8 + 1] + 3 * pBob[Last8 + src_pitch2 + 1]) / 4;
- pDest[Last8 + src_pitch2 + 2] = (pBob[Last8 + 2] + 3 * pBob[Last8 + src_pitch2 + 2]) / 4;
- pDest[Last8 + src_pitch2 + 3] = (pBob[Last8 + 3] + 3 * pBob[Last8 + src_pitch2 + 3]) / 4;
-#else
-        pDest[0] = (pBob[0] + pBob[src_pitch2]) / 2;
- pDest[1] = (pBob[1] + pBob[src_pitch2 + 1]) / 2;
- pDest[2] = (pBob[2] + pBob[src_pitch2 + 2]) / 2;
- pDest[3] = (pBob[3] + pBob[src_pitch2 + 3]) / 2;
-
- // simple bob last byte
- pDest[Last8] = (pBob[Last8] + pBob[Last8 + src_pitch2]) / 2;
- pDest[Last8 + 1] = (pBob[Last8 + 1] + pBob[Last8 + src_pitch2 + 1]) / 2;
- pDest[Last8 + 2] = (pBob[Last8 + 2] + pBob[Last8 + src_pitch2 + 2]) / 2;
- pDest[Last8 + 3] = (pBob[Last8 + 3] + pBob[Last8 + src_pitch2 + 3]) / 2;
-#endif
-
- pBob += 4;
- pBobP += 4;
- pSrc += 4;
- pSrcP += 4;
-
- for (x=4; x < Last8; x += 2) {
-
-#ifdef USE_STRANGE_BOB
-#include "StrangeBob.inc"
-#else
-#include "WierdBob.inc"
-#endif
-
- // We will keep a slight bias to using the weave pixels
- // from the current location, by rating them by the min distance
- // from the Bob value instead of the avg distance from that value.
- // our best and only rating so far
- diff[0] = diff[1] = 255;
-
-
-#endif
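In the USE_VERTICAL_FILTER branch above, the chained V_PAVGB calls build 1/4 and 3/4 vertical weightings out of nothing but byte averages, which is what the C fallback writes as (3 * top + bottom) / 4 and (top + 3 * bottom) / 4. A small sketch of the idea (results can differ from the exact expressions by one because each average truncates):

#include <stdint.h>

/* Two-stage averaging: take the midpoint, then average each endpoint with
 * the midpoint, giving roughly 3/4 : 1/4 and 1/4 : 3/4 blends. */
static void
quarter_weights (uint8_t top, uint8_t bottom, uint8_t * near_top,
    uint8_t * near_bottom)
{
  uint8_t mid = (uint8_t) ((top + bottom) / 2);

  *near_top = (uint8_t) ((top + mid) / 2);        /* ~ (3 * top + bottom) / 4 */
  *near_bottom = (uint8_t) ((bottom + mid) / 2);  /* ~ (top + 3 * bottom) / 4 */
}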
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopVA.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopVA.inc
deleted file mode 100644
index 3e3d19b5..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopVA.inc
+++ /dev/null
@@ -1,6 +0,0 @@
-// -*- c++ -*-
-
-// Searches the center vertical line above center and below, in both the old
-// and new fields, but takes averages. These are even pixel addresses.
- MERGE4PIXavg("(%%"XDI", %%"XCX", 2)", "(%%"XSI")") // down, up
- MERGE4PIXavg("(%%"XDI")", "(%%"XSI", %%"XCX", 2)") // up, down
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopVAH.inc b/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopVAH.inc
deleted file mode 100644
index 33155bc1..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/SearchLoopVAH.inc
+++ /dev/null
@@ -1,6 +0,0 @@
-// -*- c++ -*-
-
-// Searches the center vertical line above center and below, in both the old
-// and new fields, but takes averages. These are even pixel addresses.
- MERGE4PIXavgH("(%%"XDI", %%"XCX", 2)", "(%%"XDI", %%"XCX")", "(%%"XSI", %%"XCX")", "(%%"XSI")") // down, up
- MERGE4PIXavgH("(%%"XDI")", "(%%"XDI", %%"XCX")", "(%%"XSI", %%"XCX")", "(%%"XSI", %%"XCX", 2)") // up, down
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/StrangeBob.inc b/gst/deinterlace2/tvtime/tomsmocomp/StrangeBob.inc
deleted file mode 100644
index 45b4c865..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/StrangeBob.inc
+++ /dev/null
@@ -1,435 +0,0 @@
-// -*- c++ -*-
-
- // First, get and save our possible Bob values
-    // Assume our pixels are laid out as follows with x the calc'd bob value
- // and the other pixels are from the current field
- //
- // j a b c k current field
- // x calculated line
- // m d e f n current field
- //
-    // we calc the bob luma value as:
- // if |j - n| < Thres && |a - m| > Thres
- // avg(j,n)
- // end if
- // if |k - m| < Thres && |c - n| > Thres
- // avg(k,m)
- // end if
- // if |c - d| < Thres && |b - f| > Thres
- // avg(c,d)
- // end if
- // if |a - f| < Thres && |b - d| > Thres
- // avg(a,f)
- // end if
- // if |b - e| < Thres
- // avg(b,e)
- // end if
-    // pick up anything not yet set with avg(b,e)
-
-#ifndef IS_C
-
- // j, n
- "pxor %%mm5, %%mm5\n\t"
- "pxor %%mm6, %%mm6\n\t"
- "pxor %%mm7, %%mm7\n\t"
-
- "movq -2(%%"XBX"), %%mm0\n\t" // value a from top left
- "movq -4(%%"XBX", %%"XCX"), %%mm1\n\t" // value m from bottom right
-
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm3\n\t" // abs(a,m)
-
- "psubusb "_DiffThres", %%mm3\n\t" // nonzero where abs(a,m) > Thres else 0
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where abs(a,m) < Thres, else 00
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where abs(a,m) > Thres, else 00
-
-
- "movq -4(%%"XBX"), %%mm0\n\t" // value j
- "movq 4(%%"XBX", %%"XCX"), %%mm1\n\t" // value n
- "movq %%mm0, %%mm2\n\t"
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(j,n)
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm0\n\t"
- "psubusb %%mm3, %%mm1\n\t"
- "por %%mm1, %%mm0\n\t" // abs(j,n)
-
- "movq %%mm0, %%mm1\n\t"
- "psubusb "_DiffThres", %%mm1\n\t" // nonzero where abs(j,n) > Thres else 0
- "pxor %%mm3, %%mm3\n\t"
- "pcmpeqb %%mm3, %%mm1\n\t" // now ff where abs(j,n) < Thres, else 00
-
- "pand %%mm4, %%mm1\n\t"
- "pand %%mm1, %%mm2\n\t"
- "pand %%mm1, %%mm0\n\t"
-
- "movq %%mm1, %%mm3\n\t"
- "pxor %%mm5, %%mm3\n\t"
- "pand %%mm3, %%mm6\n\t"
- "pand %%mm3, %%mm7\n\t"
- "pand %%mm3, %%mm5\n\t"
-
- "por %%mm1, %%mm5\n\t"
- "por %%mm2, %%mm6\n\t"
- "por %%mm0, %%mm7\n\t"
-
- // k & m
- "movq 2(%%"XBX"), %%mm0\n\t" // value c from top left
- "movq 4(%%"XBX", %%"XCX"), %%mm1\n\t" // value n from bottom right
-
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm3\n\t" // abs(c,n)
-
- "psubusb "_DiffThres", %%mm3\n\t" // nonzero where abs(c,n) > Thres else 0
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where abs(c,n) < Thres, else 00
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where abs(c,n) > Thres, else 00
-
-
- "movq 4(%%"XBX"), %%mm0\n\t" // value k
- "movq -4(%%"XBX", %%"XCX"), %%mm1\n\t" // value m
- "movq %%mm0, %%mm2\n\t"
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(k,m)
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm0\n\t"
- "psubusb %%mm3, %%mm1\n\t"
- "por %%mm1, %%mm0\n\t" // abs(k,m)
-
- "movq %%mm0, %%mm1\n\t"
- "psubusb "_DiffThres", %%mm1\n\t" // nonzero where abs(k,m) > Thres else 0
- "pxor %%mm3, %%mm3\n\t"
- "pcmpeqb %%mm3, %%mm1\n\t" // now ff where abs(k,m) < Thres, else 00
-
- "pand %%mm4, %%mm1\n\t"
-
- "pand %%mm1, %%mm2\n\t"
- "pand %%mm1, %%mm0\n\t"
-
- "movq %%mm1, %%mm3\n\t"
- "pxor %%mm5, %%mm3\n\t"
- "pand %%mm3, %%mm6\n\t"
- "pand %%mm3, %%mm7\n\t"
- "pand %%mm3, %%mm5\n\t"
-
- "por %%mm1, %%mm5\n\t"
- "por %%mm2, %%mm6\n\t"
- "por %%mm0, %%mm7\n\t"
-
-
- // c & d
- "movq (%%"XBX"), %%mm0\n\t" // value b from top left
- "movq 2(%%"XBX", %%"XCX"), %%mm1\n\t" // value f from bottom right
-
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm3\n\t" // abs(b,f)
-
- "psubusb "_DiffThres", %%mm3\n\t" // nonzero where abs(b,f) > Thres else 0
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where abs(b,f) < Thres, else 00
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where abs(b,f) > Thres, else 00
-
- "movq 2(%%"XBX"), %%mm0\n\t" // value c
- "movq -2(%%"XBX", %%"XCX"), %%mm1\n\t" // value d
- "movq %%mm0, %%mm2\n\t"
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(c,d)
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm0\n\t"
- "psubusb %%mm3, %%mm1\n\t"
- "por %%mm1, %%mm0\n\t" // abs(c,d)
-
- "movq %%mm0, %%mm1\n\t"
- "psubusb "_DiffThres", %%mm1\n\t" // nonzero where abs(c,d) > Thres else 0
- "pxor %%mm3, %%mm3\n\t"
- "pcmpeqb %%mm3, %%mm1\n\t" // now ff where abs(c,d) < Thres, else 00
-
- "pand %%mm4, %%mm1\n\t"
-
- "pand %%mm1, %%mm2\n\t"
- "pand %%mm1, %%mm0\n\t"
-
- "movq %%mm1, %%mm3\n\t"
- "pxor %%mm5, %%mm3\n\t"
- "pand %%mm3, %%mm6\n\t"
- "pand %%mm3, %%mm7\n\t"
- "pand %%mm3, %%mm5\n\t"
-
- "por %%mm1, %%mm5\n\t"
- "por %%mm2, %%mm6\n\t"
- "por %%mm0, %%mm7\n\t"
-
- // a & f
- "movq (%%"XBX"), %%mm0\n\t" // value b from top left
- "movq -2(%%"XBX", %%"XCX"), %%mm1\n\t" // value d from bottom right
-
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm3\n\t" // abs(b,d)
-
- "psubusb "_DiffThres", %%mm3\n\t" // nonzero where abs(b,d) > Thres else 0
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where abs(b,d) < Thres, else 00
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where abs(b,d) > Thres, else 00
-
- "movq -2(%%"XBX"), %%mm0\n\t" // value a
- "movq 2(%%"XBX", %%"XCX"), %%mm1\n\t" // value f
- "movq %%mm0, %%mm2\n\t"
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(a,f)
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm0\n\t"
- "psubusb %%mm3, %%mm1\n\t"
- "por %%mm1, %%mm0\n\t" // abs(a,f)
-
- "movq %%mm0, %%mm1\n\t"
- "psubusb "_DiffThres", %%mm1\n\t" // nonzero where abs(a,f) > Thres else 0
- "pxor %%mm3, %%mm3\n\t"
- "pcmpeqb %%mm3, %%mm1\n\t" // now ff where abs(a,f) < Thres, else 00
-
- "pand %%mm4, %%mm1\n\t"
-
- "pand %%mm1, %%mm2\n\t"
- "pand %%mm1, %%mm0\n\t"
-
- "movq %%mm1, %%mm3\n\t"
- "pxor %%mm5, %%mm3\n\t"
- "pand %%mm3, %%mm6\n\t"
- "pand %%mm3, %%mm7\n\t"
- "pand %%mm3, %%mm5\n\t"
-
- "por %%mm1, %%mm5\n\t"
- "por %%mm2, %%mm6\n\t"
- "por %%mm0, %%mm7\n\t"
-
- "pand "_YMask", %%mm5\n\t" // mask out chroma from here
- "pand "_YMask", %%mm6\n\t" // mask out chroma from here
- "pand "_YMask", %%mm7\n\t" // mask out chroma from here
-
- // b,e
- "movq (%%"XBX"), %%mm0\n\t" // value b from top
- "movq (%%"XBX", %%"XCX"), %%mm1\n\t" // value e from bottom
- "movq %%mm0, %%mm2\n\t"
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(b,e)
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm0\n\t"
- "psubusb %%mm3, %%mm1\n\t"
- "por %%mm1, %%mm0\n\t" // abs(b,e)
-
- "movq %%mm0, %%mm1\n\t"
- "psubusb "_DiffThres", %%mm1\n\t" // nonzero where abs(b,e) > Thres else 0
- "pxor %%mm3, %%mm3\n\t"
- "pcmpeqb %%mm3, %%mm1\n\t" // now ff where abs(b,e) < Thres, else 00
-
- "pand %%mm1, %%mm2\n\t"
- "pand %%mm1, %%mm0\n\t"
-
- "movq %%mm1, %%mm3\n\t"
- "pxor %%mm5, %%mm3\n\t"
- "pand %%mm3, %%mm6\n\t"
- "pand %%mm3, %%mm7\n\t"
- "pand %%mm3, %%mm5\n\t"
-
- "por %%mm1, %%mm5\n\t"
- "por %%mm2, %%mm6\n\t"
- "por %%mm0, %%mm7\n\t"
-
- // bob in any leftovers
- "movq (%%"XBX"), %%mm0\n\t" // value b from top
- "movq (%%"XBX", %%"XCX"), %%mm1\n\t" // value e from bottom
-
-
-// We will also calc here the max/min values to later limit comb
-// so the max excursion will not exceed the Max_Comb constant
-
-#ifdef SKIP_SEARCH
- "movq %%mm0, %%mm2\n\t"
-// pminub %%mm2, %%mm1
- V_PMINUB ("%%mm2", "%%mm1", "%%mm4")
-
-// pmaxub %%mm6, %%mm2 // clip our current results so far to be above this
- V_PMAXUB ("%%mm6", "%%mm2")
- "movq %%mm0, %%mm2\n\t"
- V_PMAXUB ("%%mm2", "%%mm1")
-// pminub %%mm6, %%mm2 // clip our current results so far to be below this
- V_PMINUB ("%%mm6", "%%mm2", "%%mm4")
-
-#else
- "movq %%mm0, %%mm2\n\t"
- "movq (%%"XAX"), %%mm4\n\t"
- "psubusb %%mm4, %%mm2\n\t"
- "psubusb %%mm0, %%mm4\n\t"
- "por %%mm2, %%mm4\n\t" // abs diff
-
- "movq %%mm1, %%mm2\n\t"
- "movq (%%"XAX", %%"XCX"), %%mm3\n\t"
- "psubusb %%mm3, %%mm2\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "por %%mm2, %%mm3\n\t" // abs diff
-// pmaxub %%mm3, %%mm4 // top or bottom pixel moved most
- V_PMAXUB ("%%mm3", "%%mm4") // top or bottom pixel moved most
- "psubusb "_DiffThres", %%mm3\n\t" // moved more than allowed? or goes to 0?
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where low motion, else high motion
-
- "movq %%mm0, %%mm2\n\t"
-// pminub %%mm2, %%mm1
- V_PMINUB ("%%mm2", "%%mm1", "%%mm4")
-
-// pmaxub %%mm6, %%mm2 // clip our current results so far to be above this
- V_PMAXUB ("%%mm6", "%%mm2")
-
- "psubusb %%mm3, %%mm2\n\t" // maybe decrease it to 0000.. if no surround motion
- "movq %%mm2, "_Min_Vals"\n\t"
-
- "movq %%mm0, %%mm2\n\t"
- V_PMAXUB ("%%mm2", "%%mm1")
-// pminub %%mm6, %%mm2 // clip our current results so far to be below this
- V_PMINUB ("%%mm6", "%%mm2", "%%mm4")
- "paddusb %%mm3, %%mm2\n\t" // maybe increase it to ffffff if no surround motion
- "movq %%mm2, "_Max_Vals"\n\t"
-#endif
-
- "movq %%mm0, %%mm2\n\t"
-// pavgb %%mm2, %%mm1 // avg(b,e)
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(b,e)
-
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm3\n\t" // abs(b,e)
- "movq %%mm3, %%mm1\n\t" // keep copy of diffs
-
- "pxor %%mm4, %%mm4\n\t"
- "psubusb %%mm7, %%mm3\n\t" // nonzero where new weights bigger, else 0
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where new better, else 00
- "pcmpeqb %%mm0, %%mm0\n\t"
- "pandn %%mm0, %%mm5\n\t"
- "por %%mm5, %%mm3\n\t"
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where old better, else 00
-
- "pand %%mm3, %%mm1\n\t"
- "pand %%mm3, %%mm2\n\t"
-
- "pand %%mm4, %%mm6\n\t"
- "pand %%mm4, %%mm7\n\t"
-
- "por %%mm2, %%mm6\n\t" // our x2 value
- "por %%mm1, %%mm7\n\t" // our x2 diffs
- "movq %%mm7, %%mm4\n\t" // save as bob uncertainty indicator
-
-#else
-
- diff[0] = -1;
- diff[1] = -1;
- best[0] = 0;
- best[1] = 0;
- // j, n
- if (ABS (pBob[-2] - pBob[src_pitch2 - 4]) < DiffThres &&
- ABS (pBob[-4] - pBob[src_pitch2 + 4]) > DiffThres) {
- best[0] = (pBob[-2] + pBob[src_pitch2 - 4]) / 2;
- diff[0] = ABS (pBob[-2] - pBob[src_pitch2 - 4]);
- }
- if (ABS (pBob[-1] - pBob[src_pitch2 - 3]) < DiffThres &&
- ABS (pBob[-3] - pBob[src_pitch2 + 5]) > DiffThres) {
- best[1] = (pBob[-1] + pBob[src_pitch2 - 3]) / 2;
- diff[1] = ABS (pBob[-1] - pBob[src_pitch2 - 3]);
- }
-
- // k & m
- if (ABS (pBob[2] - pBob[src_pitch2 + 4]) < DiffThres &&
- ABS (pBob[4] - pBob[src_pitch2 - 4]) > DiffThres) {
- best[0] = (pBob[4] + pBob[src_pitch2 - 4]) / 2;
- diff[0] = ABS (pBob[4] - pBob[src_pitch2 - 4]);
- }
-
- if (ABS (pBob[3] - pBob[src_pitch2 + 5]) < DiffThres &&
- ABS (pBob[5] - pBob[src_pitch2 - 3]) > DiffThres) {
- best[1] = (pBob[5] + pBob[src_pitch2 - 3]) / 2;
- diff[1] = ABS (pBob[5] - pBob[src_pitch2 - 3]);
- }
-
- // c & d
- if (ABS (pBob[0] - pBob[src_pitch2 + 2]) < DiffThres &&
- ABS (pBob[2] - pBob[src_pitch2 - 2]) > DiffThres) {
- best[0] = (pBob[2] + pBob[src_pitch2 - 2]) / 2;
- diff[0] = ABS (pBob[2] - pBob[src_pitch2 - 2]);
- }
-
- if (ABS (pBob[1] - pBob[src_pitch2 + 3]) < DiffThres &&
- ABS (pBob[3] - pBob[src_pitch2 - 1]) > DiffThres) {
- best[1] = (pBob[3] + pBob[src_pitch2 - 1]) / 2;
- diff[1] = ABS (pBob[3] - pBob[src_pitch2 - 1]);
- }
-
- // a & f
- if (ABS (pBob[0] - pBob[src_pitch2 - 2]) < DiffThres &&
- ABS (pBob[-2] - pBob[src_pitch2 + 2]) > DiffThres) {
- best[0] = (pBob[-2] + pBob[src_pitch2 + 2]) / 2;
- diff[0] = ABS (pBob[-2] - pBob[src_pitch2 + 2]);
- }
-
- if (ABS (pBob[1] - pBob[src_pitch2 - 1]) < DiffThres &&
- ABS (pBob[-1] - pBob[src_pitch2 + 3]) > DiffThres) {
- best[1] = (pBob[-1] + pBob[src_pitch2 + 3]) / 2;
- diff[1] = ABS (pBob[-1] - pBob[src_pitch2 + 3]);
- }
-
- // b,e
- if (ABS (pBob[0] - pBob[src_pitch2]) < DiffThres) {
- best[0] = (pBob[0] + pBob[src_pitch2]) / 2;
- diff[0] = ABS (pBob[0] - pBob[src_pitch2]);
- }
-
- if (ABS (pBob[1] - pBob[src_pitch2 + 1]) < DiffThres) {
- best[1] = (pBob[1] + pBob[src_pitch2 + 1]) / 2;
- diff[1] = ABS (pBob[1] - pBob[src_pitch2 + 1]);
- }
-
-
-// We will also calc here the max/min values to later limit comb
-// so the max excursion will not exceed the Max_Comb constant
-
-#ifdef SKIP_SEARCH
- best[0] = CLAMP (best[0], MIN (pBob[src_pitch2], pBob[0]), MAX (pBob[src_pitch2], pBob[0]));
- best[1] = CLAMP (best[1], MIN (pBob[src_pitch2 + 1], pBob[1]), MAX (pBob[src_pitch2 + 1], pBob[1]));
-#else
- mov[0] = MAX (ABS (pBob[0] - pBobP[0]), ABS (pBob[src_pitch2] - pBobP[src_pitch2]));
- mov[1] = MAX (ABS (pBob[1] - pBobP[1]), ABS (pBob[src_pitch2 + 1] - pBobP[src_pitch2 + 1]));
-
- MinVals[0] = 0;
- MinVals[1] = 0;
- MaxVals[0] = 255;
- MaxVals[1] = 255;
- if (mov[0] > DiffThres) {
- MinVals[0] = MAX (MIN (pBob[0], pBob[src_pitch2]), best[0]);
- MaxVals[0] = MIN (MAX (pBob[0], pBob[src_pitch2]), best[0]);
- }
-
- if (mov[1] > DiffThres) {
- MinVals[1] = MAX (MIN (pBob[1], pBob[src_pitch2+1]), best[1]);
- MaxVals[1] = MIN (MAX (pBob[1], pBob[src_pitch2+1]), best[1]);
- }
-
- best[0] = CLAMP (best[0], MIN (pBob[src_pitch2], pBob[0]), MAX (pBob[src_pitch2], pBob[0]));
- best[1] = CLAMP (best[1], MIN (pBob[src_pitch2 + 1], pBob[1]), MAX (pBob[src_pitch2 + 1], pBob[1]));
-#endif
- avg[0] = (pBob[src_pitch2] + pBob[0]) / 2;
- avg[1] = (pBob[src_pitch2 + 1] + pBob[1]) / 2;
-      diff2[0] = ABS (pBob[src_pitch2] - pBob[0]);
- diff2[1] = ABS (pBob[src_pitch2 + 1] - pBob[1]);
-
- if (diff[0] == -1 || diff2[0] < diff[0]) {
- best[0] = avg[0];
- diff[0] = diff2[0];
- }
-
- if (diff[1] == -1 || diff2[1] < diff[1]) {
- best[1] = avg[1];
- diff[1] = diff2[1];
- }
-#endif
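The psubusb/psubusb/por triple that repeats throughout the block above is the MMX idiom for a per-byte absolute difference: each saturating subtract clamps its negative direction to zero, so OR-ing the two one-sided results leaves |a - b|. A one-byte C sketch:

#include <stdint.h>

/* |a - b| for unsigned bytes without a signed subtract. */
static uint8_t
absdiff_u8 (uint8_t a, uint8_t b)
{
  uint8_t d1 = (a > b) ? (uint8_t) (a - b) : 0;   /* psubusb b, a */
  uint8_t d2 = (b > a) ? (uint8_t) (b - a) : 0;   /* psubusb a, b */

  return (uint8_t) (d1 | d2);                     /* por: the non-zero side wins */
}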
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc b/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc
deleted file mode 100644
index 89ed39e4..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll.inc
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- * GStreamer
- * Copyright (c) 2002 Tom Barry All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License along with this library; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 02111-1307, USA.
- */
-
-/*
- * Relicensed for GStreamer from GPL to LGPL with permit from Tom Barry.
- * See: http://bugzilla.gnome.org/show_bug.cgi?id=163578
- */
-
-
-#ifndef TopFirst
-#define TopFirst IsOdd
-#endif
-
-#ifdef SEFUNC
-#undef SEFUNC
-#endif
-
-#if defined(IS_MMXEXT)
-#define SEFUNC(x) Search_Effort_MMXEXT_##x(int src_pitch, int dst_pitch, int rowsize, const unsigned char *pWeaveSrc, const unsigned char *pWeaveSrcP, unsigned char *pWeaveDest, int IsOdd, const unsigned char *pCopySrc, const unsigned char *pCopySrcP, int FldHeight)
-#elif defined(IS_3DNOW)
-#define SEFUNC(x) Search_Effort_3DNOW_##x(int src_pitch, int dst_pitch, int rowsize, const unsigned char *pWeaveSrc, const unsigned char *pWeaveSrcP, unsigned char *pWeaveDest, int IsOdd, const unsigned char *pCopySrc, const unsigned char *pCopySrcP, int FldHeight)
-#elif defined(IS_MMX)
-#define SEFUNC(x) Search_Effort_MMX_##x(int src_pitch, int dst_pitch, int rowsize, const unsigned char *pWeaveSrc, const unsigned char *pWeaveSrcP, unsigned char *pWeaveDest, int IsOdd, const unsigned char *pCopySrc, const unsigned char *pCopySrcP, int FldHeight)
-#else
-#define SEFUNC(x) Search_Effort_C_##x(int src_pitch, int dst_pitch, int rowsize, const unsigned char *pWeaveSrc, const unsigned char *pWeaveSrcP, unsigned char *pWeaveDest, int IsOdd, const unsigned char *pCopySrc, const unsigned char *pCopySrcP, int FldHeight)
-#endif
-
-#include "TomsMoCompAll2.inc"
-
-#define USE_STRANGE_BOB
-
-#include "TomsMoCompAll2.inc"
-
-#undef USE_STRANGE_BOB
-
-#undef SEFUNC
-#if defined(IS_MMXEXT)
-#define SEFUNC(x) Search_Effort_MMXEXT_##x(src_pitch, dst_pitch, rowsize, pWeaveSrc, pWeaveSrcP, pWeaveDest, IsOdd, pCopySrc, pCopySrcP, FldHeight)
-#elif defined(IS_3DNOW)
-#define SEFUNC(x) Search_Effort_3DNOW_##x(src_pitch, dst_pitch, rowsize, pWeaveSrc, pWeaveSrcP, pWeaveDest, IsOdd, pCopySrc, pCopySrcP, FldHeight)
-#elif defined(IS_MMX)
-#define SEFUNC(x) Search_Effort_MMX_##x(src_pitch, dst_pitch, rowsize, pWeaveSrc, pWeaveSrcP, pWeaveDest, IsOdd, pCopySrc, pCopySrcP, FldHeight)
-#else
-#define SEFUNC(x) Search_Effort_C_##x(src_pitch, dst_pitch, rowsize, pWeaveSrc, pWeaveSrcP, pWeaveDest, IsOdd, pCopySrc, pCopySrcP, FldHeight)
-#endif
-
-void FUNCT_NAME(GstDeinterlaceMethod *d_method, GstDeinterlace2* object, GstBuffer *outbuf)
-{
- GstDeinterlaceMethodTomsMoComp *self = GST_DEINTERLACE_METHOD_TOMSMOCOMP (d_method);
- long SearchEffort = self->search_effort;
- int UseStrangeBob = self->strange_bob;
- int IsOdd;
- const unsigned char *pWeaveSrc;
- const unsigned char *pWeaveSrcP;
- unsigned char *pWeaveDest;
- const unsigned char *pCopySrc;
- const unsigned char *pCopySrcP;
- unsigned char *pCopyDest;
- int src_pitch;
- int dst_pitch;
- int rowsize;
- int FldHeight;
-
-  /* double stride to address just every other (odd/even) scanline */
- src_pitch = object->field_stride;
- dst_pitch = object->row_stride;
- rowsize = object->row_stride;
- FldHeight = object->field_height;
-
- pCopySrc = GST_BUFFER_DATA(object->field_history[object->history_count-1].buf);
- pCopySrcP = GST_BUFFER_DATA(object->field_history[object->history_count-3].buf);
- pWeaveSrc = GST_BUFFER_DATA(object->field_history[object->history_count-2].buf);
- pWeaveSrcP = GST_BUFFER_DATA(object->field_history[object->history_count-4].buf);
-
- /* use bottom field and interlace top field */
- if (object->field_history[object->history_count-2].flags == PICTURE_INTERLACED_BOTTOM) {
- IsOdd = 1;
-
- // if we have an odd field we copy an even field and weave an odd field
- pCopyDest = GST_BUFFER_DATA(outbuf);
- pWeaveDest = pCopyDest + dst_pitch;
- }
-  /* do it vice versa */
- else {
-
- IsOdd = 0;
- // if we have an even field we copy an odd field and weave an even field
- pCopyDest = GST_BUFFER_DATA(outbuf) + dst_pitch;
- pWeaveDest = GST_BUFFER_DATA(outbuf);
- }
-
-
- // copy 1st and last weave lines
- Fieldcopy(pWeaveDest, pCopySrc, rowsize,
- 1, dst_pitch*2, src_pitch);
- Fieldcopy(pWeaveDest+(FldHeight-1)*dst_pitch*2,
- pCopySrc+(FldHeight-1)*src_pitch, rowsize,
- 1, dst_pitch*2, src_pitch);
-
-#ifdef USE_VERTICAL_FILTER
- // Vertical Filter currently not implemented for DScaler !!
- // copy 1st and last lines the copy field
- Fieldcopy(pCopyDest, pCopySrc, rowsize,
- 1, dst_pitch*2, src_pitch);
- Fieldcopy(pCopyDest+(FldHeight-1)*dst_pitch*2,
- pCopySrc+(FldHeight-1)*src_pitch, rowsize,
- 1, dst_pitch*2, src_pitch);
-#else
-
- // copy all of the copy field
- Fieldcopy(pCopyDest, pCopySrc, rowsize,
- FldHeight, dst_pitch*2, src_pitch);
-#endif
- // then go fill in the hard part, being variously lazy depending upon
- // SearchEffort
-
- if(!UseStrangeBob) {
- if (SearchEffort == 0)
- {
- SEFUNC(0);
- }
- else if (SearchEffort <= 1)
- {
- SEFUNC(1);
- }
- /* else if (SearchEffort <= 2)
- {
- SEFUNC(2);
- }
- */
- else if (SearchEffort <= 3)
- {
- SEFUNC(3);
- }
- else if (SearchEffort <= 5)
- {
- SEFUNC(5);
- }
- else if (SearchEffort <= 9)
- {
- SEFUNC(9);
- }
- else if (SearchEffort <= 11)
- {
- SEFUNC(11);
- }
- else if (SearchEffort <= 13)
- {
- SEFUNC(13);
- }
- else if (SearchEffort <= 15)
- {
- SEFUNC(15);
- }
- else if (SearchEffort <= 19)
- {
- SEFUNC(19);
- }
- else if (SearchEffort <= 21)
- {
- SEFUNC(21);
- }
- else
- {
- SEFUNC(Max);
- }
- }
- else
- {
- if (SearchEffort == 0)
- {
- SEFUNC(0SB);
- }
- else if (SearchEffort <= 1)
- {
- SEFUNC(1SB);
- }
- /* else if (SearchEffort <= 2)
- {
- SEFUNC(2SB);
- }
- */
- else if (SearchEffort <= 3)
- {
- SEFUNC(3SB);
- }
- else if (SearchEffort <= 5)
- {
- SEFUNC(5SB);
- }
- else if (SearchEffort <= 9)
- {
- SEFUNC(9SB);
- }
- else if (SearchEffort <= 11)
- {
- SEFUNC(11SB);
- }
- else if (SearchEffort <= 13)
- {
- SEFUNC(13SB);
- }
- else if (SearchEffort <= 15)
- {
- SEFUNC(15SB);
- }
- else if (SearchEffort <= 19)
- {
- SEFUNC(19SB);
- }
- else if (SearchEffort <= 21)
- {
- SEFUNC(21SB);
- }
- else
- {
- SEFUNC(MaxSB);
- }
- }
-
-#if defined(BUILD_X86_ASM) && !defined(IS_C)
- __asm__ __volatile__("emms");
-#endif
-}
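SEFUNC() relies on preprocessor token pasting: the ## operator glues the SIMD prefix to the search-effort suffix, so the same include file first declares and later calls names such as Search_Effort_MMXEXT_21SB or Search_Effort_C_9. A standalone illustration of the mechanism (the macros below exist only for demonstration and are not part of the plugin):

#include <stdio.h>

#define PASTE2(a, b) a##b
#define PASTE(a, b) PASTE2 (a, b)
#define STR2(x) #x
#define STR(x) STR2 (x)

int
main (void)
{
  /* Prints the identifier that SEFUNC(21SB) would resolve to for MMXEXT. */
  printf ("%s\n", STR (PASTE (Search_Effort_MMXEXT_, 21SB)));
  return 0;
}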
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll2.inc b/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll2.inc
deleted file mode 100644
index f6344eab..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/TomsMoCompAll2.inc
+++ /dev/null
@@ -1,243 +0,0 @@
-// -*- c++ -*-
-
-#ifdef SEARCH_EFFORT_FUNC
-#undef SEARCH_EFFORT_FUNC
-#endif
-
-#ifdef USE_STRANGE_BOB
-#define SEARCH_EFFORT_FUNC(n) SEFUNC(n##SB)
-#else
-#define SEARCH_EFFORT_FUNC(n) SEFUNC(n)
-#endif
-
-static inline int SEARCH_EFFORT_FUNC(0) // we don't try at all ;-)
-{
- //see Search_Effort_Max() for comments
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-}
-
-static inline int SEARCH_EFFORT_FUNC(1)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see Search_Effort_Max() for comments
-#include "SearchLoopTop.inc"
- RESET_CHROMA // pretend chroma diffs was 255 each
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-static inline int SEARCH_EFFORT_FUNC(3)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see Search_Effort_Max() for comments
-#include "SearchLoopTop.inc"
-#include "SearchLoopOddA2.inc"
- RESET_CHROMA // pretend chroma diffs was 255 each
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-static inline int SEARCH_EFFORT_FUNC(5)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see Search_Effort_Max() for comments
-#include "SearchLoopTop.inc"
-#include "SearchLoopOddA2.inc"
-#include "SearchLoopOddAH2.inc"
- RESET_CHROMA // pretend chroma diffs was 255 each
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-// 3x3 search
-static inline int SEARCH_EFFORT_FUNC(9)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see SearchEffortMax() for comments
-#include "SearchLoopTop.inc"
-#include "SearchLoopOddA.inc"
- RESET_CHROMA // pretend chroma diffs was 255 each
-#include "SearchLoopVA.inc"
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-// Search 9 with 2 H-half pels added
-static inline int SEARCH_EFFORT_FUNC(11)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see SearchEffortMax() for comments
-#include "SearchLoopTop.inc"
-#include "SearchLoopOddA.inc"
-#include "SearchLoopOddAH2.inc"
- RESET_CHROMA // pretend chroma diffs was 255 each
-#include "SearchLoopVA.inc"
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-// Search 11 with 2 V-half pels added
-static inline int SEARCH_EFFORT_FUNC(13)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see SearchEffortMax() for comments
-#include "SearchLoopTop.inc"
-#include "SearchLoopOddA.inc"
-#include "SearchLoopOddAH2.inc"
- RESET_CHROMA // pretend chroma diffs was 255 each
-#include "SearchLoopVAH.inc"
-#include "SearchLoopVA.inc"
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-// 5x3
-static inline int SEARCH_EFFORT_FUNC(15)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see SearchEffortMax() for comments
-#include "SearchLoopTop.inc"
-#include "SearchLoopOddA.inc"
- RESET_CHROMA // pretend chroma diffs was 255 each
-#include "SearchLoopEdgeA.inc"
-#include "SearchLoopVA.inc"
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-// 5x3 + 4 half pels
-static inline int SEARCH_EFFORT_FUNC(19)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see SearchEffortMax() for comments
-#include "SearchLoopTop.inc"
-#include "SearchLoopOddA.inc"
-#include "SearchLoopOddAH2.inc"
- RESET_CHROMA // pretend chroma diffs was 255 each
-#include "SearchLoopEdgeA.inc"
-#include "SearchLoopVAH.inc"
-#include "SearchLoopVA.inc"
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-// Handle one 4x1 block of pixels
-// Search a 7x3 area, no half pels
-
-static inline int SEARCH_EFFORT_FUNC(21)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see SearchLoopTop.inc for comments
-#include "SearchLoopTop.inc"
-
- // odd addresses -- the pixels at odd address wouldn't generate
- // good luma values but we will mask those off
-
-#include "SearchLoopOddA6.inc" // 4 odd v half pels, 3 to left & right
-#include "SearchLoopOddA.inc" // 6 odd pels, 1 to left & right
-
- RESET_CHROMA // pretend chroma diffs was 255 each
-
- // even addresses -- use both luma and chroma from these
- // search averages of 2 pixels left and right
-#include "SearchLoopEdgeA.inc"
- // search vertical line and averages, -1,0,+1
-#include "SearchLoopVA.inc"
- // blend our results and loop
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-// Handle one 4x1 block of pixels
-// Search a 9x3 area, no half pels
-static inline int SEARCH_EFFORT_FUNC(Max)
-{
-#ifdef IS_C
-#define SKIP_SEARCH
-#include "SearchLoopTop.inc"
-#include "SearchLoopBottom.inc"
-#undef SKIP_SEARCH
-#else
- //see SearchLoopTop.inc for comments
-#include "SearchLoopTop.inc"
-
- // odd addresses -- the pixels at odd address wouldn't generate
- // good luma values but we will mask those off
-
-#include "SearchLoopOddA6.inc" // 4 odd v half pels, 3 to left & right
-#include "SearchLoopOddA.inc" // 6 odd pels, 1 to left & right
-
- RESET_CHROMA // pretend chroma diffs was 255 each
-
- // even addresses -- use both luma and chroma from these
- // search averages of 4 pixels left and right
-#include "SearchLoopEdgeA8.inc"
- // search averages of 2 pixels left and right
-#include "SearchLoopEdgeA.inc"
- // search vertical line and averages, -1,0,+1
-#include "SearchLoopVA.inc"
- // blend our results and loop
-#include "SearchLoop0A.inc"
-#include "SearchLoopBottom.inc"
-#endif
-}
-
-#undef SEARCH_EFFORT_FUNC
-
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/WierdBob.inc b/gst/deinterlace2/tvtime/tomsmocomp/WierdBob.inc
deleted file mode 100644
index f4bbb830..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/WierdBob.inc
+++ /dev/null
@@ -1,286 +0,0 @@
-// -*- c++ -*-
-
- // First, get and save our possible Bob values
-    // Assume our pixels are laid out as follows with x the calc'd bob value
- // and the other pixels are from the current field
- //
- // j a b c k current field
- // x calculated line
- // m d e f n current field
- //
- // we calc the bob value as:
- // x2 = either avg(a,f), avg(c,d), avg(b,e), avg(j,n), or avg(k,m)
-
- // selected for the smallest of abs(a,f), abs(c,d), or abs(b,e), etc.
-
-#ifndef IS_C
- // a,f
- "movq -2(%%"XBX"), %%mm0\n\t" // value a from top left
- "movq 2(%%"XBX", %%"XCX"), %%mm1\n\t" // value f from bottom right
- "movq %%mm0, %%mm6\n\t"
-// pavgb %%mm6, %%mm1 // avg(a,f), also best so far
- V_PAVGB ("%%mm6", "%%mm1", "%%mm7", _ShiftMask) // avg(a,f), also best so far
- "movq %%mm0, %%mm7\n\t"
- "psubusb %%mm1, %%mm7\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm7\n\t" // abs diff, also best so far
-
- // c,d
-        "movq 2(%%"XBX"), %%mm0\n\t"        // value c from top right
-        "movq -2(%%"XBX", %%"XCX"), %%mm1\n\t" // value d from bottom left
- "movq %%mm0, %%mm2\n\t"
-// pavgb %%mm2, %%mm1 // avg(c,d)
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(c,d)
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm3\n\t" // abs(c,d)
- "movq %%mm3, %%mm1\n\t" // keep copy
-
- "psubusb %%mm7, %%mm3\n\t" // nonzero where new weights bigger, else 0
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where new better, else 00
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where old better, else 00
-
- "pand %%mm3, %%mm1\n\t" // keep only better new avg and abs
- "pand %%mm3, %%mm2\n\t"
-
- "pand %%mm4, %%mm6\n\t"
- "pand %%mm4, %%mm7\n\t"
-
- "por %%mm2, %%mm6\n\t" // and merge new & old vals keeping best
- "por %%mm1, %%mm7\n\t"
- "por "_UVMask", %%mm7\n\t" // but we know chroma is worthless so far
- "pand "_YMask", %%mm5\n\t" // mask out chroma from here also
-
- // j,n
- "movq -4(%%"XBX"), %%mm0\n\t" // value j from top left
- "movq 4(%%"XBX", %%"XCX"), %%mm1\n\t" // value n from bottom right
- "movq %%mm0, %%mm2\n\t"
-// pavgb %%mm2, %%mm1 // avg(j,n)
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(j,n)
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm3\n\t" // abs(j-n)
- "movq %%mm3, %%mm1\n\t" // keep copy
-
- "psubusb %%mm7, %%mm3\n\t" // nonzero where new weights bigger, else 0
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where new better, else 00
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where old better, else 00
-
- "pand %%mm3, %%mm1\n\t" // keep only better new avg and abs
- "pand %%mm2, %%mm3\n\t"
-
- "pand %%mm4, %%mm6\n\t"
- "pand %%mm4, %%mm7\n\t"
-
- "por %%mm3, %%mm6\n\t" // and merge new & old vals keeping best
- "por %%mm1, %%mm7\n\t" // "
-
- // k, m
- "movq 4(%%"XBX"), %%mm0\n\t" // value k from top right
-        "movq -4(%%"XBX", %%"XCX"), %%mm1\n\t" // value m from bottom left
- "movq %%mm0, %%mm4\n\t"
-// pavgb %%mm4, %%mm1 // avg(k,m)
- V_PAVGB ("%%mm4", "%%mm1", "%%mm3", _ShiftMask) // avg(k,m)
-
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
- "por %%mm1, %%mm3\n\t" // abs(k,m)
- "movq %%mm3, %%mm1\n\t" // keep copy
-
- "movq %%mm4, %%mm2\n\t" // avg(k,m)
-
- "psubusb %%mm7, %%mm3\n\t" // nonzero where new weights bigger, else 0
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where new better, else 00
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where old better, else 00
-
- "pand %%mm3, %%mm1\n\t" // keep only better new avg and abs
- "pand %%mm2, %%mm3\n\t"
-
- "pand %%mm4, %%mm6\n\t"
- "pand %%mm4, %%mm7\n\t"
-
- "por %%mm3, %%mm6\n\t" // and merge new & old vals keeping best
- "por %%mm1, %%mm7\n\t" // "
-
- // b,e
- "movq (%%"XBX"), %%mm0\n\t" // value b from top
- "movq (%%"XBX", %%"XCX"), %%mm1\n\t" // value e from bottom
-
-// We will also calc here the max/min values to later limit comb
-// so the max excursion will not exceed the Max_Comb constant
-
-#ifdef SKIP_SEARCH
- "movq %%mm0, %%mm2\n\t"
-// pminub %%mm2, %%mm1
- V_PMINUB ("%%mm2", "%%mm1", "%%mm4")
-
-// pmaxub %%mm6, %%mm2 // clip our current results so far to be above this
- V_PMAXUB ("%%mm6", "%%mm2")
- "movq %%mm0, %%mm2\n\t"
- V_PMAXUB ("%%mm2", "%%mm1")
-// pminub %%mm6, %%mm2 // clip our current results so far to be below this
- V_PMINUB ("%%mm6", "%%mm2", "%%mm4")
-
-#else
- "movq %%mm0, %%mm2\n\t"
- "movq (%%"XAX"), %%mm4\n\t"
- "psubusb %%mm4, %%mm2\n\t"
- "psubusb %%mm0, %%mm4\n\t"
- "por %%mm2, %%mm4\n\t" // abs diff
-
- "movq %%mm1, %%mm2\n\t"
- "movq (%%"XAX", %%"XCX"), %%mm3\n\t"
- "psubusb %%mm3, %%mm2\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "por %%mm2, %%mm3\n\t" // abs diff
-// pmaxub %%mm3, %%mm4 // top or bottom pixel moved most
- V_PMAXUB ("%%mm3", "%%mm4") // top or bottom pixel moved most
- "psubusb "_Max_Mov", %%mm3\n\t" // moved more than allowed? or goes to 0?
- "pxor %%mm4, %%mm4\n\t"
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where low motion, else high motion
-
- "movq %%mm0, %%mm2\n\t"
-// pminub %%mm2, %%mm1
- V_PMINUB ("%%mm2", "%%mm1", "%%mm4")
-
-// pmaxub %%mm6, %%mm2 // clip our current results so far to be above this
- V_PMAXUB ("%%mm6", "%%mm2")
-
- "psubusb %%mm3, %%mm2\n\t" // maybe decrease it to 0000.. if no surround motion
- "movq %%mm2, "_Min_Vals"\n\t"
-
- "movq %%mm0, %%mm2\n\t"
- V_PMAXUB ("%%mm2", "%%mm1")
-// pminub %%mm6, %%mm2 // clip our current results so far to be below this
- V_PMINUB ("%%mm6", "%%mm2", "%%mm4")
- "paddusb %%mm3, %%mm2\n\t" // maybe increase it to ffffff if no surround motion
- "movq %%mm2, "_Max_Vals"\n\t"
-#endif
-
- "movq %%mm0, %%mm2\n\t"
-// pavgb %%mm2, %%mm1 // avg(b,e)
- V_PAVGB ("%%mm2", "%%mm1", "%%mm3", _ShiftMask) // avg(b,e)
-
- "movq %%mm0, %%mm3\n\t"
- "psubusb %%mm1, %%mm3\n\t"
- "psubusb %%mm0, %%mm1\n\t"
-        "por %%mm1, %%mm3\n\t"      // abs(b,e)
- "movq %%mm3, %%mm1\n\t" // keep copy of diffs
-
- "pxor %%mm4, %%mm4\n\t"
- "psubusb %%mm7, %%mm3\n\t" // nonzero where new weights bigger, else 0
- "pcmpeqb %%mm4, %%mm3\n\t" // now ff where new better, else 00
-
- "pcmpeqb %%mm3, %%mm4\n\t" // here ff where old better, else 00
-
- "pand %%mm3, %%mm1\n\t"
- "pand %%mm3, %%mm2\n\t"
-
- "pand %%mm4, %%mm6\n\t"
- "pand %%mm4, %%mm7\n\t"
-
- "por %%mm2, %%mm6\n\t" // our x2 value
- "por %%mm1, %%mm7\n\t" // our x2 diffs
- "movq %%mm7, %%mm4\n\t" // save as bob uncertainty indicator
-
-#else
-
- // a,f
- best[0] = (pBob[-2] + pBob[src_pitch2 + 2]) / 2;
- diff[0] = ABS (pBob[-2] - pBob[src_pitch2 + 2]);
- best[1] = (pBob[-1] + pBob[src_pitch2 + 3]) / 2;
- diff[1] = ABS (pBob[-1] - pBob[src_pitch2 + 3]);
-
- // c,d
- if (ABS (pBob[2] - pBob[src_pitch2 - 2]) < diff[0]) {
- best[0] = (pBob[2] + pBob[src_pitch2 - 2]) / 2;
- diff[0] = ABS (pBob[2] - pBob[src_pitch2 - 2]);
- }
-
- if (ABS (pBob[3] - pBob[src_pitch2 - 1]) < diff[1]) {
- best[1] = (pBob[3] + pBob[src_pitch2 - 1]) / 2;
- diff[1] = ABS (pBob[3] - pBob[src_pitch2 - 1]);
- }
-
- // j,n
- if (ABS (pBob[-4] - pBob[src_pitch2 + 4]) < diff[0]) {
- best[0] = (pBob[-4] + pBob[src_pitch2 + 4]) / 2;
- diff[0] = ABS (pBob[-4] - pBob[src_pitch2 + 4]);
- }
-
- if (ABS (pBob[-3] - pBob[src_pitch2 + 5]) < diff[1]) {
- best[1] = (pBob[-3] + pBob[src_pitch2 + 5]) / 2;
- diff[1] = ABS (pBob[-3] - pBob[src_pitch2 + 5]);
- }
-
- // k,m
- if (ABS (pBob[4] - pBob[src_pitch2 - 4]) < diff[0]) {
- best[0] = (pBob[4] + pBob[src_pitch2 - 4]) / 2;
- diff[0] = ABS (pBob[4] - pBob[src_pitch2 - 4]);
- }
-
- if (ABS (pBob[5] - pBob[src_pitch2 - 3]) < diff[1]) {
- best[1] = (pBob[5] + pBob[src_pitch2 - 3]) / 2;
- diff[1] = ABS (pBob[5] - pBob[src_pitch2 - 3]);
- }
-
-// We will also calc here the max/min values to later limit comb
-// so the max excursion will not exceed the Max_Comb constant
-
-#ifdef SKIP_SEARCH
- best[0] = CLAMP (best[0], MIN (pBob[src_pitch2], pBob[0]), MAX (pBob[src_pitch2], pBob[0]));
- best[1] = CLAMP (best[1], MIN (pBob[src_pitch2 + 1], pBob[1]), MAX (pBob[src_pitch2 + 1], pBob[1]));
-#else
- mov[0] = MAX (ABS (pBob[0] - pBobP[0]), ABS (pBob[src_pitch2] - pBobP[src_pitch2]));
- mov[1] = MAX (ABS (pBob[1] - pBobP[1]), ABS (pBob[src_pitch2 + 1] - pBobP[src_pitch2 + 1]));
-
- MinVals[0] = 0;
- MinVals[1] = 0;
- MaxVals[0] = 255;
- MaxVals[1] = 255;
-
- if (mov[0] > Max_Mov[0]) {
- MinVals[0] = MAX (MIN (pBob[0], pBob[src_pitch2]), best[0]);
- MaxVals[0] = MIN (MAX (pBob[0], pBob[src_pitch2]), best[0]);
- }
-
- if (mov[1] > Max_Mov[1]) {
- MinVals[1] = MAX (MIN (pBob[1], pBob[src_pitch2 + 1]), best[1]);
- MaxVals[1] = MIN (MAX (pBob[1], pBob[src_pitch2 + 1]), best[1]);
- }
-
- best[0] = CLAMP (best[0], MIN (pBob[src_pitch2], pBob[0]), MAX (pBob[src_pitch2], pBob[0]));
- best[1] = CLAMP (best[1], MIN (pBob[src_pitch2 + 1], pBob[1]), MAX (pBob[src_pitch2 + 1], pBob[1]));
-#endif
-
- avg[0] = (pBob[src_pitch2] + pBob[0]) / 2;
- avg[1] = (pBob[src_pitch2 + 1] + pBob[1]) / 2;
- diff2[0] = ABS (pBob[src_pitch2] - pBob[0]);
- diff2[1] = ABS (pBob[src_pitch2 + 1] - pBob[1]);
-
- if (diff2[0] < diff[0]) {
- best[0] = avg[0];
- diff[0] = diff2[0];
- }
-
- if (diff2[1] < diff[1]) {
- best[1] = avg[1];
- diff[1] = diff2[1];
- }
-#endif
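The C path above keeps, per pixel pair, the diagonal candidate with the smallest absolute difference and then pulls it back into the range spanned by the pixels directly above and below, so the result can never overshoot its vertical neighbours (the comb limit referred to in the comments). A minimal scalar sketch of that clamp; clamp_to_neighbours() is a hypothetical helper, not something from the deleted file:

static unsigned char
clamp_to_neighbours (int cand, unsigned char top, unsigned char bottom)
{
  /* range spanned by the pixel above (pBob[0]) and below (pBob[src_pitch2]) */
  int lo = top < bottom ? top : bottom;
  int hi = top < bottom ? bottom : top;

  if (cand < lo)
    return (unsigned char) lo;
  if (cand > hi)
    return (unsigned char) hi;
  return (unsigned char) cand;  /* same as CLAMP (cand, MIN (top, bottom), MAX (top, bottom)) */
}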
diff --git a/gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h b/gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h
deleted file mode 100644
index 7e8147ec..00000000
--- a/gst/deinterlace2/tvtime/tomsmocomp/tomsmocompmacros.h
+++ /dev/null
@@ -1,164 +0,0 @@
-#include <string.h>
-#include <math.h>
-
-// Define a few macros for CPU dependent instructions.
-// I suspect I don't really understand how the C macro preprocessor works but
-// this seems to get the job done. // TRB 7/01
-
-// BEFORE USING THESE YOU MUST SET:
-
-// #define SIMD_TYPE MMXEXT (or MMX or 3DNOW)
-
-// some macros for pavgb instruction
-// V_PAVGB(mmr1, mmr2, mmr work register, smask) mmr2 may = mmrw if you can trash it
-
-#define V_PAVGB_MMX(mmr1, mmr2, mmrw, smask) \
- "movq "mmr2", "mmrw"\n\t" \
- "pand "smask", "mmrw"\n\t" \
- "psrlw $1, "mmrw"\n\t" \
- "pand "smask", "mmr1"\n\t" \
- "psrlw $1, "mmr1"\n\t" \
- "paddusb "mmrw", "mmr1"\n\t"
-#define V_PAVGB_MMXEXT(mmr1, mmr2, mmrw, smask) "pavgb "mmr2", "mmr1"\n\t"
-#define V_PAVGB_3DNOW(mmr1, mmr2, mmrw, smask) "pavgusb "mmr2", "mmr1"\n\t"
-#define V_PAVGB(mmr1, mmr2, mmrw, smask) V_PAVGB2(mmr1, mmr2, mmrw, smask, SIMD_TYPE)
-#define V_PAVGB2(mmr1, mmr2, mmrw, smask, simd_type) V_PAVGB3(mmr1, mmr2, mmrw, smask, simd_type)
-#define V_PAVGB3(mmr1, mmr2, mmrw, smask, simd_type) V_PAVGB_##simd_type(mmr1, mmr2, mmrw, smask)
-
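/* Not part of the original header: a minimal sketch of why the dispatch goes
 * through V_PAVGB2/V_PAVGB3.  The extra level forces SIMD_TYPE to be expanded
 * before ## pastes it onto the V_PAVGB_ prefix; a single-step paste would
 * produce the undefined name V_PAVGB_SIMD_TYPE.  IMPL_xxx and PICK below are
 * hypothetical names used only for this illustration. */
#define IMPL_MMXEXT "pavgb %%mm1, %%mm0\n\t"
#define IMPL_MMX    "average emulated with psrlw/paddusb\n\t"

#define PICK(t)        PICK_EXPAND(t)  /* t is a plain argument: expanded here */
#define PICK_EXPAND(t) IMPL_##t        /* so the paste sees MMXEXT, not SIMD_TYPE */

#define SIMD_TYPE MMXEXT
static const char picked[] = PICK (SIMD_TYPE);  /* -> IMPL_MMXEXT -> "pavgb ..." */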
-// some macros for pmaxub instruction
-#define V_PMAXUB_MMX(mmr1, mmr2) \
- "psubusb "mmr2", "mmr1"\n\t" \
- "paddusb "mmr2", "mmr1"\n\t"
-#define V_PMAXUB_MMXEXT(mmr1, mmr2) "pmaxub "mmr2", "mmr1"\n\t"
-#define V_PMAXUB_3DNOW(mmr1, mmr2) V_PMAXUB_MMX(mmr1, mmr2) // use MMX version
-#define V_PMAXUB(mmr1, mmr2) V_PMAXUB2(mmr1, mmr2, SIMD_TYPE)
-#define V_PMAXUB2(mmr1, mmr2, simd_type) V_PMAXUB3(mmr1, mmr2, simd_type)
-#define V_PMAXUB3(mmr1, mmr2, simd_type) V_PMAXUB_##simd_type(mmr1, mmr2)
-
-// some macros for pminub instruction
-// V_PMINUB(mmr1, mmr2, mmr work register) mmr2 may NOT = mmrw
-#define V_PMINUB_MMX(mmr1, mmr2, mmrw) \
- "pcmpeqb "mmrw", "mmrw"\n\t" \
- "psubusb "mmr2", "mmrw"\n\t" \
- "paddusb "mmrw", "mmr1"\n\t" \
- "psubusb "mmrw", "mmr1"\n\t"
-#define V_PMINUB_MMXEXT(mmr1, mmr2, mmrw) "pminub "mmr2", "mmr1"\n\t"
-#define V_PMINUB_3DNOW(mmr1, mmr2, mmrw) V_PMINUB_MMX(mmr1, mmr2, mmrw) // use MMX version
-#define V_PMINUB(mmr1, mmr2, mmrw) V_PMINUB2(mmr1, mmr2, mmrw, SIMD_TYPE)
-#define V_PMINUB2(mmr1, mmr2, mmrw, simd_type) V_PMINUB3(mmr1, mmr2, mmrw, simd_type)
-#define V_PMINUB3(mmr1, mmr2, mmrw, simd_type) V_PMINUB_##simd_type(mmr1, mmr2, mmrw)
-
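/* Not part of the original header: a scalar sketch of the saturating-arithmetic
 * tricks behind the MMX fallbacks.  Per byte, max(a,b) == sat_sub(a,b) + b,
 * which is the psubusb/paddusb pair in V_PMAXUB_MMX, and V_PMINUB_MMX applies
 * the complementary trick through an all-ones work register.  The helper names
 * are hypothetical. */
static unsigned char
sat_sub_u8 (unsigned char a, unsigned char b)
{
  return a > b ? (unsigned char) (a - b) : 0;  /* what psubusb does per byte */
}

static unsigned char
max_u8_like_mmx (unsigned char a, unsigned char b)
{
  /* psubusb mmr2, mmr1 ; paddusb mmr2, mmr1 */
  return (unsigned char) (sat_sub_u8 (a, b) + b);
}

static unsigned char
min_u8_like_mmx (unsigned char a, unsigned char b)
{
  /* pcmpeqb w,w ; psubusb b,w ; paddusb w,a ; psubusb w,a */
  unsigned int w = 255u - b;                      /* sat_sub (0xff, b) never underflows */
  unsigned int t = a + w > 255u ? 255u : a + w;   /* paddusb saturates at 0xff */
  return (unsigned char) (t > w ? t - w : 0);     /* final psubusb */
}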
-// some macros for movntq instruction
-// V_MOVNTQ(mmr1, mmr2)
-#define V_MOVNTQ_MMX(mmr1, mmr2) "movq "mmr2", "mmr1"\n\t"
-#define V_MOVNTQ_3DNOW(mmr1, mmr2) "movq "mmr2", "mmr1"\n\t"
-#define V_MOVNTQ_MMXEXT(mmr1, mmr2) "movntq "mmr2", "mmr1"\n\t"
-#define V_MOVNTQ(mmr1, mmr2) V_MOVNTQ2(mmr1, mmr2, SIMD_TYPE)
-#define V_MOVNTQ2(mmr1, mmr2, simd_type) V_MOVNTQ3(mmr1, mmr2, simd_type)
-#define V_MOVNTQ3(mmr1, mmr2, simd_type) V_MOVNTQ_##simd_type(mmr1, mmr2)
-
-// end of macros
-
-#ifdef IS_SSE2
-
-#define MERGE4PIXavg(PADDR1, PADDR2) \
- "movdqu "PADDR1", %%xmm0\n\t" /* our 4 pixels */ \
- "movdqu "PADDR2", %%xmm1\n\t" /* our pixel2 value */ \
- "movdqa %%xmm0, %%xmm2\n\t" /* another copy of our pixel1 value */ \
- "movdqa %%xmm1, %%xmm3\n\t" /* another copy of our pixel2 value */ \
- "psubusb %%xmm1, %%xmm2\n\t" \
- "psubusb %%xmm0, %%xmm3\n\t" \
- "por %%xmm3, %%xmm2\n\t" \
- "pavgb %%xmm1, %%xmm0\n\t" /* avg of 2 pixels */ \
- "movdqa %%xmm2, %%xmm3\n\t" /* another copy of our weights */ \
- "pxor %%xmm1, %%xmm1\n\t" \
- "psubusb %%xmm7, %%xmm3\n\t" /* nonzero where old weights lower, else 0 */ \
- "pcmpeqb %%xmm1, %%xmm3\n\t" /* now ff where new better, else 00 */ \
- "pcmpeqb %%xmm3, %%xmm1\n\t" /* here ff where old better, else 00 */ \
- "pand %%xmm3, %%xmm0\n\t" /* keep only better new pixels */ \
- "pand %%xmm3, %%xmm2\n\t" /* and weights */ \
- "pand %%xmm1, %%xmm5\n\t" /* keep only better old pixels */ \
- "pand %%xmm1, %%xmm7\n\t" \
- "por %%xmm0, %%xmm5\n\t" /* and merge new & old vals */ \
- "por %%xmm2, %%xmm7\n\t"
-
-#define MERGE4PIXavgH(PADDR1A, PADDR1B, PADDR2A, PADDR2B) \
- "movdqu "PADDR1A", %%xmm0\n\t" /* our 4 pixels */ \
- "movdqu "PADDR2A", %%xmm1\n\t" /* our pixel2 value */ \
- "movdqu "PADDR1B", %%xmm2\n\t" /* our 4 pixels */ \
- "movdqu "PADDR2B", %%xmm3\n\t" /* our pixel2 value */ \
- "pavgb %%xmm2, %%xmm0\n\t" \
- "pavgb %%xmm3, %%xmm1\n\t" \
- "movdqa %%xmm0, %%xmm2\n\t" /* another copy of our pixel1 value */ \
- "movdqa %%xmm1, %%xmm3\n\t" /* another copy of our pixel2 value */ \
- "psubusb %%xmm1, %%xmm2\n\t" \
- "psubusb %%xmm0, %%xmm3\n\t" \
- "por %%xmm3, %%xmm2\n\t" \
- "pavgb %%xmm1, %%xmm0\n\t" /* avg of 2 pixels */ \
- "movdqa %%xmm2, %%xmm3\n\t" /* another copy of our weights */ \
- "pxor %%xmm1, %%xmm1\n\t" \
- "psubusb %%xmm7, %%xmm3\n\t" /* nonzero where old weights lower, else 0 */ \
- "pcmpeqb %%xmm1, %%xmm3\n\t" /* now ff where new better, else 00 */ \
- "pcmpeqb %%xmm3, %%xmm1\n\t" /* here ff where old better, else 00 */ \
- "pand %%xmm3, %%xmm0\n\t" /* keep only better new pixels */ \
- "pand %%xmm3, %%xmm2\n\t" /* and weights */ \
- "pand %%xmm1, %%xmm5\n\t" /* keep only better old pixels */ \
- "pand %%xmm1, %%xmm7\n\t" \
- "por %%xmm0, %%xmm5\n\t" /* and merge new & old vals */ \
- "por %%xmm2, %%xmm7\n\t"
-
-#define RESET_CHROMA "por "_UVMask", %%xmm7\n\t"
-
-#else // ifdef IS_SSE2
-
-#define MERGE4PIXavg(PADDR1, PADDR2) \
- "movq "PADDR1", %%mm0\n\t" /* our 4 pixels */ \
- "movq "PADDR2", %%mm1\n\t" /* our pixel2 value */ \
- "movq %%mm0, %%mm2\n\t" /* another copy of our pixel1 value */ \
- "movq %%mm1, %%mm3\n\t" /* another copy of our pixel2 value */ \
- "psubusb %%mm1, %%mm2\n\t" \
- "psubusb %%mm0, %%mm3\n\t" \
- "por %%mm3, %%mm2\n\t" \
- V_PAVGB ("%%mm0", "%%mm1", "%%mm3", _ShiftMask) /* avg of 2 pixels */ \
- "movq %%mm2, %%mm3\n\t" /* another copy of our weights */ \
- "pxor %%mm1, %%mm1\n\t" \
- "psubusb %%mm7, %%mm3\n\t" /* nonzero where old weights lower, else 0 */ \
- "pcmpeqb %%mm1, %%mm3\n\t" /* now ff where new better, else 00 */ \
- "pcmpeqb %%mm3, %%mm1\n\t" /* here ff where old better, else 00 */ \
- "pand %%mm3, %%mm0\n\t" /* keep only better new pixels */ \
- "pand %%mm3, %%mm2\n\t" /* and weights */ \
- "pand %%mm1, %%mm5\n\t" /* keep only better old pixels */ \
- "pand %%mm1, %%mm7\n\t" \
- "por %%mm0, %%mm5\n\t" /* and merge new & old vals */ \
- "por %%mm2, %%mm7\n\t"
-
-#define MERGE4PIXavgH(PADDR1A, PADDR1B, PADDR2A, PADDR2B) \
- "movq "PADDR1A", %%mm0\n\t" /* our 4 pixels */ \
- "movq "PADDR2A", %%mm1\n\t" /* our pixel2 value */ \
- "movq "PADDR1B", %%mm2\n\t" /* our 4 pixels */ \
- "movq "PADDR2B", %%mm3\n\t" /* our pixel2 value */ \
- V_PAVGB("%%mm0", "%%mm2", "%%mm2", _ShiftMask) \
- V_PAVGB("%%mm1", "%%mm3", "%%mm3", _ShiftMask) \
- "movq %%mm0, %%mm2\n\t" /* another copy of our pixel1 value */ \
- "movq %%mm1, %%mm3\n\t" /* another copy of our pixel2 value */ \
- "psubusb %%mm1, %%mm2\n\t" \
- "psubusb %%mm0, %%mm3\n\t" \
- "por %%mm3, %%mm2\n\t" \
- V_PAVGB("%%mm0", "%%mm1", "%%mm3", _ShiftMask) /* avg of 2 pixels */ \
- "movq %%mm2, %%mm3\n\t" /* another copy of our weights */ \
- "pxor %%mm1, %%mm1\n\t" \
- "psubusb %%mm7, %%mm3\n\t" /* nonzero where old weights lower, else 0 */ \
- "pcmpeqb %%mm1, %%mm3\n\t" /* now ff where new better, else 00 */ \
- "pcmpeqb %%mm3, %%mm1\n\t" /* here ff where old better, else 00 */ \
- "pand %%mm3, %%mm0\n\t" /* keep only better new pixels */ \
- "pand %%mm3, %%mm2\n\t" /* and weights */ \
- "pand %%mm1, %%mm5\n\t" /* keep only better old pixels */ \
- "pand %%mm1, %%mm7\n\t" \
- "por %%mm0, %%mm5\n\t" /* and merge new & old vals */ \
- "por %%mm2, %%mm7\n\t"
-
-#define RESET_CHROMA "por "_UVMask", %%mm7\n\t"
-
-#endif
-
-
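Read per byte, each MERGE4PIXavg variant above averages the candidate pixel from the previous and next field, measures how far the two disagree, and keeps the averaged value only where that disagreement ("weight") is no worse than the best weight seen so far. A scalar sketch of that step; merge_pix_avg() and its parameter names are hypothetical, not part of the header:

static void
merge_pix_avg (unsigned char old_pix, unsigned char new_pix,
    unsigned char *best_val, unsigned char *best_weight)
{
  /* weight = |old - new|, built above with psubusb/por */
  unsigned char weight = old_pix > new_pix ?
      (unsigned char) (old_pix - new_pix) : (unsigned char) (new_pix - old_pix);
  /* pavgb rounds up: (a + b + 1) >> 1 */
  unsigned char avg = (unsigned char) ((old_pix + new_pix + 1) >> 1);

  /* the psubusb/pcmpeqb mask lets the new candidate win ties as well */
  if (weight <= *best_weight) {
    *best_val = avg;         /* mm5 / xmm5: running best value  */
    *best_weight = weight;   /* mm7 / xmm7: running best weight */
  }
}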