Diffstat (limited to 'src/libffmpeg/libavcodec/libpostproc')
-rw-r--r--  src/libffmpeg/libavcodec/libpostproc/mangle.h                            9
-rw-r--r--  src/libffmpeg/libavcodec/libpostproc/postprocess.c                     266
-rw-r--r--  src/libffmpeg/libavcodec/libpostproc/postprocess_altivec_template.c    721
-rw-r--r--  src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h             10
-rw-r--r--  src/libffmpeg/libavcodec/libpostproc/postprocess_template.c           1477
5 files changed, 1853 insertions(+), 630 deletions(-)
diff --git a/src/libffmpeg/libavcodec/libpostproc/mangle.h b/src/libffmpeg/libavcodec/libpostproc/mangle.h
index f3894cc33..aa09cd6bf 100644
--- a/src/libffmpeg/libavcodec/libpostproc/mangle.h
+++ b/src/libffmpeg/libavcodec/libpostproc/mangle.h
@@ -8,12 +8,21 @@
#define __MANGLE_H
/* Feel free to add more to the list, e.g. a.out IMO */
+/* Use rip-relative addressing if compiling PIC code on x86-64. */
#if defined(__CYGWIN__) || defined(__MINGW32__) || defined(__OS2__) || \
(defined(__OpenBSD__) && !defined(__ELF__))
+#if defined(ARCH_X86_64) && defined(PIC)
+#define MANGLE(a) "_" #a"(%%rip)"
+#else
#define MANGLE(a) "_" #a
+#endif
+#else
+#if defined(ARCH_X86_64) && defined(PIC)
+#define MANGLE(a) #a"(%%rip)"
#else
#define MANGLE(a) #a
#endif
+#endif
#endif /* !__MANGLE_H */
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess.c b/src/libffmpeg/libavcodec/libpostproc/postprocess.c
index a03ff133d..e7ca0191d 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess.c
+++ b/src/libffmpeg/libavcodec/libpostproc/postprocess.c
@@ -29,10 +29,11 @@ isVertDC Ec Ec Ec
isVertMinMaxOk Ec Ec Ec
doVertLowPass E e e Ec
doVertDefFilter Ec Ec e e Ec
-isHorizDC Ec Ec
-isHorizMinMaxOk a E
-doHorizLowPass E e e
-doHorizDefFilter Ec Ec e e
+isHorizDC Ec Ec Ec
+isHorizMinMaxOk a E Ec
+doHorizLowPass E e e Ec
+doHorizDefFilter Ec Ec e e Ec
+do_a_deblock Ec E Ec E
deRing E e e* Ecp
Vertical RKAlgo1 E a a
Horizontal RKAlgo1 a a
@@ -42,7 +43,7 @@ LinIpolDeinterlace e E E*
CubicIpolDeinterlace a e e*
LinBlendDeinterlace e E E*
MedianDeinterlace# E Ec Ec
-TempDeNoiser# E e e
+TempDeNoiser# E e e Ec
* I don't have a 3DNow CPU -> it's untested, but no one said it doesn't work, so it seems to work
# more or less self-invented filters, so the exactness isn't too meaningful
@@ -91,6 +92,10 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
#include "mangle.h" //FIXME should be supressed
+#ifdef HAVE_ALTIVEC_H
+#include <altivec.h>
+#endif
+
#ifndef HAVE_MEMALIGN
#define memalign(a,b) malloc(b)
#endif
@@ -108,12 +113,15 @@ try to unroll inner for(x=0 ... loop to avoid these damn if(x ... checks
#if defined(__GNUC__) && (__GNUC__ > 3 || __GNUC__ == 3 && __GNUC_MINOR__ > 0)
# define attribute_used __attribute__((used))
+# define always_inline __attribute__((always_inline)) inline
#else
# define attribute_used
+# define always_inline inline
#endif
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
static uint64_t __attribute__((aligned(8))) attribute_used w05= 0x0005000500050005LL;
+static uint64_t __attribute__((aligned(8))) attribute_used w04= 0x0004000400040004LL;
static uint64_t __attribute__((aligned(8))) attribute_used w20= 0x0020002000200020LL;
static uint64_t __attribute__((aligned(8))) attribute_used b00= 0x0000000000000000LL;
static uint64_t __attribute__((aligned(8))) attribute_used b01= 0x0101010101010101LL;
@@ -122,7 +130,6 @@ static uint64_t __attribute__((aligned(8))) attribute_used b08= 0x080808080808
static uint64_t __attribute__((aligned(8))) attribute_used b80= 0x8080808080808080LL;
#endif
-
static uint8_t clip_table[3*256];
static uint8_t * const clip_tab= clip_table + 256;
@@ -139,6 +146,8 @@ static struct PPFilter filters[]=
{"vr", "rkvdeblock", 1, 2, 4, V_RK1_FILTER},*/
{"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
{"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
+ {"ha", "ahdeblock", 1, 1, 3, H_A_DEBLOCK},
+ {"va", "avdeblock", 1, 2, 4, V_A_DEBLOCK},
{"dr", "dering", 1, 5, 6, DERING},
{"al", "autolevels", 0, 1, 2, LEVEL_FIX},
{"lb", "linblenddeint", 1, 1, 4, LINEAR_BLEND_DEINT_FILTER},
@@ -154,15 +163,16 @@ static struct PPFilter filters[]=
static char *replaceTable[]=
{
- "default", "hdeblock:a,vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
- "de", "hdeblock:a,vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
- "fast", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
- "fa", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels,tmpnoise:a:150:200:400",
+ "default", "hdeblock:a,vdeblock:a,dering:a",
+ "de", "hdeblock:a,vdeblock:a,dering:a",
+ "fast", "x1hdeblock:a,x1vdeblock:a,dering:a",
+ "fa", "x1hdeblock:a,x1vdeblock:a,dering:a",
+ "ac", "ha:a:128:7,va:a,dering:a",
NULL //End Marker
};
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
static inline void prefetchnta(void *p)
{
asm volatile( "prefetchnta (%0)\n\t"
@@ -372,32 +382,32 @@ static inline void doHorizDefFilter_C(uint8_t dst[], int stride, PPContext *c)
*/
static inline void doHorizLowPass_C(uint8_t dst[], int stride, PPContext *c)
{
-
int y;
for(y=0; y<BLOCK_SIZE; y++)
{
const int first= ABS(dst[-1] - dst[0]) < c->QP ? dst[-1] : dst[0];
const int last= ABS(dst[8] - dst[7]) < c->QP ? dst[8] : dst[7];
- int sums[9];
- sums[0] = first + dst[0];
- sums[1] = dst[0] + dst[1];
- sums[2] = dst[1] + dst[2];
- sums[3] = dst[2] + dst[3];
- sums[4] = dst[3] + dst[4];
- sums[5] = dst[4] + dst[5];
- sums[6] = dst[5] + dst[6];
- sums[7] = dst[6] + dst[7];
- sums[8] = dst[7] + last;
-
- dst[0]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
- dst[1]= ((dst[1]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
- dst[2]= ((dst[2]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
- dst[3]= ((dst[3]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
- dst[4]= ((dst[4]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
- dst[5]= ((dst[5]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
- dst[6]= (((last + dst[6])<<2) + ((dst[7] + sums[5])<<1) + sums[3] + 8)>>4;
- dst[7]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
+ int sums[10];
+ sums[0] = 4*first + dst[0] + dst[1] + dst[2] + 4;
+ sums[1] = sums[0] - first + dst[3];
+ sums[2] = sums[1] - first + dst[4];
+ sums[3] = sums[2] - first + dst[5];
+ sums[4] = sums[3] - first + dst[6];
+ sums[5] = sums[4] - dst[0] + dst[7];
+ sums[6] = sums[5] - dst[1] + last;
+ sums[7] = sums[6] - dst[2] + last;
+ sums[8] = sums[7] - dst[3] + last;
+ sums[9] = sums[8] - dst[4] + last;
+
+ dst[0]= (sums[0] + sums[2] + 2*dst[0])>>4;
+ dst[1]= (sums[1] + sums[3] + 2*dst[1])>>4;
+ dst[2]= (sums[2] + sums[4] + 2*dst[2])>>4;
+ dst[3]= (sums[3] + sums[5] + 2*dst[3])>>4;
+ dst[4]= (sums[4] + sums[6] + 2*dst[4])>>4;
+ dst[5]= (sums[5] + sums[7] + 2*dst[5])>>4;
+ dst[6]= (sums[6] + sums[8] + 2*dst[6])>>4;
+ dst[7]= (sums[7] + sums[9] + 2*dst[7])>>4;
dst+= stride;
}
@@ -469,6 +479,111 @@ static inline void horizX1Filter(uint8_t *src, int stride, int QP)
}
}
+/**
+ * accurate deblock filter
+ */
+static always_inline void do_a_deblock_C(uint8_t *src, int step, int stride, PPContext *c){
+ int y;
+ const int QP= c->QP;
+ const int dcOffset= ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
+ const int dcThreshold= dcOffset*2 + 1;
+//START_TIMER
+	src+= step*4; // src points to the beginning of the 8x8 block
+ for(y=0; y<8; y++){
+ int numEq= 0;
+
+ if(((unsigned)(src[-1*step] - src[0*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 0*step] - src[1*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 1*step] - src[2*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 2*step] - src[3*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 3*step] - src[4*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 4*step] - src[5*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 5*step] - src[6*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 6*step] - src[7*step] + dcOffset)) < dcThreshold) numEq++;
+ if(((unsigned)(src[ 7*step] - src[8*step] + dcOffset)) < dcThreshold) numEq++;
+ if(numEq > c->ppMode.flatnessThreshold){
+ int min, max, x;
+
+ if(src[0] > src[step]){
+ max= src[0];
+ min= src[step];
+ }else{
+ max= src[step];
+ min= src[0];
+ }
+ for(x=2; x<8; x+=2){
+ if(src[x*step] > src[(x+1)*step]){
+ if(src[x *step] > max) max= src[ x *step];
+ if(src[(x+1)*step] < min) min= src[(x+1)*step];
+ }else{
+ if(src[(x+1)*step] > max) max= src[(x+1)*step];
+ if(src[ x *step] < min) min= src[ x *step];
+ }
+ }
+ if(max-min < 2*QP){
+ const int first= ABS(src[-1*step] - src[0]) < QP ? src[-1*step] : src[0];
+ const int last= ABS(src[8*step] - src[7*step]) < QP ? src[8*step] : src[7*step];
+
+ int sums[10];
+ sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4;
+ sums[1] = sums[0] - first + src[3*step];
+ sums[2] = sums[1] - first + src[4*step];
+ sums[3] = sums[2] - first + src[5*step];
+ sums[4] = sums[3] - first + src[6*step];
+ sums[5] = sums[4] - src[0*step] + src[7*step];
+ sums[6] = sums[5] - src[1*step] + last;
+ sums[7] = sums[6] - src[2*step] + last;
+ sums[8] = sums[7] - src[3*step] + last;
+ sums[9] = sums[8] - src[4*step] + last;
+
+ src[0*step]= (sums[0] + sums[2] + 2*src[0*step])>>4;
+ src[1*step]= (sums[1] + sums[3] + 2*src[1*step])>>4;
+ src[2*step]= (sums[2] + sums[4] + 2*src[2*step])>>4;
+ src[3*step]= (sums[3] + sums[5] + 2*src[3*step])>>4;
+ src[4*step]= (sums[4] + sums[6] + 2*src[4*step])>>4;
+ src[5*step]= (sums[5] + sums[7] + 2*src[5*step])>>4;
+ src[6*step]= (sums[6] + sums[8] + 2*src[6*step])>>4;
+ src[7*step]= (sums[7] + sums[9] + 2*src[7*step])>>4;
+ }
+ }else{
+ const int middleEnergy= 5*(src[4*step] - src[3*step]) + 2*(src[2*step] - src[5*step]);
+
+ if(ABS(middleEnergy) < 8*QP)
+ {
+ const int q=(src[3*step] - src[4*step])/2;
+ const int leftEnergy= 5*(src[2*step] - src[1*step]) + 2*(src[0*step] - src[3*step]);
+ const int rightEnergy= 5*(src[6*step] - src[5*step]) + 2*(src[4*step] - src[7*step]);
+
+ int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
+ d= MAX(d, 0);
+
+ d= (5*d + 32) >> 6;
+ d*= SIGN(-middleEnergy);
+
+ if(q>0)
+ {
+ d= d<0 ? 0 : d;
+ d= d>q ? q : d;
+ }
+ else
+ {
+ d= d>0 ? 0 : d;
+ d= d<q ? q : d;
+ }
+
+ src[3*step]-= d;
+ src[4*step]+= d;
+ }
+ }
+
+ src += stride;
+ }
+/*if(step==16){
+ STOP_TIMER("step16")
+}else{
+ STOP_TIMER("stepX")
+}*/
+}
//Note: we have C, MMX, MMX2, 3DNOW version there is no 3DNOW+MMX2 one
//Plain C versions
@@ -479,15 +594,10 @@ static inline void horizX1Filter(uint8_t *src, int stride, int QP)
#ifdef ARCH_POWERPC
#ifdef HAVE_ALTIVEC
#define COMPILE_ALTIVEC
-#ifndef CONFIG_DARWIN
-#warning "################################################################################"
-#warning "WARNING: No gcc available as of today (2004-05-25) seems to be able to compile properly some of the code under non-Darwin PPC OSes. Some functions result in wrong results, while others simply won't compile (gcc explodes after allocating 1GiB+)."
-#warning "################################################################################"
-#endif //CONFIG_DARWIN
#endif //HAVE_ALTIVEC
#endif //ARCH_POWERPC
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX
@@ -506,13 +616,11 @@ static inline void horizX1Filter(uint8_t *src, int stride, int QP)
#undef HAVE_MMX2
#undef HAVE_3DNOW
#undef HAVE_ALTIVEC
-#undef ARCH_X86
#ifdef COMPILE_C
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
-#undef ARCH_X86
#define RENAME(a) a ## _C
#include "postprocess_template.c"
#endif
@@ -533,7 +641,6 @@ static inline void horizX1Filter(uint8_t *src, int stride, int QP)
#define HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
-#define ARCH_X86
#define RENAME(a) a ## _MMX
#include "postprocess_template.c"
#endif
@@ -544,7 +651,6 @@ static inline void horizX1Filter(uint8_t *src, int stride, int QP)
#define HAVE_MMX
#define HAVE_MMX2
#undef HAVE_3DNOW
-#define ARCH_X86
#define RENAME(a) a ## _MMX2
#include "postprocess_template.c"
#endif
@@ -555,7 +661,6 @@ static inline void horizX1Filter(uint8_t *src, int stride, int QP)
#define HAVE_MMX
#undef HAVE_MMX2
#define HAVE_3DNOW
-#define ARCH_X86
#define RENAME(a) a ## _3DNow
#include "postprocess_template.c"
#endif
@@ -573,7 +678,7 @@ static inline void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int
// difference wouldn't be measurable here, but it's much better because
// someone might exchange the CPU without restarting mplayer ;)
#ifdef RUNTIME_CPUDETECT
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
// ordered by speed, fastest first
if(c->cpuCaps & PP_CPU_CAPS_MMX2)
postProcess_MMX2(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
@@ -586,7 +691,7 @@ static inline void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int
#else
#ifdef ARCH_POWERPC
#ifdef HAVE_ALTIVEC
- else if(c->cpuCaps & PP_CPU_CAPS_ALTIVEC)
+ if(c->cpuCaps & PP_CPU_CAPS_ALTIVEC)
postProcess_altivec(src, srcStride, dst, dstStride, width, height, QPs, QPStride, isColor, c);
else
#endif
@@ -614,24 +719,21 @@ static inline void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int
/* -pp Command line Help
*/
char *pp_help=
-"<filterName>[:<option>[:<option>...]][[,|/][-]<filterName>[:<option>...]]...\n"
-"long form example:\n"
-"vdeblock:autoq/hdeblock:autoq/linblenddeint default,-vdeblock\n"
-"short form example:\n"
-"vb:a/hb:a/lb de,-vb\n"
-"more examples:\n"
-"tn:64:128:256\n"
+"Available postprocessing filters:\n"
"Filters Options\n"
"short long name short long option Description\n"
"* * a autoq CPU power dependent enabler\n"
" c chrom chrominance filtering enabled\n"
" y nochrom chrominance filtering disabled\n"
+" n noluma luma filtering disabled\n"
"hb hdeblock (2 threshold) horizontal deblocking filter\n"
" 1. difference factor: default=32, higher -> more deblocking\n"
" 2. flatness threshold: default=39, lower -> more deblocking\n"
" the h & v deblocking filters share these\n"
" so you can't set different thresholds for h / v\n"
"vb vdeblock (2 threshold) vertical deblocking filter\n"
+"ha hadeblock (2 threshold) horizontal deblocking filter\n"
+"va vadeblock (2 threshold) vertical deblocking filter\n"
"h1 x1hdeblock experimental h deblock filter 1\n"
"v1 x1vdeblock experimental v deblock filter 1\n"
"dr dering deringing filter\n"
@@ -642,11 +744,20 @@ char *pp_help=
"ci cubicipoldeint cubic interpolating deinterlacer\n"
"md mediandeint median deinterlacer\n"
"fd ffmpegdeint ffmpeg deinterlacer\n"
-"de default hb:a,vb:a,dr:a,al\n"
-"fa fast h1:a,v1:a,dr:a,al\n"
+"l5 lowpass5 FIR lowpass deinterlacer\n"
+"de default hb:a,vb:a,dr:a\n"
+"fa fast h1:a,v1:a,dr:a\n"
"tn tmpnoise (3 threshold) temporal noise reducer\n"
" 1. <= 2. <= 3. larger -> stronger filtering\n"
"fq forceQuant <quantizer> force quantizer\n"
+"Usage:\n"
+"<filterName>[:<option>[:<option>...]][[,|/][-]<filterName>[:<option>...]]...\n"
+"long form example:\n"
+"vdeblock:autoq/hdeblock:autoq/linblenddeint default,-vdeblock\n"
+"short form example:\n"
+"vb:a/hb:a/lb de,-vb\n"
+"more examples:\n"
+"tn:64:128:256\n"
;
pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
@@ -680,6 +791,7 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
char *filterName;
int q= 1000000; //PP_QUALITY_MAX;
int chrom=-1;
+ int luma=-1;
char *option;
char *options[OPTIONS_ARRAY_SIZE];
int i;
@@ -707,6 +819,7 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
+ else if(!strcmp("noluma", option) || !strcmp("n", option)) luma=0;
else
{
options[numOfUnknownOptions] = option;
@@ -753,7 +866,7 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
filterNameOk=1;
if(!enable) break; // user wants to disable it
- if(q >= filters[i].minLumQuality)
+ if(q >= filters[i].minLumQuality && luma)
ppMode->lumMode|= filters[i].mask;
if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
if(q >= filters[i].minChromQuality)
@@ -793,7 +906,8 @@ pp_mode_t *pp_get_mode_by_name_and_quality(char *name, int quality)
}
}
}
- else if(filters[i].mask == V_DEBLOCK || filters[i].mask == H_DEBLOCK)
+ else if(filters[i].mask == V_DEBLOCK || filters[i].mask == H_DEBLOCK
+ || filters[i].mask == V_A_DEBLOCK || filters[i].mask == H_A_DEBLOCK)
{
int o;
@@ -940,18 +1054,20 @@ void pp_postprocess(uint8_t * src[3], int srcStride[3],
int mbHeight= (height+15)>>4;
PPMode *mode = (PPMode*)vm;
PPContext *c = (PPContext*)vc;
- int minStride= MAX(srcStride[0], dstStride[0]);
+ int minStride= MAX(ABS(srcStride[0]), ABS(dstStride[0]));
+ int absQPStride = ABS(QPStride);
- if(c->stride < minStride || c->qpStride < QPStride)
+ // c->stride and c->QPStride are always positive
+ if(c->stride < minStride || c->qpStride < absQPStride)
reallocBuffers(c, width, height,
MAX(minStride, c->stride),
- MAX(c->qpStride, QPStride));
+ MAX(c->qpStride, absQPStride));
if(QP_store==NULL || (mode->lumMode & FORCE_QUANT))
{
int i;
QP_store= c->forcedQPTable;
- QPStride= 0;
+ absQPStride = QPStride = 0;
if(mode->lumMode & FORCE_QUANT)
for(i=0; i<mbWidth; i++) QP_store[i]= mode->forcedQuant;
else
@@ -961,7 +1077,7 @@ void pp_postprocess(uint8_t * src[3], int srcStride[3],
if(pict_type & PP_PICT_TYPE_QP2){
int i;
- const int count= mbHeight * QPStride;
+ const int count= mbHeight * absQPStride;
for(i=0; i<(count>>2); i++){
((uint32_t*)c->stdQPTable)[i] = (((uint32_t*)QP_store)[i]>>1) & 0x7F7F7F7F;
}
@@ -969,6 +1085,7 @@ void pp_postprocess(uint8_t * src[3], int srcStride[3],
c->stdQPTable[i] = QP_store[i]>>1;
}
QP_store= c->stdQPTable;
+ QPStride= absQPStride;
}
if(0){
@@ -984,13 +1101,22 @@ for(y=0; y<mbHeight; y++){
if((pict_type&7)!=3)
{
- int i;
- const int count= mbHeight * QPStride;
- for(i=0; i<(count>>2); i++){
- ((uint32_t*)c->nonBQPTable)[i] = ((uint32_t*)QP_store)[i] & 0x3F3F3F3F;
- }
- for(i<<=2; i<count; i++){
- c->nonBQPTable[i] = QP_store[i] & 0x3F;
+ if (QPStride >= 0) {
+ int i;
+ const int count= mbHeight * QPStride;
+ for(i=0; i<(count>>2); i++){
+ ((uint32_t*)c->nonBQPTable)[i] = ((uint32_t*)QP_store)[i] & 0x3F3F3F3F;
+ }
+ for(i<<=2; i<count; i++){
+ c->nonBQPTable[i] = QP_store[i] & 0x3F;
+ }
+ } else {
+ int i,j;
+ for(i=0; i<mbHeight; i++) {
+ for(j=0; j<absQPStride; j++) {
+ c->nonBQPTable[i*absQPStride+j] = QP_store[i*QPStride+j] & 0x3F;
+ }
+ }
}
}
@@ -1014,8 +1140,8 @@ for(y=0; y<mbHeight; y++){
}
else if(srcStride[1] == dstStride[1] && srcStride[2] == dstStride[2])
{
- memcpy(dst[1], src[1], srcStride[1]*height);
- memcpy(dst[2], src[2], srcStride[2]*height);
+ linecpy(dst[1], src[1], height, srcStride[1]);
+ linecpy(dst[2], src[2], height, srcStride[2]);
}
else
{
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_altivec_template.c b/src/libffmpeg/libavcodec/libpostproc/postprocess_altivec_template.c
index 0c84873cc..1c59b9465 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess_altivec_template.c
+++ b/src/libffmpeg/libavcodec/libpostproc/postprocess_altivec_template.c
@@ -25,6 +25,39 @@
#define AVV(x...) {x}
#endif
+#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
+ do { \
+ __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
+ __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
+ __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
+ __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
+ tempA1 = vec_mergeh (src_a, src_e); \
+ tempB1 = vec_mergel (src_a, src_e); \
+ tempC1 = vec_mergeh (src_b, src_f); \
+ tempD1 = vec_mergel (src_b, src_f); \
+ tempE1 = vec_mergeh (src_c, src_g); \
+ tempF1 = vec_mergel (src_c, src_g); \
+ tempG1 = vec_mergeh (src_d, src_h); \
+ tempH1 = vec_mergel (src_d, src_h); \
+ tempA2 = vec_mergeh (tempA1, tempE1); \
+ tempB2 = vec_mergel (tempA1, tempE1); \
+ tempC2 = vec_mergeh (tempB1, tempF1); \
+ tempD2 = vec_mergel (tempB1, tempF1); \
+ tempE2 = vec_mergeh (tempC1, tempG1); \
+ tempF2 = vec_mergel (tempC1, tempG1); \
+ tempG2 = vec_mergeh (tempD1, tempH1); \
+ tempH2 = vec_mergel (tempD1, tempH1); \
+ src_a = vec_mergeh (tempA2, tempE2); \
+ src_b = vec_mergel (tempA2, tempE2); \
+ src_c = vec_mergeh (tempB2, tempF2); \
+ src_d = vec_mergel (tempB2, tempF2); \
+ src_e = vec_mergeh (tempC2, tempG2); \
+ src_f = vec_mergel (tempC2, tempG2); \
+ src_g = vec_mergeh (tempD2, tempH2); \
+ src_h = vec_mergel (tempD2, tempH2); \
+ } while (0)
+
+
static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
@@ -40,7 +73,9 @@ static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c)
vector signed short v2QP;
vector unsigned short v4QP;
vector unsigned short v_dcThreshold;
- int two_vectors = ((((unsigned long)src2 % 16) > 8) || (stride % 16)) ? 1 : 0;
+ const int properStride = (stride % 16);
+ const int srcAlign = ((unsigned long)src2 % 16);
+ const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
const vector signed int zero = vec_splat_s32(0);
const vector signed short mask = vec_splat_s16(1);
vector signed int v_numEq = vec_splat_s32(0);
@@ -57,6 +92,8 @@ static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c)
src2 += stride * 4;
+ vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3, v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
+
#define LOAD_LINE(i) \
register int j##i = i * stride; \
vector unsigned char perm##i = vec_lvsl(j##i, src2); \
@@ -66,19 +103,41 @@ static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c)
v_srcA2##i = vec_ld(j##i + 16, src2); \
const vector unsigned char v_srcA##i = \
vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
- vector signed short v_srcAss##i = \
+ v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i)
- LOAD_LINE(0);
- LOAD_LINE(1);
- LOAD_LINE(2);
- LOAD_LINE(3);
- LOAD_LINE(4);
- LOAD_LINE(5);
- LOAD_LINE(6);
- LOAD_LINE(7);
+#define LOAD_LINE_ALIGNED(i) \
+ register int j##i = i * stride; \
+ const vector unsigned char v_srcA##i = vec_ld(j##i, src2); \
+ v_srcAss##i = \
+ (vector signed short)vec_mergeh((vector signed char)zero, \
+ (vector signed char)v_srcA##i)
+
+  // special casing the aligned case is worthwhile, as all calls from
+  // the (transposed) horizontal deblocks will be aligned, in addition
+  // to the naturally aligned vertical deblocks.
+ if (properStride && srcAlign) {
+ LOAD_LINE_ALIGNED(0);
+ LOAD_LINE_ALIGNED(1);
+ LOAD_LINE_ALIGNED(2);
+ LOAD_LINE_ALIGNED(3);
+ LOAD_LINE_ALIGNED(4);
+ LOAD_LINE_ALIGNED(5);
+ LOAD_LINE_ALIGNED(6);
+ LOAD_LINE_ALIGNED(7);
+ } else {
+ LOAD_LINE(0);
+ LOAD_LINE(1);
+ LOAD_LINE(2);
+ LOAD_LINE(3);
+ LOAD_LINE(4);
+ LOAD_LINE(5);
+ LOAD_LINE(6);
+ LOAD_LINE(7);
+ }
#undef LOAD_LINE
+#undef LOAD_LINE_ALIGNED
#define ITER(i, j) \
const vector signed short v_diff##i = \
@@ -133,7 +192,6 @@ static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c)
else return 2;
}
-
static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
/*
this code makes no assumption on src or stride.
@@ -145,112 +203,130 @@ static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c)
*/
uint8_t *src2 = src;
const vector signed int zero = vec_splat_s32(0);
+ const int properStride = (stride % 16);
+ const int srcAlign = ((unsigned long)src2 % 16);
short __attribute__ ((aligned(16))) qp[8];
qp[0] = c->QP;
vector signed short vqp = vec_ld(0, qp);
vqp = vec_splat(vqp, 0);
+ src2 += stride*3;
+
+ vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
+ vector unsigned char vbA0, vbA1, vbA2, vbA3, vbA4, vbA5, vbA6, vbA7, vbA8, vbA9;
+ vector unsigned char vbB0, vbB1, vbB2, vbB3, vbB4, vbB5, vbB6, vbB7, vbB8, vbB9;
+ vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
+
#define LOAD_LINE(i) \
const vector unsigned char perml##i = \
vec_lvsl(i * stride, src2); \
- const vector unsigned char vbA##i = \
- vec_ld(i * stride, src2); \
- const vector unsigned char vbB##i = \
- vec_ld(i * stride + 16, src2); \
- const vector unsigned char vbT##i = \
- vec_perm(vbA##i, vbB##i, perml##i); \
- const vector signed short vb##i = \
+ vbA##i = vec_ld(i * stride, src2); \
+ vbB##i = vec_ld(i * stride + 16, src2); \
+ vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \
+ vb##i = \
(vector signed short)vec_mergeh((vector unsigned char)zero, \
(vector unsigned char)vbT##i)
-
- src2 += stride*3;
- LOAD_LINE(0);
- LOAD_LINE(1);
- LOAD_LINE(2);
- LOAD_LINE(3);
- LOAD_LINE(4);
- LOAD_LINE(5);
- LOAD_LINE(6);
- LOAD_LINE(7);
- LOAD_LINE(8);
- LOAD_LINE(9);
+#define LOAD_LINE_ALIGNED(i) \
+ register int j##i = i * stride; \
+ vbT##i = vec_ld(j##i, src2); \
+ vb##i = \
+ (vector signed short)vec_mergeh((vector signed char)zero, \
+ (vector signed char)vbT##i)
+
+  // special casing the aligned case is worthwhile, as all calls from
+  // the (transposed) horizontal deblocks will be aligned, in addition
+  // to the naturally aligned vertical deblocks.
+ if (properStride && srcAlign) {
+ LOAD_LINE_ALIGNED(0);
+ LOAD_LINE_ALIGNED(1);
+ LOAD_LINE_ALIGNED(2);
+ LOAD_LINE_ALIGNED(3);
+ LOAD_LINE_ALIGNED(4);
+ LOAD_LINE_ALIGNED(5);
+ LOAD_LINE_ALIGNED(6);
+ LOAD_LINE_ALIGNED(7);
+ LOAD_LINE_ALIGNED(8);
+ LOAD_LINE_ALIGNED(9);
+ } else {
+ LOAD_LINE(0);
+ LOAD_LINE(1);
+ LOAD_LINE(2);
+ LOAD_LINE(3);
+ LOAD_LINE(4);
+ LOAD_LINE(5);
+ LOAD_LINE(6);
+ LOAD_LINE(7);
+ LOAD_LINE(8);
+ LOAD_LINE(9);
+ }
#undef LOAD_LINE
+#undef LOAD_LINE_ALIGNED
const vector unsigned short v_1 = vec_splat_u16(1);
const vector unsigned short v_2 = vec_splat_u16(2);
const vector unsigned short v_4 = vec_splat_u16(4);
- const vector signed short v_8 = vec_splat_s16(8);
-
- const vector signed short v_first = vec_sel(vb1, vb0,
- vec_cmplt(vec_abs(vec_sub(vb0, vb1)),
- vqp));
- const vector signed short v_last = vec_sel(vb8, vb9,
- vec_cmplt(vec_abs(vec_sub(vb8, vb9)),
- vqp));
-
- const vector signed short v_sums0 = vec_add(v_first, vb1);
- const vector signed short v_sums1 = vec_add(vb1, vb2);
- const vector signed short v_sums2 = vec_add(vb2, vb3);
- const vector signed short v_sums3 = vec_add(vb3, vb4);
- const vector signed short v_sums4 = vec_add(vb4, vb5);
- const vector signed short v_sums5 = vec_add(vb5, vb6);
- const vector signed short v_sums6 = vec_add(vb6, vb7);
- const vector signed short v_sums7 = vec_add(vb7, vb8);
- const vector signed short v_sums8 = vec_add(vb8, v_last);
-
- const vector signed short vr1 = vec_sra(vec_add(vec_add(vec_sl(v_sums0, v_2),
- vec_sl(vec_add(v_first, v_sums2), v_1)),
- vec_add(v_sums4, v_8)),
- v_4);
- const vector signed short vr2 = vec_sra(vec_add(vec_add(vec_sl(vb2, v_2),
- v_sums5),
- vec_add(v_8,
- vec_sl(vec_add(v_first,
- vec_add(v_sums0, v_sums3)),
- v_1))),
- v_4);
- const vector signed short vr3 = vec_sra(vec_add(vec_add(vec_sl(vb3, v_2),
- v_sums6),
- vec_add(v_8,
- vec_sl(vec_add(v_first,
- vec_add(v_sums1, v_sums4)),
- v_1))),
- v_4);
- const vector signed short vr4 = vec_sra(vec_add(vec_add(vec_sl(vb4, v_2),
- v_sums7),
- vec_add(v_8,
- vec_add(v_sums0,
- vec_sl(vec_add(v_sums2, v_sums5),
- v_1)))),
- v_4);
- const vector signed short vr5 = vec_sra(vec_add(vec_add(vec_sl(vb5, v_2),
- v_sums8),
- vec_add(v_8,
- vec_add(v_sums1,
- vec_sl(vec_add(v_sums3, v_sums6),
- v_1)))),
- v_4);
- const vector signed short vr6 = vec_sra(vec_add(vec_add(vec_sl(vb6, v_2),
- v_sums2),
- vec_add(v_8,
- vec_sl(vec_add(v_last,
- vec_add(v_sums7, v_sums4)),
- v_1))),
- v_4);
- const vector signed short vr7 = vec_sra(vec_add(vec_add(vec_sl(vec_add(v_last, vb7), v_2),
- vec_sl(vec_add(vb8, v_sums5), v_1)),
- vec_add(v_8, v_sums3)),
- v_4);
- const vector signed short vr8 = vec_sra(vec_add(vec_add(vec_sl(v_sums8, v_2),
- vec_sl(vec_add(v_last, v_sums6), v_1)),
- vec_add(v_sums4, v_8)),
- v_4);
-
- const vector unsigned char neg1 = (vector unsigned char)AVV(-1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1);
- const vector unsigned char permHH = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
+
+ const vector signed short v_diff01 = vec_sub(vb0, vb1);
+ const vector unsigned short v_cmp01 =
+ (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
+ const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
+ const vector signed short v_diff89 = vec_sub(vb8, vb9);
+ const vector unsigned short v_cmp89 =
+ (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
+ const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);
+
+ const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
+ const vector signed short temp02 = vec_add(vb2, vb3);
+ const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
+ const vector signed short v_sumsB0 = vec_add(temp02, temp03);
+
+ const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
+ const vector signed short v_sumsB1 = vec_add(temp11, vb4);
+
+ const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
+ const vector signed short v_sumsB2 = vec_add(temp21, vb5);
+
+ const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
+ const vector signed short v_sumsB3 = vec_add(temp31, vb6);
+
+ const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
+ const vector signed short v_sumsB4 = vec_add(temp41, vb7);
+
+ const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
+ const vector signed short v_sumsB5 = vec_add(temp51, vb8);
+
+ const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
+ const vector signed short v_sumsB6 = vec_add(temp61, v_last);
+
+ const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
+ const vector signed short v_sumsB7 = vec_add(temp71, v_last);
+
+ const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
+ const vector signed short v_sumsB8 = vec_add(temp81, v_last);
+
+ const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
+ const vector signed short v_sumsB9 = vec_add(temp91, v_last);
+
+#define COMPUTE_VR(i, j, k) \
+ const vector signed short temps1##i = \
+ vec_add(v_sumsB##i, v_sumsB##k); \
+ const vector signed short temps2##i = \
+ vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \
+ const vector signed short vr##j = vec_sra(temps2##i, v_4)
+
+ COMPUTE_VR(0, 1, 2);
+ COMPUTE_VR(1, 2, 3);
+ COMPUTE_VR(2, 3, 4);
+ COMPUTE_VR(3, 4, 5);
+ COMPUTE_VR(4, 5, 6);
+ COMPUTE_VR(5, 6, 7);
+ COMPUTE_VR(6, 7, 8);
+ COMPUTE_VR(7, 8, 9);
+
+ const vector signed char neg1 = vec_splat_s8(-1);
+ const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
#define PACK_AND_STORE(i) \
const vector unsigned char perms##i = \
@@ -260,7 +336,7 @@ static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c)
const vector unsigned char vg##i = \
vec_perm(vf##i, vbT##i, permHH); \
const vector unsigned char mask##i = \
- vec_perm((vector unsigned char)zero, neg1, perms##i); \
+ vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
const vector unsigned char vg2##i = \
vec_perm(vg##i, vg##i, perms##i); \
const vector unsigned char svA##i = \
@@ -270,16 +346,37 @@ static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c)
vec_st(svA##i, i * stride, src2); \
vec_st(svB##i, i * stride + 16, src2)
- PACK_AND_STORE(1);
- PACK_AND_STORE(2);
- PACK_AND_STORE(3);
- PACK_AND_STORE(4);
- PACK_AND_STORE(5);
- PACK_AND_STORE(6);
- PACK_AND_STORE(7);
- PACK_AND_STORE(8);
+#define PACK_AND_STORE_ALIGNED(i) \
+ const vector unsigned char vf##i = \
+ vec_packsu(vr##i, (vector signed short)zero); \
+ const vector unsigned char vg##i = \
+ vec_perm(vf##i, vbT##i, permHH); \
+ vec_st(vg##i, i * stride, src2)
+  // special casing the aligned case is worthwhile, as all calls from
+  // the (transposed) horizontal deblocks will be aligned, in addition
+  // to the naturally aligned vertical deblocks.
+ if (properStride && srcAlign) {
+ PACK_AND_STORE_ALIGNED(1);
+ PACK_AND_STORE_ALIGNED(2);
+ PACK_AND_STORE_ALIGNED(3);
+ PACK_AND_STORE_ALIGNED(4);
+ PACK_AND_STORE_ALIGNED(5);
+ PACK_AND_STORE_ALIGNED(6);
+ PACK_AND_STORE_ALIGNED(7);
+ PACK_AND_STORE_ALIGNED(8);
+ } else {
+ PACK_AND_STORE(1);
+ PACK_AND_STORE(2);
+ PACK_AND_STORE(3);
+ PACK_AND_STORE(4);
+ PACK_AND_STORE(5);
+ PACK_AND_STORE(6);
+ PACK_AND_STORE(7);
+ PACK_AND_STORE(8);
+ }
#undef PACK_AND_STORE
+#undef PACK_AND_STORE_ALIGNED
}
@@ -383,12 +480,10 @@ static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext
/* finally, stores */
const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero);
-
- const vector unsigned char neg1 = (vector unsigned char)AVV(-1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1);
-
- const vector unsigned char permHH = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
- 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
+
+ const vector signed char neg1 = vec_splat_s8(-1);
+ const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
#define STORE(i) \
const vector unsigned char perms##i = \
@@ -396,7 +491,7 @@ static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext
const vector unsigned char vg##i = \
vec_perm(st##i, vbT##i, permHH); \
const vector unsigned char mask##i = \
- vec_perm((vector unsigned char)zero, neg1, perms##i); \
+ vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
const vector unsigned char vg2##i = \
vec_perm(vg##i, vg##i, perms##i); \
const vector unsigned char svA##i = \
@@ -680,7 +775,7 @@ static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
ITER(6, 7, 8);
ITER(7, 8, 9);
- const vector signed char neg1 = vec_splat_s8( -1 );
+ const vector signed char neg1 = vec_splat_s8(-1);
#define STORE_LINE(i) \
const vector unsigned char permST##i = \
@@ -708,6 +803,394 @@ static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
#undef F2
}
-#define horizClassify_altivec(a...) horizClassify_C(a)
#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
+#define do_a_deblock_altivec(a...) do_a_deblock_C(a)
+
+static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
+ uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
+{
+ const vector signed int zero = vec_splat_s32(0);
+ const vector signed short vsint16_1 = vec_splat_s16(1);
+ vector signed int v_dp = zero;
+ vector signed int v_sysdp = zero;
+ int d, sysd, i;
+
+ tempBluredPast[127]= maxNoise[0];
+ tempBluredPast[128]= maxNoise[1];
+ tempBluredPast[129]= maxNoise[2];
+
+#define LOAD_LINE(src, i) \
+ register int j##src##i = i * stride; \
+ vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \
+ const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
+ const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
+ const vector unsigned char v_##src##A##i = \
+ vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \
+ vector signed short v_##src##Ass##i = \
+ (vector signed short)vec_mergeh((vector signed char)zero, \
+ (vector signed char)v_##src##A##i)
+
+ LOAD_LINE(src, 0);
+ LOAD_LINE(src, 1);
+ LOAD_LINE(src, 2);
+ LOAD_LINE(src, 3);
+ LOAD_LINE(src, 4);
+ LOAD_LINE(src, 5);
+ LOAD_LINE(src, 6);
+ LOAD_LINE(src, 7);
+
+ LOAD_LINE(tempBlured, 0);
+ LOAD_LINE(tempBlured, 1);
+ LOAD_LINE(tempBlured, 2);
+ LOAD_LINE(tempBlured, 3);
+ LOAD_LINE(tempBlured, 4);
+ LOAD_LINE(tempBlured, 5);
+ LOAD_LINE(tempBlured, 6);
+ LOAD_LINE(tempBlured, 7);
+#undef LOAD_LINE
+
+#define ACCUMULATE_DIFFS(i) \
+ vector signed short v_d##i = vec_sub(v_tempBluredAss##i, \
+ v_srcAss##i); \
+ v_dp = vec_msums(v_d##i, v_d##i, v_dp); \
+ v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp)
+
+ ACCUMULATE_DIFFS(0);
+ ACCUMULATE_DIFFS(1);
+ ACCUMULATE_DIFFS(2);
+ ACCUMULATE_DIFFS(3);
+ ACCUMULATE_DIFFS(4);
+ ACCUMULATE_DIFFS(5);
+ ACCUMULATE_DIFFS(6);
+ ACCUMULATE_DIFFS(7);
+#undef ACCUMULATE_DIFFS
+
+ v_dp = vec_sums(v_dp, zero);
+ v_sysdp = vec_sums(v_sysdp, zero);
+
+ v_dp = vec_splat(v_dp, 3);
+ v_sysdp = vec_splat(v_sysdp, 3);
+
+ vec_ste(v_dp, 0, &d);
+ vec_ste(v_sysdp, 0, &sysd);
+
+ i = d;
+ d = (4*d
+ +(*(tempBluredPast-256))
+ +(*(tempBluredPast-1))+ (*(tempBluredPast+1))
+ +(*(tempBluredPast+256))
+ +4)>>3;
+
+ *tempBluredPast=i;
+
+ if (d > maxNoise[1]) {
+ if (d < maxNoise[2]) {
+#define OP(i) v_tempBluredAss##i = vec_avg(v_tempBluredAss##i, v_srcAss##i);
+
+ OP(0);
+ OP(1);
+ OP(2);
+ OP(3);
+ OP(4);
+ OP(5);
+ OP(6);
+ OP(7);
+#undef OP
+ } else {
+#define OP(i) v_tempBluredAss##i = v_srcAss##i;
+
+ OP(0);
+ OP(1);
+ OP(2);
+ OP(3);
+ OP(4);
+ OP(5);
+ OP(6);
+ OP(7);
+#undef OP
+ }
+ } else {
+ if (d < maxNoise[0]) {
+ const vector signed short vsint16_7 = vec_splat_s16(7);
+ const vector signed short vsint16_4 = vec_splat_s16(4);
+ const vector unsigned short vuint16_3 = vec_splat_u16(3);
+
+#define OP(i) \
+ const vector signed short v_temp##i = \
+ vec_mladd(v_tempBluredAss##i, \
+ vsint16_7, v_srcAss##i); \
+ const vector signed short v_temp2##i = \
+ vec_add(v_temp##i, vsint16_4); \
+ v_tempBluredAss##i = vec_sr(v_temp2##i, vuint16_3)
+
+ OP(0);
+ OP(1);
+ OP(2);
+ OP(3);
+ OP(4);
+ OP(5);
+ OP(6);
+ OP(7);
+#undef OP
+ } else {
+ const vector signed short vsint16_3 = vec_splat_s16(3);
+ const vector signed short vsint16_2 = vec_splat_s16(2);
+
+#define OP(i) \
+ const vector signed short v_temp##i = \
+ vec_mladd(v_tempBluredAss##i, \
+ vsint16_3, v_srcAss##i); \
+ const vector signed short v_temp2##i = \
+ vec_add(v_temp##i, vsint16_2); \
+ v_tempBluredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2)
+
+ OP(0);
+ OP(1);
+ OP(2);
+ OP(3);
+ OP(4);
+ OP(5);
+ OP(6);
+ OP(7);
+#undef OP
+ }
+ }
+
+ const vector signed char neg1 = vec_splat_s8(-1);
+ const vector unsigned char permHH = (const vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
+
+#define PACK_AND_STORE(src, i) \
+ const vector unsigned char perms##src##i = \
+ vec_lvsr(i * stride, src); \
+ const vector unsigned char vf##src##i = \
+ vec_packsu(v_tempBluredAss##i, (vector signed short)zero); \
+ const vector unsigned char vg##src##i = \
+ vec_perm(vf##src##i, v_##src##A##i, permHH); \
+ const vector unsigned char mask##src##i = \
+ vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \
+ const vector unsigned char vg2##src##i = \
+ vec_perm(vg##src##i, vg##src##i, perms##src##i); \
+ const vector unsigned char svA##src##i = \
+ vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i); \
+ const vector unsigned char svB##src##i = \
+ vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i); \
+ vec_st(svA##src##i, i * stride, src); \
+ vec_st(svB##src##i, i * stride + 16, src)
+
+ PACK_AND_STORE(src, 0);
+ PACK_AND_STORE(src, 1);
+ PACK_AND_STORE(src, 2);
+ PACK_AND_STORE(src, 3);
+ PACK_AND_STORE(src, 4);
+ PACK_AND_STORE(src, 5);
+ PACK_AND_STORE(src, 6);
+ PACK_AND_STORE(src, 7);
+ PACK_AND_STORE(tempBlured, 0);
+ PACK_AND_STORE(tempBlured, 1);
+ PACK_AND_STORE(tempBlured, 2);
+ PACK_AND_STORE(tempBlured, 3);
+ PACK_AND_STORE(tempBlured, 4);
+ PACK_AND_STORE(tempBlured, 5);
+ PACK_AND_STORE(tempBlured, 6);
+ PACK_AND_STORE(tempBlured, 7);
+#undef PACK_AND_STORE
+}
+
+static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
+ const vector unsigned char zero = vec_splat_u8(0);
+
+#define LOAD_DOUBLE_LINE(i, j) \
+ vector unsigned char perm1##i = vec_lvsl(i * stride, src); \
+ vector unsigned char perm2##i = vec_lvsl(j * stride, src); \
+ vector unsigned char srcA##i = vec_ld(i * stride, src); \
+ vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \
+ vector unsigned char srcC##i = vec_ld(j * stride, src); \
+ vector unsigned char srcD##i = vec_ld(j * stride+ 16, src); \
+ vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
+ vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)
+
+ LOAD_DOUBLE_LINE(0, 1);
+ LOAD_DOUBLE_LINE(2, 3);
+ LOAD_DOUBLE_LINE(4, 5);
+ LOAD_DOUBLE_LINE(6, 7);
+#undef LOAD_DOUBLE_LINE
+
+ vector unsigned char tempA = vec_mergeh(src0, zero);
+ vector unsigned char tempB = vec_mergel(src0, zero);
+ vector unsigned char tempC = vec_mergeh(src1, zero);
+ vector unsigned char tempD = vec_mergel(src1, zero);
+ vector unsigned char tempE = vec_mergeh(src2, zero);
+ vector unsigned char tempF = vec_mergel(src2, zero);
+ vector unsigned char tempG = vec_mergeh(src3, zero);
+ vector unsigned char tempH = vec_mergel(src3, zero);
+ vector unsigned char tempI = vec_mergeh(src4, zero);
+ vector unsigned char tempJ = vec_mergel(src4, zero);
+ vector unsigned char tempK = vec_mergeh(src5, zero);
+ vector unsigned char tempL = vec_mergel(src5, zero);
+ vector unsigned char tempM = vec_mergeh(src6, zero);
+ vector unsigned char tempN = vec_mergel(src6, zero);
+ vector unsigned char tempO = vec_mergeh(src7, zero);
+ vector unsigned char tempP = vec_mergel(src7, zero);
+
+ vector unsigned char temp0 = vec_mergeh(tempA, tempI);
+ vector unsigned char temp1 = vec_mergel(tempA, tempI);
+ vector unsigned char temp2 = vec_mergeh(tempB, tempJ);
+ vector unsigned char temp3 = vec_mergel(tempB, tempJ);
+ vector unsigned char temp4 = vec_mergeh(tempC, tempK);
+ vector unsigned char temp5 = vec_mergel(tempC, tempK);
+ vector unsigned char temp6 = vec_mergeh(tempD, tempL);
+ vector unsigned char temp7 = vec_mergel(tempD, tempL);
+ vector unsigned char temp8 = vec_mergeh(tempE, tempM);
+ vector unsigned char temp9 = vec_mergel(tempE, tempM);
+ vector unsigned char temp10 = vec_mergeh(tempF, tempN);
+ vector unsigned char temp11 = vec_mergel(tempF, tempN);
+ vector unsigned char temp12 = vec_mergeh(tempG, tempO);
+ vector unsigned char temp13 = vec_mergel(tempG, tempO);
+ vector unsigned char temp14 = vec_mergeh(tempH, tempP);
+ vector unsigned char temp15 = vec_mergel(tempH, tempP);
+
+ tempA = vec_mergeh(temp0, temp8);
+ tempB = vec_mergel(temp0, temp8);
+ tempC = vec_mergeh(temp1, temp9);
+ tempD = vec_mergel(temp1, temp9);
+ tempE = vec_mergeh(temp2, temp10);
+ tempF = vec_mergel(temp2, temp10);
+ tempG = vec_mergeh(temp3, temp11);
+ tempH = vec_mergel(temp3, temp11);
+ tempI = vec_mergeh(temp4, temp12);
+ tempJ = vec_mergel(temp4, temp12);
+ tempK = vec_mergeh(temp5, temp13);
+ tempL = vec_mergel(temp5, temp13);
+ tempM = vec_mergeh(temp6, temp14);
+ tempN = vec_mergel(temp6, temp14);
+ tempO = vec_mergeh(temp7, temp15);
+ tempP = vec_mergel(temp7, temp15);
+
+ temp0 = vec_mergeh(tempA, tempI);
+ temp1 = vec_mergel(tempA, tempI);
+ temp2 = vec_mergeh(tempB, tempJ);
+ temp3 = vec_mergel(tempB, tempJ);
+ temp4 = vec_mergeh(tempC, tempK);
+ temp5 = vec_mergel(tempC, tempK);
+ temp6 = vec_mergeh(tempD, tempL);
+ temp7 = vec_mergel(tempD, tempL);
+ temp8 = vec_mergeh(tempE, tempM);
+ temp9 = vec_mergel(tempE, tempM);
+ temp10 = vec_mergeh(tempF, tempN);
+ temp11 = vec_mergel(tempF, tempN);
+ temp12 = vec_mergeh(tempG, tempO);
+ temp13 = vec_mergel(tempG, tempO);
+ temp14 = vec_mergeh(tempH, tempP);
+ temp15 = vec_mergel(tempH, tempP);
+
+ vec_st(temp0, 0, dst);
+ vec_st(temp1, 16, dst);
+ vec_st(temp2, 32, dst);
+ vec_st(temp3, 48, dst);
+ vec_st(temp4, 64, dst);
+ vec_st(temp5, 80, dst);
+ vec_st(temp6, 96, dst);
+ vec_st(temp7, 112, dst);
+ vec_st(temp8, 128, dst);
+ vec_st(temp9, 144, dst);
+ vec_st(temp10, 160, dst);
+ vec_st(temp11, 176, dst);
+ vec_st(temp12, 192, dst);
+ vec_st(temp13, 208, dst);
+ vec_st(temp14, 224, dst);
+ vec_st(temp15, 240, dst);
+}
+
+static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
+ const vector unsigned char zero = vec_splat_u8(0);
+ const vector unsigned char magic_perm = (const vector unsigned char)
+ AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
+
+#define LOAD_DOUBLE_LINE(i, j) \
+ vector unsigned char src##i = vec_ld(i * 16, src); \
+ vector unsigned char src##j = vec_ld(j * 16, src)
+
+ LOAD_DOUBLE_LINE(0, 1);
+ LOAD_DOUBLE_LINE(2, 3);
+ LOAD_DOUBLE_LINE(4, 5);
+ LOAD_DOUBLE_LINE(6, 7);
+ LOAD_DOUBLE_LINE(8, 9);
+ LOAD_DOUBLE_LINE(10, 11);
+ LOAD_DOUBLE_LINE(12, 13);
+ LOAD_DOUBLE_LINE(14, 15);
+#undef LOAD_DOUBLE_LINE
+
+ vector unsigned char tempA = vec_mergeh(src0, src8);
+ vector unsigned char tempB;
+ vector unsigned char tempC = vec_mergeh(src1, src9);
+ vector unsigned char tempD;
+ vector unsigned char tempE = vec_mergeh(src2, src10);
+ vector unsigned char tempG = vec_mergeh(src3, src11);
+ vector unsigned char tempI = vec_mergeh(src4, src12);
+ vector unsigned char tempJ;
+ vector unsigned char tempK = vec_mergeh(src5, src13);
+ vector unsigned char tempL;
+ vector unsigned char tempM = vec_mergeh(src6, src14);
+ vector unsigned char tempO = vec_mergeh(src7, src15);
+
+ vector unsigned char temp0 = vec_mergeh(tempA, tempI);
+ vector unsigned char temp1 = vec_mergel(tempA, tempI);
+ vector unsigned char temp2;
+ vector unsigned char temp3;
+ vector unsigned char temp4 = vec_mergeh(tempC, tempK);
+ vector unsigned char temp5 = vec_mergel(tempC, tempK);
+ vector unsigned char temp6;
+ vector unsigned char temp7;
+ vector unsigned char temp8 = vec_mergeh(tempE, tempM);
+ vector unsigned char temp9 = vec_mergel(tempE, tempM);
+ vector unsigned char temp12 = vec_mergeh(tempG, tempO);
+ vector unsigned char temp13 = vec_mergel(tempG, tempO);
+
+ tempA = vec_mergeh(temp0, temp8);
+ tempB = vec_mergel(temp0, temp8);
+ tempC = vec_mergeh(temp1, temp9);
+ tempD = vec_mergel(temp1, temp9);
+ tempI = vec_mergeh(temp4, temp12);
+ tempJ = vec_mergel(temp4, temp12);
+ tempK = vec_mergeh(temp5, temp13);
+ tempL = vec_mergel(temp5, temp13);
+
+ temp0 = vec_mergeh(tempA, tempI);
+ temp1 = vec_mergel(tempA, tempI);
+ temp2 = vec_mergeh(tempB, tempJ);
+ temp3 = vec_mergel(tempB, tempJ);
+ temp4 = vec_mergeh(tempC, tempK);
+ temp5 = vec_mergel(tempC, tempK);
+ temp6 = vec_mergeh(tempD, tempL);
+ temp7 = vec_mergel(tempD, tempL);
+
+
+ const vector signed char neg1 = vec_splat_s8(-1);
+#define STORE_DOUBLE_LINE(i, j) \
+ vector unsigned char dstA##i = vec_ld(i * stride, dst); \
+ vector unsigned char dstB##i = vec_ld(i * stride + 16, dst); \
+ vector unsigned char dstA##j = vec_ld(j * stride, dst); \
+ vector unsigned char dstB##j = vec_ld(j * stride+ 16, dst); \
+ vector unsigned char align##i = vec_lvsr(i * stride, dst); \
+ vector unsigned char align##j = vec_lvsr(j * stride, dst); \
+ vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \
+ vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \
+ vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i); \
+ vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j); \
+ vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \
+ vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \
+ vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \
+ vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \
+ vec_st(dstAF##i, i * stride, dst); \
+ vec_st(dstBF##i, i * stride + 16, dst); \
+ vec_st(dstAF##j, j * stride, dst); \
+ vec_st(dstBF##j, j * stride + 16, dst)
+
+ STORE_DOUBLE_LINE(0,1);
+ STORE_DOUBLE_LINE(2,3);
+ STORE_DOUBLE_LINE(4,5);
+ STORE_DOUBLE_LINE(6,7);
+}
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h b/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h
index db50fa3b5..01d4679ad 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h
+++ b/src/libffmpeg/libavcodec/libpostproc/postprocess_internal.h
@@ -37,9 +37,11 @@
// Experimental vertical filters
#define V_X1_FILTER 0x0200 // 512
+#define V_A_DEBLOCK 0x0400
// Experimental horizontal filters
#define H_X1_FILTER 0x2000 // 8192
+#define H_A_DEBLOCK 0x4000
/// select between full y range (255-0) or standard one (234-16)
#define FULL_Y_RANGE 0x8000 // 32768
@@ -158,3 +160,11 @@ typedef struct PPContext{
} PPContext;
+static inline void linecpy(void *dest, void *src, int lines, int stride)
+{
+ if (stride > 0) {
+ memcpy(dest, src, lines*stride);
+ } else {
+ memcpy(dest+(lines-1)*stride, src+(lines-1)*stride, -lines*stride);
+ }
+}
diff --git a/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c b/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c
index 4e81bd556..d1307caca 100644
--- a/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c
+++ b/src/libffmpeg/libavcodec/libpostproc/postprocess_template.c
@@ -22,15 +22,37 @@
*/
+#ifdef ARCH_X86_64
+# define REGa rax
+# define REGc rcx
+# define REGd rdx
+# define REG_a "rax"
+# define REG_c "rcx"
+# define REG_d "rdx"
+# define REG_SP "rsp"
+# define ALIGN_MASK "$0xFFFFFFFFFFFFFFF8"
+#else
+# define REGa eax
+# define REGc ecx
+# define REGd edx
+# define REG_a "eax"
+# define REG_c "ecx"
+# define REG_d "edx"
+# define REG_SP "esp"
+# define ALIGN_MASK "$0xFFFFFFF8"
+#endif
+
+
#undef PAVGB
#undef PMINUB
#undef PMAXUB
#ifdef HAVE_MMX2
-#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
+#define REAL_PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
-#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
+#define REAL_PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif
+#define PAVGB(a,b) REAL_PAVGB(a,b)
#ifdef HAVE_MMX2
#define PMINUB(a,b,t) "pminub " #a ", " #b " \n\t"
@@ -64,12 +86,12 @@ asm volatile(
);
asm volatile(
- "leal (%2, %3), %%eax \n\t"
+ "lea (%2, %3), %%"REG_a" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %1 eax eax+%2 eax+2%2 %1+4%2 ecx ecx+%2 ecx+2%2 %1+8%2 ecx+4%2
"movq (%2), %%mm0 \n\t"
- "movq (%%eax), %%mm1 \n\t"
+ "movq (%%"REG_a"), %%mm1 \n\t"
"movq %%mm0, %%mm3 \n\t"
"movq %%mm0, %%mm4 \n\t"
PMAXUB(%%mm1, %%mm4)
@@ -78,7 +100,7 @@ asm volatile(
"paddb %%mm7, %%mm0 \n\t"
"pcmpgtb %%mm6, %%mm0 \n\t"
- "movq (%%eax,%3), %%mm2 \n\t"
+ "movq (%%"REG_a",%3), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
@@ -86,7 +108,7 @@ asm volatile(
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
- "movq (%%eax, %3, 2), %%mm1 \n\t"
+ "movq (%%"REG_a", %3, 2), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
@@ -94,7 +116,7 @@ asm volatile(
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
- "leal (%%eax, %3, 4), %%eax \n\t"
+ "lea (%%"REG_a", %3, 4), %%"REG_a" \n\t"
"movq (%2, %3, 4), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
@@ -104,7 +126,7 @@ asm volatile(
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
- "movq (%%eax), %%mm1 \n\t"
+ "movq (%%"REG_a"), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
@@ -112,7 +134,7 @@ asm volatile(
"pcmpgtb %%mm6, %%mm2 \n\t"
"paddb %%mm2, %%mm0 \n\t"
- "movq (%%eax, %3), %%mm2 \n\t"
+ "movq (%%"REG_a", %3), %%mm2 \n\t"
PMAXUB(%%mm2, %%mm4)
PMINUB(%%mm2, %%mm3, %%mm5)
"psubb %%mm2, %%mm1 \n\t"
@@ -120,7 +142,7 @@ asm volatile(
"pcmpgtb %%mm6, %%mm1 \n\t"
"paddb %%mm1, %%mm0 \n\t"
- "movq (%%eax, %3, 2), %%mm1 \n\t"
+ "movq (%%"REG_a", %3, 2), %%mm1 \n\t"
PMAXUB(%%mm1, %%mm4)
PMINUB(%%mm1, %%mm3, %%mm5)
"psubb %%mm1, %%mm2 \n\t"
@@ -152,8 +174,8 @@ asm volatile(
"movd %%mm4, %1 \n\t"
: "=r" (numEq), "=r" (dcOk)
- : "r" (src), "r" (stride), "m" (c->pQPb)
- : "%eax"
+ : "r" (src), "r" ((long)stride), "m" (c->pQPb)
+ : "%"REG_a
);
numEq= (-numEq) &0xFF;
@@ -194,10 +216,10 @@ static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c)
"por %%mm2, %%mm6 \n\t"// First Line to Filter
"movq (%0, %1, 8), %%mm5 \n\t"
- "leal (%0, %1, 4), %%eax \n\t"
- "leal (%0, %1, 8), %%ecx \n\t"
- "subl %1, %%ecx \n\t"
- "addl %1, %0 \n\t" // %0 points to line 1 not 0
+ "lea (%0, %1, 4), %%"REG_a" \n\t"
+ "lea (%0, %1, 8), %%"REG_c" \n\t"
+ "sub %1, %%"REG_c" \n\t"
+ "add %1, %0 \n\t" // %0 points to line 1 not 0
"movq (%0, %1, 8), %%mm7 \n\t"
"movq %%mm5, %%mm1 \n\t"
"movq %%mm7, %%mm2 \n\t"
@@ -225,7 +247,7 @@ static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c)
"movq (%0, %1, 4), %%mm2 \n\t" // 1
"movq %%mm2, %%mm5 \n\t" // 1
- PAVGB((%%eax), %%mm2) // 11 /2
+ PAVGB((%%REGa), %%mm2) // 11 /2
PAVGB((%0, %1, 2), %%mm2) // 211 /4
"movq %%mm2, %%mm3 \n\t" // 211 /4
"movq (%0), %%mm4 \n\t" // 1
@@ -237,15 +259,15 @@ static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c)
PAVGB(%%mm6, %%mm0) //1 1 /2
"movq %%mm4, %%mm3 \n\t" // 1
PAVGB((%0,%1,2), %%mm3) // 1 1 /2
- PAVGB((%%eax,%1,2), %%mm5) // 11 /2
- PAVGB((%%eax), %%mm5) // 211 /4
+ PAVGB((%%REGa,%1,2), %%mm5) // 11 /2
+ PAVGB((%%REGa), %%mm5) // 211 /4
PAVGB(%%mm5, %%mm3) // 2 2211 /8
PAVGB(%%mm0, %%mm3) //4242211 /16
"movq %%mm3, (%0,%1) \n\t" // X
// mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
PAVGB(%%mm4, %%mm6) //11 /2
- "movq (%%ecx), %%mm0 \n\t" // 1
- PAVGB((%%eax, %1, 2), %%mm0) // 11/2
+ "movq (%%"REG_c"), %%mm0 \n\t" // 1
+ PAVGB((%%REGa, %1, 2), %%mm0) // 11/2
"movq %%mm0, %%mm3 \n\t" // 11/2
PAVGB(%%mm1, %%mm0) // 2 11/4
PAVGB(%%mm6, %%mm0) //222 11/8
@@ -253,17 +275,17 @@ static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c)
"movq (%0, %1, 2), %%mm2 \n\t" // 1
"movq %%mm0, (%0, %1, 2) \n\t" // X
// mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
- "movq (%%eax, %1, 4), %%mm0 \n\t" // 1
- PAVGB((%%ecx), %%mm0) // 11 /2
+ "movq (%%"REG_a", %1, 4), %%mm0 \n\t" // 1
+ PAVGB((%%REGc), %%mm0) // 11 /2
PAVGB(%%mm0, %%mm6) //11 11 /4
PAVGB(%%mm1, %%mm4) // 11 /2
PAVGB(%%mm2, %%mm1) // 11 /2
PAVGB(%%mm1, %%mm6) //1122 11 /8
PAVGB(%%mm5, %%mm6) //112242211 /16
- "movq (%%eax), %%mm5 \n\t" // 1
- "movq %%mm6, (%%eax) \n\t" // X
+ "movq (%%"REG_a"), %%mm5 \n\t" // 1
+ "movq %%mm6, (%%"REG_a") \n\t" // X
// mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
- "movq (%%eax, %1, 4), %%mm6 \n\t" // 1
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t" // 1
PAVGB(%%mm7, %%mm6) // 11 /2
PAVGB(%%mm4, %%mm6) // 11 11 /4
PAVGB(%%mm3, %%mm6) // 11 2211 /8
@@ -276,29 +298,29 @@ static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c)
PAVGB(%%mm7, %%mm1) // 11 2 /4
PAVGB(%%mm4, %%mm5) // 11 /2
PAVGB(%%mm5, %%mm0) // 11 11 /4
- "movq (%%eax, %1, 2), %%mm6 \n\t" // 1
+ "movq (%%"REG_a", %1, 2), %%mm6 \n\t" // 1
PAVGB(%%mm6, %%mm1) // 11 4 2 /8
PAVGB(%%mm0, %%mm1) // 11224222 /16
- "movq %%mm1, (%%eax, %1, 2) \n\t" // X
+ "movq %%mm1, (%%"REG_a", %1, 2) \n\t" // X
// mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
- PAVGB((%%ecx), %%mm2) // 112 4 /8
- "movq (%%eax, %1, 4), %%mm0 \n\t" // 1
+ PAVGB((%%REGc), %%mm2) // 112 4 /8
+ "movq (%%"REG_a", %1, 4), %%mm0 \n\t" // 1
PAVGB(%%mm0, %%mm6) // 1 1 /2
PAVGB(%%mm7, %%mm6) // 1 12 /4
PAVGB(%%mm2, %%mm6) // 1122424 /4
- "movq %%mm6, (%%ecx) \n\t" // X
+ "movq %%mm6, (%%"REG_c") \n\t" // X
// mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
PAVGB(%%mm7, %%mm5) // 11 2 /4
PAVGB(%%mm7, %%mm5) // 11 6 /8
PAVGB(%%mm3, %%mm0) // 112 /4
PAVGB(%%mm0, %%mm5) // 112246 /16
- "movq %%mm5, (%%eax, %1, 4) \n\t" // X
- "subl %1, %0 \n\t"
+ "movq %%mm5, (%%"REG_a", %1, 4) \n\t" // X
+ "sub %1, %0 \n\t"
:
- : "r" (src), "r" (stride), "m" (c->pQPb)
- : "%eax", "%ecx"
+ : "r" (src), "r" ((long)stride), "m" (c->pQPb)
+ : "%"REG_a, "%"REG_c
);
#else
const int l1= stride;
@@ -317,25 +339,26 @@ static inline void RENAME(doVertLowPass)(uint8_t *src, int stride, PPContext *c)
const int first= ABS(src[0] - src[l1]) < c->QP ? src[0] : src[l1];
const int last= ABS(src[l8] - src[l9]) < c->QP ? src[l9] : src[l8];
- int sums[9];
- sums[0] = first + src[l1];
- sums[1] = src[l1] + src[l2];
- sums[2] = src[l2] + src[l3];
- sums[3] = src[l3] + src[l4];
- sums[4] = src[l4] + src[l5];
- sums[5] = src[l5] + src[l6];
- sums[6] = src[l6] + src[l7];
- sums[7] = src[l7] + src[l8];
- sums[8] = src[l8] + last;
-
- src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
- src[l2]= ((src[l2]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
- src[l3]= ((src[l3]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
- src[l4]= ((src[l4]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
- src[l5]= ((src[l5]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
- src[l6]= ((src[l6]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
- src[l7]= (((last + src[l7])<<2) + ((src[l8] + sums[5])<<1) + sums[3] + 8)>>4;
- src[l8]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
+ int sums[10];
+ sums[0] = 4*first + src[l1] + src[l2] + src[l3] + 4;
+ sums[1] = sums[0] - first + src[l4];
+ sums[2] = sums[1] - first + src[l5];
+ sums[3] = sums[2] - first + src[l6];
+ sums[4] = sums[3] - first + src[l7];
+ sums[5] = sums[4] - src[l1] + src[l8];
+ sums[6] = sums[5] - src[l2] + last;
+ sums[7] = sums[6] - src[l3] + last;
+ sums[8] = sums[7] - src[l4] + last;
+ sums[9] = sums[8] - src[l5] + last;
+
+ src[l1]= (sums[0] + sums[2] + 2*src[l1])>>4;
+ src[l2]= (sums[1] + sums[3] + 2*src[l2])>>4;
+ src[l3]= (sums[2] + sums[4] + 2*src[l3])>>4;
+ src[l4]= (sums[3] + sums[5] + 2*src[l4])>>4;
+ src[l5]= (sums[4] + sums[6] + 2*src[l5])>>4;
+ src[l6]= (sums[5] + sums[7] + 2*src[l6])>>4;
+ src[l7]= (sums[6] + sums[8] + 2*src[l7])>>4;
+ src[l8]= (sums[7] + sums[9] + 2*src[l8])>>4;
src++;
}
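
The rewritten C fallback produces the same 9-tap low-pass as the code it replaces, but incrementally: sums[i] holds a rounded running sum of seven consecutive lines (with the clamped first/last value replicated past the block edge), so each output line is two overlapping windows plus a doubled centre sample, and stepping the window costs two adds instead of re-summing nine taps. Expanded, each output line is a (1 1 2 2 4 2 2 1 1)/16 kernel; a naive sketch of what src[l4] above evaluates to:

    /* naive form of the incremental computation; the two +4 rounding
       terms inside sums[3] and sums[5] supply the +8 */
    src[l4] = (   first    +   src[l1] + 2*src[l2] + 2*src[l3]
               + 4*src[l4] + 2*src[l5] + 2*src[l6] +   src[l7]
               +   src[l8] + 8 ) >> 4;
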
@@ -363,8 +386,8 @@ static inline void RENAME(vertRK1Filter)(uint8_t *src, int stride, int QP)
asm volatile(
"pxor %%mm7, %%mm7 \n\t" // 0
"movq "MANGLE(b80)", %%mm6 \n\t" // MIN_SIGNED_BYTE
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%ecx \n\t"
+ "leal (%0, %1), %%"REG_a" \n\t"
+ "leal (%%"REG_a", %1, 4), %%"REG_c" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
"movq "MANGLE(pQPb)", %%mm0 \n\t" // QP,..., QP
@@ -374,7 +397,7 @@ static inline void RENAME(vertRK1Filter)(uint8_t *src, int stride, int QP)
"pand "MANGLE(b3F)", %%mm0 \n\t" // QP/4,..., QP/4
"paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
"movq (%0, %1, 4), %%mm2 \n\t" // line 4
- "movq (%%ecx), %%mm3 \n\t" // line 5
+ "movq (%%"REG_c"), %%mm3 \n\t" // line 5
"movq %%mm2, %%mm4 \n\t" // line 4
"pcmpeqb %%mm5, %%mm5 \n\t" // -1
"pxor %%mm2, %%mm5 \n\t" // -line 4 - 1
@@ -392,32 +415,32 @@ static inline void RENAME(vertRK1Filter)(uint8_t *src, int stride, int QP)
// "psubb %%mm6, %%mm2 \n\t"
"movq %%mm2, (%0,%1, 4) \n\t"
- "movq (%%ecx), %%mm2 \n\t"
+ "movq (%%"REG_c"), %%mm2 \n\t"
// "paddb %%mm6, %%mm2 \n\t" // line 5 + 0x80
"psubb %%mm5, %%mm2 \n\t"
// "psubb %%mm6, %%mm2 \n\t"
- "movq %%mm2, (%%ecx) \n\t"
+ "movq %%mm2, (%%"REG_c") \n\t"
"paddb %%mm6, %%mm5 \n\t"
"psrlw $2, %%mm5 \n\t"
"pand "MANGLE(b3F)", %%mm5 \n\t"
"psubb "MANGLE(b20)", %%mm5 \n\t" // (l5-l4)/8
- "movq (%%eax, %1, 2), %%mm2 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm2 \n\t"
"paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
"paddsb %%mm5, %%mm2 \n\t"
"psubb %%mm6, %%mm2 \n\t"
- "movq %%mm2, (%%eax, %1, 2) \n\t"
+ "movq %%mm2, (%%"REG_a", %1, 2) \n\t"
- "movq (%%ecx, %1), %%mm2 \n\t"
+ "movq (%%"REG_c", %1), %%mm2 \n\t"
"paddb %%mm6, %%mm2 \n\t" // line 6 + 0x80
"psubsb %%mm5, %%mm2 \n\t"
"psubb %%mm6, %%mm2 \n\t"
- "movq %%mm2, (%%ecx, %1) \n\t"
+ "movq %%mm2, (%%"REG_c", %1) \n\t"
:
- : "r" (src), "r" (stride)
- : "%eax", "%ecx"
+ : "r" (src), "r" ((long)stride)
+ : "%"REG_a, "%"REG_c
);
#else
const int l1= stride;
@@ -463,18 +486,18 @@ static inline void RENAME(vertX1Filter)(uint8_t *src, int stride, PPContext *co)
asm volatile(
"pxor %%mm7, %%mm7 \n\t" // 0
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%ecx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
- "movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t" // line 3
"movq (%0, %1, 4), %%mm1 \n\t" // line 4
"movq %%mm1, %%mm2 \n\t" // line 4
"psubusb %%mm0, %%mm1 \n\t"
"psubusb %%mm2, %%mm0 \n\t"
"por %%mm1, %%mm0 \n\t" // |l2 - l3|
- "movq (%%ecx), %%mm3 \n\t" // line 5
- "movq (%%ecx, %1), %%mm4 \n\t" // line 6
+ "movq (%%"REG_c"), %%mm3 \n\t" // line 5
+ "movq (%%"REG_c", %1), %%mm4 \n\t" // line 6
"movq %%mm3, %%mm5 \n\t" // line 5
"psubusb %%mm4, %%mm3 \n\t"
"psubusb %%mm5, %%mm4 \n\t"
@@ -506,43 +529,43 @@ static inline void RENAME(vertX1Filter)(uint8_t *src, int stride, PPContext *co)
"pxor %%mm2, %%mm0 \n\t"
"movq %%mm0, (%0, %1, 4) \n\t" // line 4
- "movq (%%ecx), %%mm0 \n\t" // line 5
+ "movq (%%"REG_c"), %%mm0 \n\t" // line 5
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
"paddusb %%mm3, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
- "movq %%mm0, (%%ecx) \n\t" // line 5
+ "movq %%mm0, (%%"REG_c") \n\t" // line 5
PAVGB(%%mm7, %%mm1) // d/4
- "movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t" // line 3
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
"psubusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
- "movq %%mm0, (%%eax, %1, 2) \n\t" // line 3
+ "movq %%mm0, (%%"REG_a", %1, 2) \n\t" // line 3
- "movq (%%ecx, %1), %%mm0 \n\t" // line 6
+ "movq (%%"REG_c", %1), %%mm0 \n\t" // line 6
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
"paddusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
- "movq %%mm0, (%%ecx, %1) \n\t" // line 6
+ "movq %%mm0, (%%"REG_c", %1) \n\t" // line 6
PAVGB(%%mm7, %%mm1) // d/8
- "movq (%%eax, %1), %%mm0 \n\t" // line 2
+ "movq (%%"REG_a", %1), %%mm0 \n\t" // line 2
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
"psubusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
- "movq %%mm0, (%%eax, %1) \n\t" // line 2
+ "movq %%mm0, (%%"REG_a", %1) \n\t" // line 2
- "movq (%%ecx, %1, 2), %%mm0 \n\t" // line 7
+ "movq (%%"REG_c", %1, 2), %%mm0 \n\t" // line 7
"pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
"paddusb %%mm1, %%mm0 \n\t"
"pxor %%mm2, %%mm0 \n\t"
- "movq %%mm0, (%%ecx, %1, 2) \n\t" // line 7
+ "movq %%mm0, (%%"REG_c", %1, 2) \n\t" // line 7
:
- : "r" (src), "r" (stride), "m" (co->pQPb)
- : "%eax", "%ecx"
+ : "r" (src), "r" ((long)stride), "m" (co->pQPb)
+ : "%"REG_a, "%"REG_c
);
#else
@@ -607,8 +630,8 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
#if 0 //slightly more accurate and slightly slower
"pxor %%mm7, %%mm7 \n\t" // 0
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%ecx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ecx+%1 ecx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1
@@ -621,8 +644,8 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
PAVGB(%%mm1, %%mm0) // ~(l2 + 2l0)/4
PAVGB(%%mm2, %%mm0) // ~(5l2 + 2l0)/8
- "movq (%%eax), %%mm1 \n\t" // l1
- "movq (%%eax, %1, 2), %%mm3 \n\t" // l3
+ "movq (%%"REG_a"), %%mm1 \n\t" // l1
+ "movq (%%"REG_a", %1, 2), %%mm3 \n\t" // l3
"movq %%mm1, %%mm4 \n\t" // l1
PAVGB(%%mm7, %%mm1) // ~l1/2
PAVGB(%%mm3, %%mm1) // ~(l1 + 2l3)/4
@@ -640,7 +663,7 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
PAVGB(%%mm2, %%mm0) // ~(l4 + 2l2)/4
PAVGB(%%mm4, %%mm0) // ~(5l4 + 2l2)/8
- "movq (%%ecx), %%mm2 \n\t" // l5
+ "movq (%%"REG_c"), %%mm2 \n\t" // l5
"movq %%mm3, %%mm5 \n\t" // l3
PAVGB(%%mm7, %%mm3) // ~l3/2
PAVGB(%%mm2, %%mm3) // ~(l3 + 2l5)/4
@@ -653,13 +676,13 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
"pcmpeqb %%mm7, %%mm0 \n\t" // SIGN(2l2 - 5l3 + 5l4 - 2l5)
// mm0= SIGN(menergy), mm1= |lenergy|, mm2= l5, mm3= |menergy|, mm4=l4, mm5= l3, mm7=0
- "movq (%%ecx, %1), %%mm6 \n\t" // l6
+ "movq (%%"REG_c", %1), %%mm6 \n\t" // l6
"movq %%mm6, %%mm5 \n\t" // l6
PAVGB(%%mm7, %%mm6) // ~l6/2
PAVGB(%%mm4, %%mm6) // ~(l6 + 2l4)/4
PAVGB(%%mm5, %%mm6) // ~(5l6 + 2l4)/8
- "movq (%%ecx, %1, 2), %%mm5 \n\t" // l7
+ "movq (%%"REG_c", %1, 2), %%mm5 \n\t" // l7
"movq %%mm2, %%mm4 \n\t" // l5
PAVGB(%%mm7, %%mm2) // ~l5/2
PAVGB(%%mm5, %%mm2) // ~(l5 + 2l7)/4
@@ -686,7 +709,7 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
"paddusb %%mm1, %%mm3 \n\t"
// "paddusb "MANGLE(b01)", %%mm3 \n\t"
- "movq (%%eax, %1, 2), %%mm6 \n\t" //l3
+ "movq (%%"REG_a", %1, 2), %%mm6 \n\t" //l3
"movq (%0, %1, 4), %%mm5 \n\t" //l4
"movq (%0, %1, 4), %%mm4 \n\t" //l4
"psubusb %%mm6, %%mm5 \n\t"
@@ -700,7 +723,7 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
"psubusb "MANGLE(b01)", %%mm3 \n\t"
PAVGB(%%mm7, %%mm3)
- "movq (%%eax, %1, 2), %%mm0 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
"movq (%0, %1, 4), %%mm2 \n\t"
"pxor %%mm6, %%mm0 \n\t"
"pxor %%mm6, %%mm2 \n\t"
@@ -708,36 +731,36 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
"paddb %%mm3, %%mm2 \n\t"
"pxor %%mm6, %%mm0 \n\t"
"pxor %%mm6, %%mm2 \n\t"
- "movq %%mm0, (%%eax, %1, 2) \n\t"
+ "movq %%mm0, (%%"REG_a", %1, 2) \n\t"
"movq %%mm2, (%0, %1, 4) \n\t"
#endif
- "leal (%0, %1), %%eax \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
"pcmpeqb %%mm6, %%mm6 \n\t" // -1
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ecx+%1 ecx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1
- "movq (%%eax, %1, 2), %%mm1 \n\t" // l3
+ "movq (%%"REG_a", %1, 2), %%mm1 \n\t" // l3
"movq (%0, %1, 4), %%mm0 \n\t" // l4
"pxor %%mm6, %%mm1 \n\t" // -l3-1
PAVGB(%%mm1, %%mm0) // -q+128 = (l4-l3+256)/2
// mm1=-l3-1, mm0=128-q
- "movq (%%eax, %1, 4), %%mm2 \n\t" // l5
- "movq (%%eax, %1), %%mm3 \n\t" // l2
+ "movq (%%"REG_a", %1, 4), %%mm2 \n\t" // l5
+ "movq (%%"REG_a", %1), %%mm3 \n\t" // l2
"pxor %%mm6, %%mm2 \n\t" // -l5-1
"movq %%mm2, %%mm5 \n\t" // -l5-1
"movq "MANGLE(b80)", %%mm4 \n\t" // 128
- "leal (%%eax, %1, 4), %%ecx \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
PAVGB(%%mm3, %%mm2) // (l2-l5+256)/2
PAVGB(%%mm0, %%mm4) // ~(l4-l3)/4 + 128
PAVGB(%%mm2, %%mm4) // ~(l2-l5)/4 +(l4-l3)/8 + 128
PAVGB(%%mm0, %%mm4) // ~(l2-l5)/8 +5(l4-l3)/16 + 128
// mm1=-l3-1, mm0=128-q, mm3=l2, mm4=menergy/16 + 128, mm5= -l5-1
- "movq (%%eax), %%mm2 \n\t" // l1
+ "movq (%%"REG_a"), %%mm2 \n\t" // l1
"pxor %%mm6, %%mm2 \n\t" // -l1-1
PAVGB(%%mm3, %%mm2) // (l2-l1+256)/2
PAVGB((%0), %%mm1) // (l0-l3+256)/2
@@ -747,8 +770,8 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
PAVGB(%%mm2, %%mm3) // ~(l0-l3)/8 +5(l2-l1)/16 + 128
// mm0=128-q, mm3=lenergy/16 + 128, mm4= menergy/16 + 128, mm5= -l5-1
- PAVGB((%%ecx, %1), %%mm5) // (l6-l5+256)/2
- "movq (%%ecx, %1, 2), %%mm1 \n\t" // l7
+ PAVGB((%%REGc, %1), %%mm5) // (l6-l5+256)/2
+ "movq (%%"REG_c", %1, 2), %%mm1 \n\t" // l7
"pxor %%mm6, %%mm1 \n\t" // -l7-1
PAVGB((%0, %1, 4), %%mm1) // (l4-l7+256)/2
"movq "MANGLE(b80)", %%mm2 \n\t" // 128
@@ -797,7 +820,7 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
"pxor %%mm1, %%mm7 \n\t" // SIGN(d*q)
"pand %%mm7, %%mm4 \n\t"
- "movq (%%eax, %1, 2), %%mm0 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
"movq (%0, %1, 4), %%mm2 \n\t"
"pxor %%mm1, %%mm0 \n\t"
"pxor %%mm1, %%mm2 \n\t"
@@ -805,12 +828,12 @@ static inline void RENAME(doVertDefFilter)(uint8_t src[], int stride, PPContext
"psubb %%mm4, %%mm2 \n\t"
"pxor %%mm1, %%mm0 \n\t"
"pxor %%mm1, %%mm2 \n\t"
- "movq %%mm0, (%%eax, %1, 2) \n\t"
+ "movq %%mm0, (%%"REG_a", %1, 2) \n\t"
"movq %%mm2, (%0, %1, 4) \n\t"
:
- : "r" (src), "r" (stride), "m" (c->pQPb)
- : "%eax", "%ecx"
+ : "r" (src), "r" ((long)stride), "m" (c->pQPb)
+ : "%"REG_a, "%"REG_c
);
/*
@@ -881,8 +904,8 @@ src-=8;
src+= stride*4;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
- "leal -40(%%esp), %%ecx \n\t" // make space for 4 8-byte vars
- "andl $0xFFFFFFF8, %%ecx \n\t" // align
+ "lea -40(%%"REG_SP"), %%"REG_c" \n\t" // make space for 4 8-byte vars
+ "and "ALIGN_MASK", %%"REG_c" \n\t" // align
// 0 1 2 3 4 5 6 7
// %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 edx+%1 edx+2%1
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1
@@ -893,12 +916,12 @@ src-=8;
"punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
"movq (%0, %1), %%mm2 \n\t"
- "leal (%0, %1, 2), %%eax \n\t"
+ "lea (%0, %1, 2), %%"REG_a" \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
"punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
- "movq (%%eax), %%mm4 \n\t"
+ "movq (%%"REG_a"), %%mm4 \n\t"
"movq %%mm4, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
"punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
@@ -915,7 +938,7 @@ src-=8;
"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
- "movq (%%eax, %1), %%mm2 \n\t"
+ "movq (%%"REG_a", %1), %%mm2 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t" // L3
"punpckhbw %%mm7, %%mm3 \n\t" // H3
@@ -924,24 +947,24 @@ src-=8;
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
"psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
"psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
- "movq %%mm0, (%%ecx) \n\t" // 2L0 - 5L1 + 5L2 - 2L3
- "movq %%mm1, 8(%%ecx) \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+ "movq %%mm0, (%%"REG_c") \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "movq %%mm1, 8(%%"REG_c") \n\t" // 2H0 - 5H1 + 5H2 - 2H3
- "movq (%%eax, %1, 2), %%mm0 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t" // L4
"punpckhbw %%mm7, %%mm1 \n\t" // H4
"psubw %%mm0, %%mm2 \n\t" // L3 - L4
"psubw %%mm1, %%mm3 \n\t" // H3 - H4
- "movq %%mm2, 16(%%ecx) \n\t" // L3 - L4
- "movq %%mm3, 24(%%ecx) \n\t" // H3 - H4
+ "movq %%mm2, 16(%%"REG_c") \n\t" // L3 - L4
+ "movq %%mm3, 24(%%"REG_c") \n\t" // H3 - H4
"paddw %%mm4, %%mm4 \n\t" // 2L2
"paddw %%mm5, %%mm5 \n\t" // 2H2
"psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
"psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
- "leal (%%eax, %1), %0 \n\t"
+ "lea (%%"REG_a", %1), %0 \n\t"
"psllw $2, %%mm2 \n\t" // 4L3 - 4L4
"psllw $2, %%mm3 \n\t" // 4H3 - 4H4
"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
@@ -956,10 +979,10 @@ src-=8;
"psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
"psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
- "movq (%%eax, %1, 4), %%mm6 \n\t"
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t"
"punpcklbw %%mm7, %%mm6 \n\t" // L6
"psubw %%mm6, %%mm2 \n\t" // L5 - L6
- "movq (%%eax, %1, 4), %%mm6 \n\t"
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t"
"punpckhbw %%mm7, %%mm6 \n\t" // H6
"psubw %%mm6, %%mm3 \n\t" // H5 - H6
@@ -983,8 +1006,8 @@ src-=8;
"psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
"psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
- "movq (%%ecx), %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
- "movq 8(%%ecx), %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+ "movq (%%"REG_c"), %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "movq 8(%%"REG_c"), %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
#ifdef HAVE_MMX2
"movq %%mm7, %%mm6 \n\t" // 0
@@ -1030,6 +1053,9 @@ src-=8;
"psubw %%mm6, %%mm1 \n\t"
#endif
+ "movd %2, %%mm2 \n\t" // QP
+ "punpcklbw %%mm7, %%mm2 \n\t"
+
"movq %%mm7, %%mm6 \n\t" // 0
"pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
"pxor %%mm6, %%mm4 \n\t"
@@ -1038,7 +1064,6 @@ src-=8;
"pxor %%mm7, %%mm5 \n\t"
"psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes
- "movd %2, %%mm2 \n\t" // QP
"psllw $3, %%mm2 \n\t" // 8QP
"movq %%mm2, %%mm3 \n\t" // 8QP
"pcmpgtw %%mm4, %%mm2 \n\t"
@@ -1060,8 +1085,8 @@ src-=8;
"psrlw $6, %%mm4 \n\t"
"psrlw $6, %%mm5 \n\t"
- "movq 16(%%ecx), %%mm0 \n\t" // L3 - L4
- "movq 24(%%ecx), %%mm1 \n\t" // H3 - H4
+ "movq 16(%%"REG_c"), %%mm0 \n\t" // L3 - L4
+ "movq 24(%%"REG_c"), %%mm1 \n\t" // H3 - H4
"pxor %%mm2, %%mm2 \n\t"
"pxor %%mm3, %%mm3 \n\t"
@@ -1104,8 +1129,8 @@ src-=8;
"movq %%mm0, (%0, %1) \n\t"
: "+r" (src)
- : "r" (stride), "m" (c->pQPb)
- : "%eax", "%ecx"
+ : "r" ((long)stride), "m" (c->pQPb)
+ : "%"REG_a, "%"REG_c
);
#else
const int l1= stride;
@@ -1168,20 +1193,20 @@ static inline void RENAME(dering)(uint8_t src[], int stride, PPContext *c)
"packuswb %%mm0, %%mm0 \n\t"
"movq %%mm0, %3 \n\t"
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%edx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
#undef FIND_MIN_MAX
#ifdef HAVE_MMX2
-#define FIND_MIN_MAX(addr)\
+#define REAL_FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"pminub %%mm0, %%mm7 \n\t"\
"pmaxub %%mm0, %%mm6 \n\t"
#else
-#define FIND_MIN_MAX(addr)\
+#define REAL_FIND_MIN_MAX(addr)\
"movq " #addr ", %%mm0 \n\t"\
"movq %%mm7, %%mm1 \n\t"\
"psubusb %%mm0, %%mm6 \n\t"\
@@ -1189,14 +1214,15 @@ static inline void RENAME(dering)(uint8_t src[], int stride, PPContext *c)
"psubusb %%mm0, %%mm1 \n\t"\
"psubb %%mm1, %%mm7 \n\t"
#endif
+#define FIND_MIN_MAX(addr) REAL_FIND_MIN_MAX(addr)
-FIND_MIN_MAX((%%eax))
-FIND_MIN_MAX((%%eax, %1))
-FIND_MIN_MAX((%%eax, %1, 2))
+FIND_MIN_MAX((%%REGa))
+FIND_MIN_MAX((%%REGa, %1))
+FIND_MIN_MAX((%%REGa, %1, 2))
FIND_MIN_MAX((%0, %1, 4))
-FIND_MIN_MAX((%%edx))
-FIND_MIN_MAX((%%edx, %1))
-FIND_MIN_MAX((%%edx, %1, 2))
+FIND_MIN_MAX((%%REGd))
+FIND_MIN_MAX((%%REGd, %1))
+FIND_MIN_MAX((%%REGd, %1, 2))
FIND_MIN_MAX((%0, %1, 8))
"movq %%mm7, %%mm4 \n\t"
@@ -1249,13 +1275,13 @@ FIND_MIN_MAX((%0, %1, 8))
"movd %%mm6, %%ecx \n\t"
"cmpb "MANGLE(deringThreshold)", %%cl \n\t"
" jb 1f \n\t"
- "leal -24(%%esp), %%ecx \n\t"
- "andl $0xFFFFFFF8, %%ecx \n\t"
+ "lea -24(%%"REG_SP"), %%"REG_c" \n\t"
+ "and "ALIGN_MASK", %%"REG_c" \n\t"
PAVGB(%%mm0, %%mm7) // a=(max + min)/2
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
"punpcklbw %%mm7, %%mm7 \n\t"
- "movq %%mm7, (%%ecx) \n\t"
+ "movq %%mm7, (%%"REG_c") \n\t"
"movq (%0), %%mm0 \n\t" // L10
"movq %%mm0, %%mm1 \n\t" // L10
@@ -1280,13 +1306,13 @@ FIND_MIN_MAX((%0, %1, 8))
"paddb %%mm2, %%mm0 \n\t"
"paddb %%mm3, %%mm0 \n\t"
- "movq (%%eax), %%mm2 \n\t" // L11
+ "movq (%%"REG_a"), %%mm2 \n\t" // L11
"movq %%mm2, %%mm3 \n\t" // L11
"movq %%mm2, %%mm4 \n\t" // L11
"psllq $8, %%mm3 \n\t"
"psrlq $8, %%mm4 \n\t"
- "movd -4(%%eax), %%mm5 \n\t"
- "movd 8(%%eax), %%mm6 \n\t"
+ "movd -4(%%"REG_a"), %%mm5 \n\t"
+ "movd 8(%%"REG_a"), %%mm6 \n\t"
"psrlq $24, %%mm5 \n\t"
"psllq $56, %%mm6 \n\t"
"por %%mm5, %%mm3 \n\t" // L01
@@ -1303,7 +1329,7 @@ FIND_MIN_MAX((%0, %1, 8))
"paddb %%mm4, %%mm2 \n\t"
"paddb %%mm5, %%mm2 \n\t"
// 0, 2, 3, 1
-#define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
+#define REAL_DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
"movq " #src ", " #sx " \n\t" /* src[0] */\
"movq " #sx ", " #lx " \n\t" /* src[0] */\
"movq " #sx ", " #t0 " \n\t" /* src[0] */\
@@ -1319,8 +1345,8 @@ FIND_MIN_MAX((%0, %1, 8))
PAVGB(t0, lx) /* (src[-1] + src[+1])/2 */\
PAVGB(sx, lx) /* (src[-1] + 2src[0] + src[+1])/4 */\
PAVGB(lx, pplx) \
- "movq " #lx ", 8(%%ecx) \n\t"\
- "movq (%%ecx), " #lx " \n\t"\
+ "movq " #lx ", 8(%%"REG_c") \n\t"\
+ "movq (%%"REG_c"), " #lx " \n\t"\
"psubusb " #lx ", " #t1 " \n\t"\
"psubusb " #lx ", " #t0 " \n\t"\
"psubusb " #lx ", " #sx " \n\t"\
@@ -1347,8 +1373,10 @@ FIND_MIN_MAX((%0, %1, 8))
"pandn " #dst ", " #ppsx " \n\t"\
"por " #pplx ", " #ppsx " \n\t"\
"movq " #ppsx ", " #dst " \n\t"\
- "movq 8(%%ecx), " #lx " \n\t"
+ "movq 8(%%"REG_c"), " #lx " \n\t"
+#define DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1) \
+ REAL_DERING_CORE(dst,src,ppsx,psx,sx,pplx,plx,lx,t0,t1)
/*
0000000
1111111
@@ -1365,18 +1393,18 @@ FIND_MIN_MAX((%0, %1, 8))
*/
//DERING_CORE(dst,src ,ppsx ,psx ,sx ,pplx ,plx ,lx ,t0 ,t1)
-DERING_CORE((%%eax),(%%eax, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
-DERING_CORE((%%eax, %1),(%%eax, %1, 2) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
-DERING_CORE((%%eax, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
-DERING_CORE((%0, %1, 4),(%%edx) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
-DERING_CORE((%%edx),(%%edx, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
-DERING_CORE((%%edx, %1), (%%edx, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
-DERING_CORE((%%edx, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
-DERING_CORE((%0, %1, 8),(%%edx, %1, 4) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
+DERING_CORE((%%REGa),(%%REGa, %1) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
+DERING_CORE((%%REGa, %1),(%%REGa, %1, 2) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
+DERING_CORE((%%REGa, %1, 2),(%0, %1, 4) ,%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
+DERING_CORE((%0, %1, 4),(%%REGd) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
+DERING_CORE((%%REGd),(%%REGd, %1) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
+DERING_CORE((%%REGd, %1), (%%REGd, %1, 2),%%mm4,%%mm0,%%mm2,%%mm5,%%mm1,%%mm3,%%mm6,%%mm7)
+DERING_CORE((%%REGd, %1, 2),(%0, %1, 8) ,%%mm0,%%mm2,%%mm4,%%mm1,%%mm3,%%mm5,%%mm6,%%mm7)
+DERING_CORE((%0, %1, 8),(%%REGd, %1, 4) ,%%mm2,%%mm4,%%mm0,%%mm3,%%mm5,%%mm1,%%mm6,%%mm7)
"1: \n\t"
- : : "r" (src), "r" (stride), "m" (c->pQPb), "m"(c->pQPb2)
- : "%eax", "%edx", "%ecx"
+ : : "r" (src), "r" ((long)stride), "m" (c->pQPb), "m"(c->pQPb2)
+ : "%"REG_a, "%"REG_d, "%"REG_c
);
#else
int y;
@@ -1523,27 +1551,27 @@ static inline void RENAME(deInterlaceInterpolateLinear)(uint8_t src[], int strid
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= 4*stride;
asm volatile(
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%ecx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_c" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 ecx ecx+%1 ecx+2%1 %0+8%1 ecx+4%1
"movq (%0), %%mm0 \n\t"
- "movq (%%eax, %1), %%mm1 \n\t"
+ "movq (%%"REG_a", %1), %%mm1 \n\t"
PAVGB(%%mm1, %%mm0)
- "movq %%mm0, (%%eax) \n\t"
+ "movq %%mm0, (%%"REG_a") \n\t"
"movq (%0, %1, 4), %%mm0 \n\t"
PAVGB(%%mm0, %%mm1)
- "movq %%mm1, (%%eax, %1, 2) \n\t"
- "movq (%%ecx, %1), %%mm1 \n\t"
+ "movq %%mm1, (%%"REG_a", %1, 2) \n\t"
+ "movq (%%"REG_c", %1), %%mm1 \n\t"
PAVGB(%%mm1, %%mm0)
- "movq %%mm0, (%%ecx) \n\t"
+ "movq %%mm0, (%%"REG_c") \n\t"
"movq (%0, %1, 8), %%mm0 \n\t"
PAVGB(%%mm0, %%mm1)
- "movq %%mm1, (%%ecx, %1, 2) \n\t"
+ "movq %%mm1, (%%"REG_c", %1, 2) \n\t"
- : : "r" (src), "r" (stride)
- : "%eax", "%ecx"
+ : : "r" (src), "r" ((long)stride)
+ : "%"REG_a, "%"REG_c
);
#else
int a, b, x;
@@ -1576,15 +1604,15 @@ static inline void RENAME(deInterlaceInterpolateCubic)(uint8_t src[], int stride
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*3;
asm volatile(
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%edx \n\t"
- "leal (%%edx, %1, 4), %%ecx \n\t"
- "addl %1, %%ecx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
+ "lea (%%"REG_d", %1, 4), %%"REG_c" \n\t"
+ "add %1, %%"REG_c" \n\t"
"pxor %%mm7, %%mm7 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
-#define DEINT_CUBIC(a,b,c,d,e)\
+#define REAL_DEINT_CUBIC(a,b,c,d,e)\
"movq " #a ", %%mm0 \n\t"\
"movq " #b ", %%mm1 \n\t"\
"movq " #d ", %%mm2 \n\t"\
@@ -1605,14 +1633,15 @@ static inline void RENAME(deInterlaceInterpolateCubic)(uint8_t src[], int stride
"psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
"packuswb %%mm3, %%mm1 \n\t"\
"movq %%mm1, " #c " \n\t"
+#define DEINT_CUBIC(a,b,c,d,e) REAL_DEINT_CUBIC(a,b,c,d,e)
-DEINT_CUBIC((%0), (%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%edx, %1))
-DEINT_CUBIC((%%eax, %1), (%0, %1, 4), (%%edx), (%%edx, %1), (%0, %1, 8))
-DEINT_CUBIC((%0, %1, 4), (%%edx, %1), (%%edx, %1, 2), (%0, %1, 8), (%%ecx))
-DEINT_CUBIC((%%edx, %1), (%0, %1, 8), (%%edx, %1, 4), (%%ecx), (%%ecx, %1, 2))
+DEINT_CUBIC((%0), (%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4), (%%REGd, %1))
+DEINT_CUBIC((%%REGa, %1), (%0, %1, 4), (%%REGd), (%%REGd, %1), (%0, %1, 8))
+DEINT_CUBIC((%0, %1, 4), (%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8), (%%REGc))
+DEINT_CUBIC((%%REGd, %1), (%0, %1, 8), (%%REGd, %1, 4), (%%REGc), (%%REGc, %1, 2))
- : : "r" (src), "r" (stride)
- : "%eax", "%edx", "ecx"
+ : : "r" (src), "r" ((long)stride)
+ : "%"REG_a, "%"REG_d, "%"REG_c
);
#else
int x;
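
Per the comment inside REAL_DEINT_CUBIC, each interpolated line c is the 4-tap cubic (-1, 9, 9, -1)/16 of the two lines on either side, with packuswb supplying the clamp to 0..255. A scalar sketch of the kernel (the MMX version evaluates it on unpacked words; exact rounding may differ slightly):

    /* a,b,d,e: the four source lines; returns the pixel written to c */
    static inline int deint_cubic_pix(int a, int b, int d, int e){
        int v = (9*(b + d) - (a + e)) >> 4;
        return v < 0 ? 0 : v > 255 ? 255 : v;   /* packuswb-style clamp */
    }
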
@@ -1640,14 +1669,14 @@ static inline void RENAME(deInterlaceFF)(uint8_t src[], int stride, uint8_t *tmp
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*4;
asm volatile(
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%edx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
"pxor %%mm7, %%mm7 \n\t"
"movq (%2), %%mm0 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
-#define DEINT_FF(a,b,c,d)\
+#define REAL_DEINT_FF(a,b,c,d)\
"movq " #a ", %%mm1 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"movq " #c ", %%mm3 \n\t"\
@@ -1675,14 +1704,16 @@ static inline void RENAME(deInterlaceFF)(uint8_t src[], int stride, uint8_t *tmp
"packuswb %%mm4, %%mm1 \n\t"\
"movq %%mm1, " #b " \n\t"\
-DEINT_FF((%0) , (%%eax) , (%%eax, %1), (%%eax, %1, 2))
-DEINT_FF((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%edx) )
-DEINT_FF((%0, %1, 4), (%%edx) , (%%edx, %1), (%%edx, %1, 2))
-DEINT_FF((%%edx, %1), (%%edx, %1, 2), (%0, %1, 8), (%%edx, %1, 4))
+#define DEINT_FF(a,b,c,d) REAL_DEINT_FF(a,b,c,d)
+
+DEINT_FF((%0) , (%%REGa) , (%%REGa, %1), (%%REGa, %1, 2))
+DEINT_FF((%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4), (%%REGd) )
+DEINT_FF((%0, %1, 4), (%%REGd) , (%%REGd, %1), (%%REGd, %1, 2))
+DEINT_FF((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8), (%%REGd, %1, 4))
"movq %%mm0, (%2) \n\t"
- : : "r" (src), "r" (stride), "r"(tmp)
- : "%eax", "%edx"
+ : : "r" (src), "r" ((long)stride), "r"(tmp)
+ : "%"REG_a, "%"REG_d
);
#else
int x;
@@ -1718,15 +1749,15 @@ static inline void RENAME(deInterlaceL5)(uint8_t src[], int stride, uint8_t *tmp
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= stride*4;
asm volatile(
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%edx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
"pxor %%mm7, %%mm7 \n\t"
"movq (%2), %%mm0 \n\t"
"movq (%3), %%mm1 \n\t"
// 0 1 2 3 4 5 6 7 8 9 10
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1 ecx
-#define DEINT_L5(t1,t2,a,b,c)\
+#define REAL_DEINT_L5(t1,t2,a,b,c)\
"movq " #a ", %%mm2 \n\t"\
"movq " #b ", %%mm3 \n\t"\
"movq " #c ", %%mm4 \n\t"\
@@ -1759,19 +1790,21 @@ static inline void RENAME(deInterlaceL5)(uint8_t src[], int stride, uint8_t *tmp
"packuswb %%mm5, %%mm2 \n\t"\
"movq %%mm2, " #a " \n\t"\
-DEINT_L5(%%mm0, %%mm1, (%0) , (%%eax) , (%%eax, %1) )
-DEINT_L5(%%mm1, %%mm0, (%%eax) , (%%eax, %1) , (%%eax, %1, 2))
-DEINT_L5(%%mm0, %%mm1, (%%eax, %1) , (%%eax, %1, 2), (%0, %1, 4) )
-DEINT_L5(%%mm1, %%mm0, (%%eax, %1, 2), (%0, %1, 4) , (%%edx) )
-DEINT_L5(%%mm0, %%mm1, (%0, %1, 4) , (%%edx) , (%%edx, %1) )
-DEINT_L5(%%mm1, %%mm0, (%%edx) , (%%edx, %1) , (%%edx, %1, 2))
-DEINT_L5(%%mm0, %%mm1, (%%edx, %1) , (%%edx, %1, 2), (%0, %1, 8) )
-DEINT_L5(%%mm1, %%mm0, (%%edx, %1, 2), (%0, %1, 8) , (%%edx, %1, 4))
+#define DEINT_L5(t1,t2,a,b,c) REAL_DEINT_L5(t1,t2,a,b,c)
+
+DEINT_L5(%%mm0, %%mm1, (%0) , (%%REGa) , (%%REGa, %1) )
+DEINT_L5(%%mm1, %%mm0, (%%REGa) , (%%REGa, %1) , (%%REGa, %1, 2))
+DEINT_L5(%%mm0, %%mm1, (%%REGa, %1) , (%%REGa, %1, 2), (%0, %1, 4) )
+DEINT_L5(%%mm1, %%mm0, (%%REGa, %1, 2), (%0, %1, 4) , (%%REGd) )
+DEINT_L5(%%mm0, %%mm1, (%0, %1, 4) , (%%REGd) , (%%REGd, %1) )
+DEINT_L5(%%mm1, %%mm0, (%%REGd) , (%%REGd, %1) , (%%REGd, %1, 2))
+DEINT_L5(%%mm0, %%mm1, (%%REGd, %1) , (%%REGd, %1, 2), (%0, %1, 8) )
+DEINT_L5(%%mm1, %%mm0, (%%REGd, %1, 2), (%0, %1, 8) , (%%REGd, %1, 4))
"movq %%mm0, (%2) \n\t"
"movq %%mm1, (%3) \n\t"
- : : "r" (src), "r" (stride), "r"(tmp), "r"(tmp2)
- : "%eax", "%edx"
+ : : "r" (src), "r" ((long)stride), "r"(tmp), "r"(tmp2)
+ : "%"REG_a, "%"REG_d
);
#else
int x;
@@ -1818,49 +1851,49 @@ static inline void RENAME(deInterlaceBlendLinear)(uint8_t src[], int stride, uin
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
src+= 4*stride;
asm volatile(
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%edx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"movq (%2), %%mm0 \n\t" // L0
- "movq (%%eax), %%mm1 \n\t" // L2
+ "movq (%%"REG_a"), %%mm1 \n\t" // L2
PAVGB(%%mm1, %%mm0) // L0+L2
"movq (%0), %%mm2 \n\t" // L1
PAVGB(%%mm2, %%mm0)
"movq %%mm0, (%0) \n\t"
- "movq (%%eax, %1), %%mm0 \n\t" // L3
+ "movq (%%"REG_a", %1), %%mm0 \n\t" // L3
PAVGB(%%mm0, %%mm2) // L1+L3
PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
- "movq %%mm2, (%%eax) \n\t"
- "movq (%%eax, %1, 2), %%mm2 \n\t" // L4
+ "movq %%mm2, (%%"REG_a") \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm2 \n\t" // L4
PAVGB(%%mm2, %%mm1) // L2+L4
PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
- "movq %%mm1, (%%eax, %1) \n\t"
+ "movq %%mm1, (%%"REG_a", %1) \n\t"
"movq (%0, %1, 4), %%mm1 \n\t" // L5
PAVGB(%%mm1, %%mm0) // L3+L5
PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
- "movq %%mm0, (%%eax, %1, 2) \n\t"
- "movq (%%edx), %%mm0 \n\t" // L6
+ "movq %%mm0, (%%"REG_a", %1, 2) \n\t"
+ "movq (%%"REG_d"), %%mm0 \n\t" // L6
PAVGB(%%mm0, %%mm2) // L4+L6
PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
"movq %%mm2, (%0, %1, 4) \n\t"
- "movq (%%edx, %1), %%mm2 \n\t" // L7
+ "movq (%%"REG_d", %1), %%mm2 \n\t" // L7
PAVGB(%%mm2, %%mm1) // L5+L7
PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
- "movq %%mm1, (%%edx) \n\t"
- "movq (%%edx, %1, 2), %%mm1 \n\t" // L8
+ "movq %%mm1, (%%"REG_d") \n\t"
+ "movq (%%"REG_d", %1, 2), %%mm1 \n\t" // L8
PAVGB(%%mm1, %%mm0) // L6+L8
PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
- "movq %%mm0, (%%edx, %1) \n\t"
+ "movq %%mm0, (%%"REG_d", %1) \n\t"
"movq (%0, %1, 8), %%mm0 \n\t" // L9
PAVGB(%%mm0, %%mm2) // L7+L9
PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
- "movq %%mm2, (%%edx, %1, 2) \n\t"
+ "movq %%mm2, (%%"REG_d", %1, 2) \n\t"
"movq %%mm1, (%2) \n\t"
- : : "r" (src), "r" (stride), "r" (tmp)
- : "%eax", "%edx"
+ : : "r" (src), "r" ((long)stride), "r" (tmp)
+ : "%"REG_a, "%"REG_d
);
#else
int a, b, c, x;
@@ -1920,62 +1953,62 @@ static inline void RENAME(deInterlaceMedian)(uint8_t src[], int stride)
src+= 4*stride;
#ifdef HAVE_MMX2
asm volatile(
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%edx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"movq (%0), %%mm0 \n\t" //
- "movq (%%eax, %1), %%mm2 \n\t" //
- "movq (%%eax), %%mm1 \n\t" //
+ "movq (%%"REG_a", %1), %%mm2 \n\t" //
+ "movq (%%"REG_a"), %%mm1 \n\t" //
"movq %%mm0, %%mm3 \n\t"
"pmaxub %%mm1, %%mm0 \n\t" //
"pminub %%mm3, %%mm1 \n\t" //
"pmaxub %%mm2, %%mm1 \n\t" //
"pminub %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%%eax) \n\t"
+ "movq %%mm0, (%%"REG_a") \n\t"
"movq (%0, %1, 4), %%mm0 \n\t" //
- "movq (%%eax, %1, 2), %%mm1 \n\t" //
+ "movq (%%"REG_a", %1, 2), %%mm1 \n\t" //
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm1, %%mm2 \n\t" //
"pminub %%mm3, %%mm1 \n\t" //
"pmaxub %%mm0, %%mm1 \n\t" //
"pminub %%mm1, %%mm2 \n\t"
- "movq %%mm2, (%%eax, %1, 2) \n\t"
+ "movq %%mm2, (%%"REG_a", %1, 2) \n\t"
- "movq (%%edx), %%mm2 \n\t" //
- "movq (%%edx, %1), %%mm1 \n\t" //
+ "movq (%%"REG_d"), %%mm2 \n\t" //
+ "movq (%%"REG_d", %1), %%mm1 \n\t" //
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm0, %%mm2 \n\t" //
"pminub %%mm3, %%mm0 \n\t" //
"pmaxub %%mm1, %%mm0 \n\t" //
"pminub %%mm0, %%mm2 \n\t"
- "movq %%mm2, (%%edx) \n\t"
+ "movq %%mm2, (%%"REG_d") \n\t"
- "movq (%%edx, %1, 2), %%mm2 \n\t" //
+ "movq (%%"REG_d", %1, 2), %%mm2 \n\t" //
"movq (%0, %1, 8), %%mm0 \n\t" //
"movq %%mm2, %%mm3 \n\t"
"pmaxub %%mm0, %%mm2 \n\t" //
"pminub %%mm3, %%mm0 \n\t" //
"pmaxub %%mm1, %%mm0 \n\t" //
"pminub %%mm0, %%mm2 \n\t"
- "movq %%mm2, (%%edx, %1, 2) \n\t"
+ "movq %%mm2, (%%"REG_d", %1, 2) \n\t"
- : : "r" (src), "r" (stride)
- : "%eax", "%edx"
+ : : "r" (src), "r" ((long)stride)
+ : "%"REG_a, "%"REG_d
);
#else // MMX without MMX2
asm volatile(
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%edx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_d" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"pxor %%mm7, %%mm7 \n\t"
-#define MEDIAN(a,b,c)\
+#define REAL_MEDIAN(a,b,c)\
"movq " #a ", %%mm0 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"movq " #c ", %%mm1 \n\t"\
@@ -1998,14 +2031,15 @@ static inline void RENAME(deInterlaceMedian)(uint8_t src[], int stride)
"pand %%mm2, %%mm0 \n\t"\
"pand %%mm1, %%mm0 \n\t"\
"movq %%mm0, " #b " \n\t"
+#define MEDIAN(a,b,c) REAL_MEDIAN(a,b,c)
-MEDIAN((%0), (%%eax), (%%eax, %1))
-MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
-MEDIAN((%0, %1, 4), (%%edx), (%%edx, %1))
-MEDIAN((%%edx, %1), (%%edx, %1, 2), (%0, %1, 8))
+MEDIAN((%0), (%%REGa), (%%REGa, %1))
+MEDIAN((%%REGa, %1), (%%REGa, %1, 2), (%0, %1, 4))
+MEDIAN((%0, %1, 4), (%%REGd), (%%REGd, %1))
+MEDIAN((%%REGd, %1), (%%REGd, %1, 2), (%0, %1, 8))
- : : "r" (src), "r" (stride)
- : "%eax", "%edx"
+ : : "r" (src), "r" ((long)stride)
+ : "%"REG_a, "%"REG_d
);
#endif // MMX
#else
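
Both MEDIAN variants compute a per-byte median of three lines and write it over the middle one. The MMX2 path above is the standard four-operation min/max network; a scalar equivalent:

    /* median of three, as in the pmaxub/pminub sequence */
    static inline uint8_t median3(uint8_t a, uint8_t b, uint8_t c){
        uint8_t mx = a > b ? a : b;          /* pmaxub */
        uint8_t mn = a < b ? a : b;          /* pminub */
        uint8_t t  = mn > c ? mn : c;        /* max(min(a,b), c) */
        return mx < t ? mx : t;              /* min(max(a,b), t) */
    }

The plain-MMX REAL_MEDIAN above reaches the same result with compare/mask operations, since pmaxub/pminub only exist from MMX2 on.
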
@@ -2039,17 +2073,17 @@ MEDIAN((%%edx, %1), (%%edx, %1, 2), (%0, %1, 8))
static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src, int srcStride)
{
asm(
- "leal (%0, %1), %%eax \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"movq (%0), %%mm0 \n\t" // 12345678
- "movq (%%eax), %%mm1 \n\t" // abcdefgh
+ "movq (%%"REG_a"), %%mm1 \n\t" // abcdefgh
"movq %%mm0, %%mm2 \n\t" // 12345678
"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
- "movq (%%eax, %1), %%mm1 \n\t"
- "movq (%%eax, %1, 2), %%mm3 \n\t"
+ "movq (%%"REG_a", %1), %%mm1 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm3 \n\t"
"movq %%mm1, %%mm4 \n\t"
"punpcklbw %%mm3, %%mm1 \n\t"
"punpckhbw %%mm3, %%mm4 \n\t"
@@ -2076,16 +2110,16 @@ static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src
"psrlq $32, %%mm1 \n\t"
"movd %%mm1, 112(%3) \n\t"
- "leal (%%eax, %1, 4), %%eax \n\t"
+ "lea (%%"REG_a", %1, 4), %%"REG_a" \n\t"
"movq (%0, %1, 4), %%mm0 \n\t" // 12345678
- "movq (%%eax), %%mm1 \n\t" // abcdefgh
+ "movq (%%"REG_a"), %%mm1 \n\t" // abcdefgh
"movq %%mm0, %%mm2 \n\t" // 12345678
"punpcklbw %%mm1, %%mm0 \n\t" // 1a2b3c4d
"punpckhbw %%mm1, %%mm2 \n\t" // 5e6f7g8h
- "movq (%%eax, %1), %%mm1 \n\t"
- "movq (%%eax, %1, 2), %%mm3 \n\t"
+ "movq (%%"REG_a", %1), %%mm1 \n\t"
+ "movq (%%"REG_a", %1, 2), %%mm3 \n\t"
"movq %%mm1, %%mm4 \n\t"
"punpcklbw %%mm3, %%mm1 \n\t"
"punpckhbw %%mm3, %%mm4 \n\t"
@@ -2113,8 +2147,8 @@ static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src
"movd %%mm1, 116(%3) \n\t"
- :: "r" (src), "r" (srcStride), "r" (dst1), "r" (dst2)
- : "%eax"
+ :: "r" (src), "r" ((long)srcStride), "r" (dst1), "r" (dst2)
+ : "%"REG_a
);
}
@@ -2124,8 +2158,8 @@ static inline void RENAME(transpose1)(uint8_t *dst1, uint8_t *dst2, uint8_t *src
static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, uint8_t *src)
{
asm(
- "leal (%0, %1), %%eax \n\t"
- "leal (%%eax, %1, 4), %%edx \n\t"
+ "lea (%0, %1), %%"REG_a" \n\t"
+ "lea (%%"REG_a",%1,4), %%"REG_d"\n\t"
// 0 1 2 3 4 5 6 7 8 9
// %0 eax eax+%1 eax+2%1 %0+4%1 edx edx+%1 edx+2%1 %0+8%1 edx+4%1
"movq (%2), %%mm0 \n\t" // 12345678
@@ -2149,16 +2183,16 @@ static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, uint8_t *src)
"movd %%mm0, (%0) \n\t"
"psrlq $32, %%mm0 \n\t"
- "movd %%mm0, (%%eax) \n\t"
- "movd %%mm3, (%%eax, %1) \n\t"
+ "movd %%mm0, (%%"REG_a") \n\t"
+ "movd %%mm3, (%%"REG_a", %1) \n\t"
"psrlq $32, %%mm3 \n\t"
- "movd %%mm3, (%%eax, %1, 2) \n\t"
+ "movd %%mm3, (%%"REG_a", %1, 2) \n\t"
"movd %%mm2, (%0, %1, 4) \n\t"
"psrlq $32, %%mm2 \n\t"
- "movd %%mm2, (%%edx) \n\t"
- "movd %%mm1, (%%edx, %1) \n\t"
+ "movd %%mm2, (%%"REG_d") \n\t"
+ "movd %%mm1, (%%"REG_d", %1) \n\t"
"psrlq $32, %%mm1 \n\t"
- "movd %%mm1, (%%edx, %1, 2) \n\t"
+ "movd %%mm1, (%%"REG_d", %1, 2) \n\t"
"movq 64(%2), %%mm0 \n\t" // 12345678
@@ -2182,24 +2216,25 @@ static inline void RENAME(transpose2)(uint8_t *dst, int dstStride, uint8_t *src)
"movd %%mm0, 4(%0) \n\t"
"psrlq $32, %%mm0 \n\t"
- "movd %%mm0, 4(%%eax) \n\t"
- "movd %%mm3, 4(%%eax, %1) \n\t"
+ "movd %%mm0, 4(%%"REG_a") \n\t"
+ "movd %%mm3, 4(%%"REG_a", %1) \n\t"
"psrlq $32, %%mm3 \n\t"
- "movd %%mm3, 4(%%eax, %1, 2) \n\t"
+ "movd %%mm3, 4(%%"REG_a", %1, 2) \n\t"
"movd %%mm2, 4(%0, %1, 4) \n\t"
"psrlq $32, %%mm2 \n\t"
- "movd %%mm2, 4(%%edx) \n\t"
- "movd %%mm1, 4(%%edx, %1) \n\t"
+ "movd %%mm2, 4(%%"REG_d") \n\t"
+ "movd %%mm1, 4(%%"REG_d", %1) \n\t"
"psrlq $32, %%mm1 \n\t"
- "movd %%mm1, 4(%%edx, %1, 2) \n\t"
+ "movd %%mm1, 4(%%"REG_d", %1, 2) \n\t"
- :: "r" (dst), "r" (dstStride), "r" (src)
- : "%eax", "%edx"
+ :: "r" (dst), "r" ((long)dstStride), "r" (src)
+ : "%"REG_a, "%"REG_d
);
}
#endif
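
transpose1 and transpose2 above implement 8x8 byte transposes through rounds of punpcklbw/punpckhbw interleaving followed by movd stores of 32-bit halves; transpose1 scatters src into a 16-byte-strided temporary (two 8x8 blocks side by side), transpose2 gathers one back out with the real destination stride. The net effect per block is simply:

    /* scalar reference for what the interleave network computes */
    static void transpose8x8(uint8_t *dst, int dstStride,
                             const uint8_t *src, int srcStride){
        for(int y=0; y<8; y++)
            for(int x=0; x<8; x++)
                dst[x*dstStride + y] = src[y*srcStride + x];
    }
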
-//static int test=0;
+//static long test=0;
+#ifndef HAVE_ALTIVEC
static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
uint8_t *tempBlured, uint32_t *tempBluredPast, int *maxNoise)
{
@@ -2212,9 +2247,9 @@ static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
//#define L1_DIFF //you should change the thresholds too if you try that one
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
- "leal (%2, %2, 2), %%eax \n\t" // 3*stride
- "leal (%2, %2, 4), %%edx \n\t" // 5*stride
- "leal (%%edx, %2, 2), %%ecx \n\t" // 7*stride
+ "lea (%2, %2, 2), %%"REG_a" \n\t" // 3*stride
+ "lea (%2, %2, 4), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
// 0 1 2 3 4 5 6 7 8 9
// %x %x+%2 %x+2%2 %x+eax %x+4%2 %x+edx %x+2eax %x+ecx %x+8%2
//FIXME reorder?
@@ -2225,29 +2260,30 @@ static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
"psadbw (%1, %2), %%mm1 \n\t" // |L1-R1|
"movq (%0, %2, 2), %%mm2 \n\t" // L2
"psadbw (%1, %2, 2), %%mm2 \n\t" // |L2-R2|
- "movq (%0, %%eax), %%mm3 \n\t" // L3
- "psadbw (%1, %%eax), %%mm3 \n\t" // |L3-R3|
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
+ "psadbw (%1, %%"REG_a"), %%mm3 \n\t" // |L3-R3|
"movq (%0, %2, 4), %%mm4 \n\t" // L4
"paddw %%mm1, %%mm0 \n\t"
"psadbw (%1, %2, 4), %%mm4 \n\t" // |L4-R4|
- "movq (%0, %%edx), %%mm5 \n\t" // L5
+ "movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
"paddw %%mm2, %%mm0 \n\t"
- "psadbw (%1, %%edx), %%mm5 \n\t" // |L5-R5|
- "movq (%0, %%eax, 2), %%mm6 \n\t" // L6
+ "psadbw (%1, %%"REG_d"), %%mm5 \n\t" // |L5-R5|
+ "movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
"paddw %%mm3, %%mm0 \n\t"
- "psadbw (%1, %%eax, 2), %%mm6 \n\t" // |L6-R6|
- "movq (%0, %%ecx), %%mm7 \n\t" // L7
+ "psadbw (%1, %%"REG_a", 2), %%mm6 \n\t" // |L6-R6|
+ "movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
"paddw %%mm4, %%mm0 \n\t"
- "psadbw (%1, %%ecx), %%mm7 \n\t" // |L7-R7|
+ "psadbw (%1, %%"REG_c"), %%mm7 \n\t" // |L7-R7|
"paddw %%mm5, %%mm6 \n\t"
"paddw %%mm7, %%mm6 \n\t"
"paddw %%mm6, %%mm0 \n\t"
-#elif defined (FAST_L2_DIFF)
+#else
+#if defined (FAST_L2_DIFF)
"pcmpeqb %%mm7, %%mm7 \n\t"
"movq "MANGLE(b80)", %%mm6 \n\t"
"pxor %%mm0, %%mm0 \n\t"
-#define L2_DIFF_CORE(a, b)\
+#define REAL_L2_DIFF_CORE(a, b)\
"movq " #a ", %%mm5 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"pxor %%mm7, %%mm2 \n\t"\
@@ -2261,19 +2297,10 @@ static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
"psrld $14, %%mm5 \n\t"\
"paddd %%mm5, %%mm0 \n\t"
-L2_DIFF_CORE((%0), (%1))
-L2_DIFF_CORE((%0, %2), (%1, %2))
-L2_DIFF_CORE((%0, %2, 2), (%1, %2, 2))
-L2_DIFF_CORE((%0, %%eax), (%1, %%eax))
-L2_DIFF_CORE((%0, %2, 4), (%1, %2, 4))
-L2_DIFF_CORE((%0, %%edx), (%1, %%edx))
-L2_DIFF_CORE((%0, %%eax,2), (%1, %%eax,2))
-L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
-
#else
"pxor %%mm7, %%mm7 \n\t"
"pxor %%mm0, %%mm0 \n\t"
-#define L2_DIFF_CORE(a, b)\
+#define REAL_L2_DIFF_CORE(a, b)\
"movq " #a ", %%mm5 \n\t"\
"movq " #b ", %%mm2 \n\t"\
"movq %%mm5, %%mm1 \n\t"\
@@ -2289,14 +2316,18 @@ L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
"paddd %%mm1, %%mm5 \n\t"\
"paddd %%mm5, %%mm0 \n\t"
+#endif
+
+#define L2_DIFF_CORE(a, b) REAL_L2_DIFF_CORE(a, b)
+
L2_DIFF_CORE((%0), (%1))
L2_DIFF_CORE((%0, %2), (%1, %2))
L2_DIFF_CORE((%0, %2, 2), (%1, %2, 2))
-L2_DIFF_CORE((%0, %%eax), (%1, %%eax))
+L2_DIFF_CORE((%0, %%REGa), (%1, %%REGa))
L2_DIFF_CORE((%0, %2, 4), (%1, %2, 4))
-L2_DIFF_CORE((%0, %%edx), (%1, %%edx))
-L2_DIFF_CORE((%0, %%eax,2), (%1, %%eax,2))
-L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
+L2_DIFF_CORE((%0, %%REGd), (%1, %%REGd))
+L2_DIFF_CORE((%0, %%REGa,2), (%1, %%REGa,2))
+L2_DIFF_CORE((%0, %%REGc), (%1, %%REGc))
#endif
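
Whichever branch is compiled, the eight L2_DIFF_CORE invocations accumulate a squared-difference measure between the current block (%0) and the temporally blurred copy (%1) into %%mm0. The FAST_L2_DIFF variant first halves the differences (pavgb against the complemented operand) so the word squaring stays in range, then rescales with the psrld $14. Scalar meaning (a sketch):

    /* per 8-byte row; FAST_L2_DIFF approximates this on (a-b)/2 terms */
    for(int i=0; i<8; i++){
        int d1 = a[i] - b[i];
        d += d1*d1;
    }
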
@@ -2305,94 +2336,94 @@ L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
"paddd %%mm0, %%mm4 \n\t"
"movd %%mm4, %%ecx \n\t"
"shll $2, %%ecx \n\t"
- "movl %3, %%edx \n\t"
- "addl -4(%%edx), %%ecx \n\t"
- "addl 4(%%edx), %%ecx \n\t"
- "addl -1024(%%edx), %%ecx \n\t"
+ "mov %3, %%"REG_d" \n\t"
+ "addl -4(%%"REG_d"), %%ecx \n\t"
+ "addl 4(%%"REG_d"), %%ecx \n\t"
+ "addl -1024(%%"REG_d"), %%ecx \n\t"
"addl $4, %%ecx \n\t"
- "addl 1024(%%edx), %%ecx \n\t"
+ "addl 1024(%%"REG_d"), %%ecx \n\t"
"shrl $3, %%ecx \n\t"
- "movl %%ecx, (%%edx) \n\t"
+ "movl %%ecx, (%%"REG_d") \n\t"
-// "movl %3, %%ecx \n\t"
-// "movl %%ecx, test \n\t"
+// "mov %3, %%"REG_c" \n\t"
+// "mov %%"REG_c", test \n\t"
// "jmp 4f \n\t"
- "cmpl 512(%%edx), %%ecx \n\t"
+ "cmpl 512(%%"REG_d"), %%ecx \n\t"
" jb 2f \n\t"
- "cmpl 516(%%edx), %%ecx \n\t"
+ "cmpl 516(%%"REG_d"), %%ecx \n\t"
" jb 1f \n\t"
- "leal (%%eax, %2, 2), %%edx \n\t" // 5*stride
- "leal (%%edx, %2, 2), %%ecx \n\t" // 7*stride
+ "lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
"movq (%0), %%mm0 \n\t" // L0
"movq (%0, %2), %%mm1 \n\t" // L1
"movq (%0, %2, 2), %%mm2 \n\t" // L2
- "movq (%0, %%eax), %%mm3 \n\t" // L3
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
"movq (%0, %2, 4), %%mm4 \n\t" // L4
- "movq (%0, %%edx), %%mm5 \n\t" // L5
- "movq (%0, %%eax, 2), %%mm6 \n\t" // L6
- "movq (%0, %%ecx), %%mm7 \n\t" // L7
+ "movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
+ "movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
+ "movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
"movq %%mm0, (%1) \n\t" // L0
"movq %%mm1, (%1, %2) \n\t" // L1
"movq %%mm2, (%1, %2, 2) \n\t" // L2
- "movq %%mm3, (%1, %%eax) \n\t" // L3
+ "movq %%mm3, (%1, %%"REG_a") \n\t" // L3
"movq %%mm4, (%1, %2, 4) \n\t" // L4
- "movq %%mm5, (%1, %%edx) \n\t" // L5
- "movq %%mm6, (%1, %%eax, 2) \n\t" // L6
- "movq %%mm7, (%1, %%ecx) \n\t" // L7
+ "movq %%mm5, (%1, %%"REG_d") \n\t" // L5
+ "movq %%mm6, (%1, %%"REG_a", 2) \n\t" // L6
+ "movq %%mm7, (%1, %%"REG_c") \n\t" // L7
"jmp 4f \n\t"
"1: \n\t"
- "leal (%%eax, %2, 2), %%edx \n\t" // 5*stride
- "leal (%%edx, %2, 2), %%ecx \n\t" // 7*stride
+ "lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
"movq (%0), %%mm0 \n\t" // L0
PAVGB((%1), %%mm0) // L0
"movq (%0, %2), %%mm1 \n\t" // L1
PAVGB((%1, %2), %%mm1) // L1
"movq (%0, %2, 2), %%mm2 \n\t" // L2
PAVGB((%1, %2, 2), %%mm2) // L2
- "movq (%0, %%eax), %%mm3 \n\t" // L3
- PAVGB((%1, %%eax), %%mm3) // L3
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
+ PAVGB((%1, %%REGa), %%mm3) // L3
"movq (%0, %2, 4), %%mm4 \n\t" // L4
PAVGB((%1, %2, 4), %%mm4) // L4
- "movq (%0, %%edx), %%mm5 \n\t" // L5
- PAVGB((%1, %%edx), %%mm5) // L5
- "movq (%0, %%eax, 2), %%mm6 \n\t" // L6
- PAVGB((%1, %%eax, 2), %%mm6) // L6
- "movq (%0, %%ecx), %%mm7 \n\t" // L7
- PAVGB((%1, %%ecx), %%mm7) // L7
+ "movq (%0, %%"REG_d"), %%mm5 \n\t" // L5
+ PAVGB((%1, %%REGd), %%mm5) // L5
+ "movq (%0, %%"REG_a", 2), %%mm6 \n\t" // L6
+ PAVGB((%1, %%REGa, 2), %%mm6) // L6
+ "movq (%0, %%"REG_c"), %%mm7 \n\t" // L7
+ PAVGB((%1, %%REGc), %%mm7) // L7
"movq %%mm0, (%1) \n\t" // R0
"movq %%mm1, (%1, %2) \n\t" // R1
"movq %%mm2, (%1, %2, 2) \n\t" // R2
- "movq %%mm3, (%1, %%eax) \n\t" // R3
+ "movq %%mm3, (%1, %%"REG_a") \n\t" // R3
"movq %%mm4, (%1, %2, 4) \n\t" // R4
- "movq %%mm5, (%1, %%edx) \n\t" // R5
- "movq %%mm6, (%1, %%eax, 2) \n\t" // R6
- "movq %%mm7, (%1, %%ecx) \n\t" // R7
+ "movq %%mm5, (%1, %%"REG_d") \n\t" // R5
+ "movq %%mm6, (%1, %%"REG_a", 2) \n\t" // R6
+ "movq %%mm7, (%1, %%"REG_c") \n\t" // R7
"movq %%mm0, (%0) \n\t" // L0
"movq %%mm1, (%0, %2) \n\t" // L1
"movq %%mm2, (%0, %2, 2) \n\t" // L2
- "movq %%mm3, (%0, %%eax) \n\t" // L3
+ "movq %%mm3, (%0, %%"REG_a") \n\t" // L3
"movq %%mm4, (%0, %2, 4) \n\t" // L4
- "movq %%mm5, (%0, %%edx) \n\t" // L5
- "movq %%mm6, (%0, %%eax, 2) \n\t" // L6
- "movq %%mm7, (%0, %%ecx) \n\t" // L7
+ "movq %%mm5, (%0, %%"REG_d") \n\t" // L5
+ "movq %%mm6, (%0, %%"REG_a", 2) \n\t" // L6
+ "movq %%mm7, (%0, %%"REG_c") \n\t" // L7
"jmp 4f \n\t"
"2: \n\t"
- "cmpl 508(%%edx), %%ecx \n\t"
+ "cmpl 508(%%"REG_d"), %%ecx \n\t"
" jb 3f \n\t"
- "leal (%%eax, %2, 2), %%edx \n\t" // 5*stride
- "leal (%%edx, %2, 2), %%ecx \n\t" // 7*stride
+ "lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
"movq (%0), %%mm0 \n\t" // L0
"movq (%0, %2), %%mm1 \n\t" // L1
"movq (%0, %2, 2), %%mm2 \n\t" // L2
- "movq (%0, %%eax), %%mm3 \n\t" // L3
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
"movq (%1), %%mm4 \n\t" // R0
"movq (%1, %2), %%mm5 \n\t" // R1
"movq (%1, %2, 2), %%mm6 \n\t" // R2
- "movq (%1, %%eax), %%mm7 \n\t" // R3
+ "movq (%1, %%"REG_a"), %%mm7 \n\t" // R3
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
@@ -2404,20 +2435,20 @@ L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
"movq %%mm0, (%1) \n\t" // R0
"movq %%mm1, (%1, %2) \n\t" // R1
"movq %%mm2, (%1, %2, 2) \n\t" // R2
- "movq %%mm3, (%1, %%eax) \n\t" // R3
+ "movq %%mm3, (%1, %%"REG_a") \n\t" // R3
"movq %%mm0, (%0) \n\t" // L0
"movq %%mm1, (%0, %2) \n\t" // L1
"movq %%mm2, (%0, %2, 2) \n\t" // L2
- "movq %%mm3, (%0, %%eax) \n\t" // L3
+ "movq %%mm3, (%0, %%"REG_a") \n\t" // L3
"movq (%0, %2, 4), %%mm0 \n\t" // L4
- "movq (%0, %%edx), %%mm1 \n\t" // L5
- "movq (%0, %%eax, 2), %%mm2 \n\t" // L6
- "movq (%0, %%ecx), %%mm3 \n\t" // L7
+ "movq (%0, %%"REG_d"), %%mm1 \n\t" // L5
+ "movq (%0, %%"REG_a", 2), %%mm2 \n\t" // L6
+ "movq (%0, %%"REG_c"), %%mm3 \n\t" // L7
"movq (%1, %2, 4), %%mm4 \n\t" // R4
- "movq (%1, %%edx), %%mm5 \n\t" // R5
- "movq (%1, %%eax, 2), %%mm6 \n\t" // R6
- "movq (%1, %%ecx), %%mm7 \n\t" // R7
+ "movq (%1, %%"REG_d"), %%mm5 \n\t" // R5
+ "movq (%1, %%"REG_a", 2), %%mm6 \n\t" // R6
+ "movq (%1, %%"REG_c"), %%mm7 \n\t" // R7
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
@@ -2427,26 +2458,26 @@ L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
"movq %%mm0, (%1, %2, 4) \n\t" // R4
- "movq %%mm1, (%1, %%edx) \n\t" // R5
- "movq %%mm2, (%1, %%eax, 2) \n\t" // R6
- "movq %%mm3, (%1, %%ecx) \n\t" // R7
+ "movq %%mm1, (%1, %%"REG_d") \n\t" // R5
+ "movq %%mm2, (%1, %%"REG_a", 2) \n\t" // R6
+ "movq %%mm3, (%1, %%"REG_c") \n\t" // R7
"movq %%mm0, (%0, %2, 4) \n\t" // L4
- "movq %%mm1, (%0, %%edx) \n\t" // L5
- "movq %%mm2, (%0, %%eax, 2) \n\t" // L6
- "movq %%mm3, (%0, %%ecx) \n\t" // L7
+ "movq %%mm1, (%0, %%"REG_d") \n\t" // L5
+ "movq %%mm2, (%0, %%"REG_a", 2) \n\t" // L6
+ "movq %%mm3, (%0, %%"REG_c") \n\t" // L7
"jmp 4f \n\t"
"3: \n\t"
- "leal (%%eax, %2, 2), %%edx \n\t" // 5*stride
- "leal (%%edx, %2, 2), %%ecx \n\t" // 7*stride
+ "lea (%%"REG_a", %2, 2), %%"REG_d" \n\t" // 5*stride
+ "lea (%%"REG_d", %2, 2), %%"REG_c" \n\t" // 7*stride
"movq (%0), %%mm0 \n\t" // L0
"movq (%0, %2), %%mm1 \n\t" // L1
"movq (%0, %2, 2), %%mm2 \n\t" // L2
- "movq (%0, %%eax), %%mm3 \n\t" // L3
+ "movq (%0, %%"REG_a"), %%mm3 \n\t" // L3
"movq (%1), %%mm4 \n\t" // R0
"movq (%1, %2), %%mm5 \n\t" // R1
"movq (%1, %2, 2), %%mm6 \n\t" // R2
- "movq (%1, %%eax), %%mm7 \n\t" // R3
+ "movq (%1, %%"REG_a"), %%mm7 \n\t" // R3
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
@@ -2462,20 +2493,20 @@ L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
"movq %%mm0, (%1) \n\t" // R0
"movq %%mm1, (%1, %2) \n\t" // R1
"movq %%mm2, (%1, %2, 2) \n\t" // R2
- "movq %%mm3, (%1, %%eax) \n\t" // R3
+ "movq %%mm3, (%1, %%"REG_a") \n\t" // R3
"movq %%mm0, (%0) \n\t" // L0
"movq %%mm1, (%0, %2) \n\t" // L1
"movq %%mm2, (%0, %2, 2) \n\t" // L2
- "movq %%mm3, (%0, %%eax) \n\t" // L3
+ "movq %%mm3, (%0, %%"REG_a") \n\t" // L3
"movq (%0, %2, 4), %%mm0 \n\t" // L4
- "movq (%0, %%edx), %%mm1 \n\t" // L5
- "movq (%0, %%eax, 2), %%mm2 \n\t" // L6
- "movq (%0, %%ecx), %%mm3 \n\t" // L7
+ "movq (%0, %%"REG_d"), %%mm1 \n\t" // L5
+ "movq (%0, %%"REG_a", 2), %%mm2 \n\t" // L6
+ "movq (%0, %%"REG_c"), %%mm3 \n\t" // L7
"movq (%1, %2, 4), %%mm4 \n\t" // R4
- "movq (%1, %%edx), %%mm5 \n\t" // R5
- "movq (%1, %%eax, 2), %%mm6 \n\t" // R6
- "movq (%1, %%ecx), %%mm7 \n\t" // R7
+ "movq (%1, %%"REG_d"), %%mm5 \n\t" // R5
+ "movq (%1, %%"REG_a", 2), %%mm6 \n\t" // R6
+ "movq (%1, %%"REG_c"), %%mm7 \n\t" // R7
PAVGB(%%mm4, %%mm0)
PAVGB(%%mm5, %%mm1)
PAVGB(%%mm6, %%mm2)
@@ -2489,25 +2520,25 @@ L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
PAVGB(%%mm6, %%mm2)
PAVGB(%%mm7, %%mm3)
"movq %%mm0, (%1, %2, 4) \n\t" // R4
- "movq %%mm1, (%1, %%edx) \n\t" // R5
- "movq %%mm2, (%1, %%eax, 2) \n\t" // R6
- "movq %%mm3, (%1, %%ecx) \n\t" // R7
+ "movq %%mm1, (%1, %%"REG_d") \n\t" // R5
+ "movq %%mm2, (%1, %%"REG_a", 2) \n\t" // R6
+ "movq %%mm3, (%1, %%"REG_c") \n\t" // R7
"movq %%mm0, (%0, %2, 4) \n\t" // L4
- "movq %%mm1, (%0, %%edx) \n\t" // L5
- "movq %%mm2, (%0, %%eax, 2) \n\t" // L6
- "movq %%mm3, (%0, %%ecx) \n\t" // L7
+ "movq %%mm1, (%0, %%"REG_d") \n\t" // L5
+ "movq %%mm2, (%0, %%"REG_a", 2) \n\t" // L6
+ "movq %%mm3, (%0, %%"REG_c") \n\t" // L7
"4: \n\t"
- :: "r" (src), "r" (tempBlured), "r"(stride), "m" (tempBluredPast)
- : "%eax", "%edx", "%ecx", "memory"
+ :: "r" (src), "r" (tempBlured), "r"((long)stride), "m" (tempBluredPast)
+ : "%"REG_a, "%"REG_d, "%"REG_c, "memory"
);
//printf("%d\n", test);
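
The block above then smooths the raw difference score with the scores of the neighbouring blocks kept in tempBluredPast, stores the result back, and picks one of four actions by comparing against thresholds that the caller is assumed to have parked at fixed slots (byte offsets 508/512/516) of the same table. A scalar sketch of the control flow, with past pointing at this block's uint32_t slot and thresholds in ascending order t0 < t1 < t2:

    int score = (4*d + past[-1] + past[+1] + past[-256] + past[+256] + 4) >> 3;
    past[0] = score;
    if     (score >= t2) /* too different: tempBlured = src   */ ;
    else if(score >= t1) /* light blend:  t = (src + t)   / 2 */ ;
    else if(score >= t0) /* medium blend: t = (src + 3*t) / 4 */ ;
    else                 /* quiet block:  t = (src + 7*t) / 8 */ ;
    /* each blending branch writes the result to both src and tempBlured */

The weights follow from counting the pavgb rounds in the four labelled paths (0, 1, 2 and 3 averaging rounds respectively).
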
#else
{
int y;
int d=0;
- int sysd=0;
+// int sysd=0;
int i;
for(y=0; y<8; y++)
@@ -2522,7 +2553,7 @@ L2_DIFF_CORE((%0, %%ecx), (%1, %%ecx))
// if(y==0 || y==7) d1+= d1>>1;
// d+= ABS(d1);
d+= d1*d1;
- sysd+= d1;
+// sysd+= d1;
}
}
i=d;
@@ -2608,6 +2639,543 @@ Switch between
}
#endif
}
+#endif //HAVE_ALTIVEC
+
+#ifdef HAVE_MMX
+/**
+ * accurate deblocking filter
+ */
+static always_inline void RENAME(do_a_deblock)(uint8_t *src, int step, int stride, PPContext *c){
+ int64_t dc_mask, eq_mask;
+ int64_t sums[10*8*2];
+	src+= step*3; // src points to the beginning of the 8x8 block
+//START_TIMER
+asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ : : "m" (c->mmxDcOffset[c->nonBQP]), "m" (c->mmxDcThreshold[c->nonBQP])
+ );
+
+asm volatile(
+ "lea (%2, %3), %%"REG_a" \n\t"
+// 0 1 2 3 4 5 6 7 8 9
+//	%2	eax	eax+%3	eax+2%3	%2+4%3	ecx	ecx+%3	ecx+2%3	%2+8%3	ecx+4%3
+
+ "movq (%2), %%mm0 \n\t"
+ "movq (%%"REG_a"), %%mm1 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "psubb %%mm1, %%mm0 \n\t" // mm0 = differnece
+ "paddb %%mm7, %%mm0 \n\t"
+ "pcmpgtb %%mm6, %%mm0 \n\t"
+
+ "movq (%%"REG_a",%3), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3, 2), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+
+ "lea (%%"REG_a", %3, 4), %%"REG_a" \n\t"
+
+ "movq (%2, %3, 4), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a"), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3, 2), %%mm1 \n\t"
+ PMAXUB(%%mm1, %%mm4)
+ PMINUB(%%mm1, %%mm3, %%mm5)
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+
+ "movq (%2, %3, 8), %%mm2 \n\t"
+ PMAXUB(%%mm2, %%mm4)
+ PMINUB(%%mm2, %%mm3, %%mm5)
+ "psubb %%mm2, %%mm1 \n\t"
+ "paddb %%mm7, %%mm1 \n\t"
+ "pcmpgtb %%mm6, %%mm1 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+
+ "movq (%%"REG_a", %3, 4), %%mm1 \n\t"
+ "psubb %%mm1, %%mm2 \n\t"
+ "paddb %%mm7, %%mm2 \n\t"
+ "pcmpgtb %%mm6, %%mm2 \n\t"
+ "paddb %%mm2, %%mm0 \n\t"
+ "psubusb %%mm3, %%mm4 \n\t"
+
+ "pxor %%mm6, %%mm6 \n\t"
+ "movq %4, %%mm7 \n\t" // QP,..., QP
+ "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
+ "psubusb %%mm4, %%mm7 \n\t" // Diff >=2QP -> 0
+ "pcmpeqb %%mm6, %%mm7 \n\t" // Diff < 2QP -> 0
+ "pcmpeqb %%mm6, %%mm7 \n\t" // Diff < 2QP -> 0
+ "movq %%mm7, %1 \n\t"
+
+ "movq %5, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "punpcklbw %%mm7, %%mm7 \n\t"
+ "psubb %%mm0, %%mm6 \n\t"
+ "pcmpgtb %%mm7, %%mm6 \n\t"
+ "movq %%mm6, %0 \n\t"
+
+ : "=m" (eq_mask), "=m" (dc_mask)
+ : "r" (src), "r" ((long)step), "m" (c->pQPb), "m"(c->ppMode.flatnessThreshold)
+ : "%"REG_a
+ );
+
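
This first block classifies the eight columns, leaving two per-byte masks: dc_mask is set where the column's total vertical range stays below 2*QP, eq_mask where enough vertical neighbour pairs are near-equal (counted with the mmxDcOffset/mmxDcThreshold compare trick) to exceed ppMode.flatnessThreshold. Scalar sketch for one column, with near_equal() a hypothetical stand-in for that compare:

    int count = 0, vmax = p[0], vmin = p[0];
    for(int y = 0; y < 8; y++){
        int a = p[y*step], b = p[(y+1)*step];
        if(b > vmax) vmax = b;
        if(b < vmin) vmin = b;
        count += near_equal(a, b);
    }
    int eq = count > c->ppMode.flatnessThreshold;   /* 0xFF byte in eq_mask */
    int dc = (vmax - vmin) < 2*QP;                  /* 0xFF byte in dc_mask */

Columns with both bits set get the strong flat-block smoothing below; if any column has eq clear, the default edge filter further down handles it.
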
+ if(dc_mask & eq_mask){
+ long offset= -8*step;
+ int64_t *temp_sums= sums;
+
+ asm volatile(
+ "movq %2, %%mm0 \n\t" // QP,..., QP
+ "pxor %%mm4, %%mm4 \n\t"
+
+ "movq (%0), %%mm6 \n\t"
+ "movq (%0, %1), %%mm5 \n\t"
+ "movq %%mm5, %%mm1 \n\t"
+ "movq %%mm6, %%mm2 \n\t"
+ "psubusb %%mm6, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
+ "psubusb %%mm2, %%mm0 \n\t" // diff >= QP -> 0
+ "pcmpeqb %%mm4, %%mm0 \n\t" // diff >= QP -> FF
+
+ "pxor %%mm6, %%mm1 \n\t"
+ "pand %%mm0, %%mm1 \n\t"
+ "pxor %%mm1, %%mm6 \n\t"
+ // 0:QP 6:First
+
+ "movq (%0, %1, 8), %%mm5 \n\t"
+ "add %1, %0 \n\t" // %0 points to line 1 not 0
+ "movq (%0, %1, 8), %%mm7 \n\t"
+ "movq %%mm5, %%mm1 \n\t"
+ "movq %%mm7, %%mm2 \n\t"
+ "psubusb %%mm7, %%mm5 \n\t"
+ "psubusb %%mm1, %%mm2 \n\t"
+ "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
+ "movq %2, %%mm0 \n\t" // QP,..., QP
+ "psubusb %%mm2, %%mm0 \n\t" // diff >= QP -> 0
+ "pcmpeqb %%mm4, %%mm0 \n\t" // diff >= QP -> FF
+
+ "pxor %%mm7, %%mm1 \n\t"
+ "pand %%mm0, %%mm1 \n\t"
+ "pxor %%mm1, %%mm7 \n\t"
+
+ "movq %%mm6, %%mm5 \n\t"
+ "punpckhbw %%mm4, %%mm6 \n\t"
+ "punpcklbw %%mm4, %%mm5 \n\t"
+ // 4:0 5/6:First 7:Last
+
+ "movq %%mm5, %%mm0 \n\t"
+ "movq %%mm6, %%mm1 \n\t"
+ "psllw $2, %%mm0 \n\t"
+ "psllw $2, %%mm1 \n\t"
+ "paddw "MANGLE(w04)", %%mm0 \n\t"
+ "paddw "MANGLE(w04)", %%mm1 \n\t"
+
+#define NEXT\
+ "movq (%0), %%mm2 \n\t"\
+ "movq (%0), %%mm3 \n\t"\
+ "add %1, %0 \n\t"\
+ "punpcklbw %%mm4, %%mm2 \n\t"\
+ "punpckhbw %%mm4, %%mm3 \n\t"\
+ "paddw %%mm2, %%mm0 \n\t"\
+ "paddw %%mm3, %%mm1 \n\t"
+
+#define PREV\
+ "movq (%0), %%mm2 \n\t"\
+ "movq (%0), %%mm3 \n\t"\
+ "add %1, %0 \n\t"\
+ "punpcklbw %%mm4, %%mm2 \n\t"\
+ "punpckhbw %%mm4, %%mm3 \n\t"\
+ "psubw %%mm2, %%mm0 \n\t"\
+ "psubw %%mm3, %%mm1 \n\t"
+
+
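
NEXT and PREV maintain the same sliding 7-line window as the scalar sums[] code, but in 16-bit lanes split across the %%mm0/%%mm1 pair: NEXT folds in the line entering the window, PREV subtracts the one leaving it, and each 16-byte store below lays down one window per output row (hence sums[10*8*2] words). After the three initial NEXTs, for example, each lane holds roughly

    4*first + (the next three source lines) + 4

i.e. the analogue of sums[0] in the C version of doVertLowPass above, with the +4 coming from the w04 rounding constant.
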
+ NEXT //0
+ NEXT //1
+ NEXT //2
+ "movq %%mm0, (%3) \n\t"
+ "movq %%mm1, 8(%3) \n\t"
+
+ NEXT //3
+ "psubw %%mm5, %%mm0 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+ "movq %%mm0, 16(%3) \n\t"
+ "movq %%mm1, 24(%3) \n\t"
+
+ NEXT //4
+ "psubw %%mm5, %%mm0 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+ "movq %%mm0, 32(%3) \n\t"
+ "movq %%mm1, 40(%3) \n\t"
+
+ NEXT //5
+ "psubw %%mm5, %%mm0 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+ "movq %%mm0, 48(%3) \n\t"
+ "movq %%mm1, 56(%3) \n\t"
+
+ NEXT //6
+ "psubw %%mm5, %%mm0 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+ "movq %%mm0, 64(%3) \n\t"
+ "movq %%mm1, 72(%3) \n\t"
+
+ "movq %%mm7, %%mm6 \n\t"
+ "punpckhbw %%mm4, %%mm7 \n\t"
+ "punpcklbw %%mm4, %%mm6 \n\t"
+
+ NEXT //7
+ "mov %4, %0 \n\t"
+ "add %1, %0 \n\t"
+ PREV //0
+ "movq %%mm0, 80(%3) \n\t"
+ "movq %%mm1, 88(%3) \n\t"
+
+ PREV //1
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "movq %%mm0, 96(%3) \n\t"
+ "movq %%mm1, 104(%3) \n\t"
+
+ PREV //2
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "movq %%mm0, 112(%3) \n\t"
+ "movq %%mm1, 120(%3) \n\t"
+
+ PREV //3
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "movq %%mm0, 128(%3) \n\t"
+ "movq %%mm1, 136(%3) \n\t"
+
+ PREV //4
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "movq %%mm0, 144(%3) \n\t"
+ "movq %%mm1, 152(%3) \n\t"
+
+ "mov %4, %0 \n\t" //FIXME
+
+ : "+&r"(src)
+ : "r" ((long)step), "m" (c->pQPb), "r"(sums), "g"(src)
+ );
+
+	src+= step; // src points to the beginning of the 8x8 block
+
+ asm volatile(
+ "movq %4, %%mm6 \n\t"
+ "pcmpeqb %%mm5, %%mm5 \n\t"
+ "pxor %%mm6, %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 8(%1), %%mm1 \n\t"
+ "paddw 32(%1), %%mm0 \n\t"
+ "paddw 40(%1), %%mm1 \n\t"
+ "movq (%0, %3), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm2, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "paddw %%mm3, %%mm1 \n\t"
+ "psrlw $4, %%mm0 \n\t"
+ "psrlw $4, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm5, %%mm4 \n\t"
+ "por %%mm4, %%mm0 \n\t"
+ "movq %%mm0, (%0, %3) \n\t"
+ "add $16, %1 \n\t"
+ "add %2, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r"(offset), "+r"(temp_sums)
+ : "r" ((long)step), "r"(src - offset), "m"(dc_mask & eq_mask)
+ );
+ }else
+		src+= step; // src points to the beginning of the 8x8 block
+
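
The loop above applies the smoothed value only where it is allowed to: %%mm6 is loaded with dc_mask & eq_mask, %%mm5 with its complement (the pcmpeqb/pxor pair), and the pand/pand/por triple performs a per-byte select. In scalar terms:

    /* per byte: keep the original where the column was not flat */
    dst = (filtered & mask) | (orig & ~mask);
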
+ if(eq_mask != -1LL){
+ uint8_t *temp_src= src;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea -40(%%"REG_SP"), %%"REG_c" \n\t" // make space for 4 8-byte vars
+ "and "ALIGN_MASK", %%"REG_c" \n\t" // align
+// 0 1 2 3 4 5 6 7 8 9
+//	%0	eax	eax+%1	eax+2%1	%0+4%1	ecx	ecx+%1	ecx+2%1	%0+8%1	ecx+4%1
+
+ "movq (%0), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
+ "punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
+
+ "movq (%0, %1), %%mm2 \n\t"
+ "lea (%0, %1, 2), %%"REG_a" \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
+ "punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
+
+ "movq (%%"REG_a"), %%mm4 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
+ "punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
+
+ "paddw %%mm0, %%mm0 \n\t" // 2L0
+ "paddw %%mm1, %%mm1 \n\t" // 2H0
+ "psubw %%mm4, %%mm2 \n\t" // L1 - L2
+ "psubw %%mm5, %%mm3 \n\t" // H1 - H2
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
+
+ "psllw $2, %%mm2 \n\t" // 4L1 - 4L2
+ "psllw $2, %%mm3 \n\t" // 4H1 - 4H2
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
+
+ "movq (%%"REG_a", %1), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L3
+ "punpckhbw %%mm7, %%mm3 \n\t" // H3
+
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
+ "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+ "movq %%mm0, (%%"REG_c") \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "movq %%mm1, 8(%%"REG_c") \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+
+ "movq (%%"REG_a", %1, 2), %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t" // L4
+ "punpckhbw %%mm7, %%mm1 \n\t" // H4
+
+ "psubw %%mm0, %%mm2 \n\t" // L3 - L4
+ "psubw %%mm1, %%mm3 \n\t" // H3 - H4
+ "movq %%mm2, 16(%%"REG_c") \n\t" // L3 - L4
+ "movq %%mm3, 24(%%"REG_c") \n\t" // H3 - H4
+ "paddw %%mm4, %%mm4 \n\t" // 2L2
+ "paddw %%mm5, %%mm5 \n\t" // 2H2
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
+
+ "lea (%%"REG_a", %1), %0 \n\t"
+ "psllw $2, %%mm2 \n\t" // 4L3 - 4L4
+ "psllw $2, %%mm3 \n\t" // 4H3 - 4H4
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
+//50 opcodes so far
+ "movq (%0, %1, 2), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L5
+ "punpckhbw %%mm7, %%mm3 \n\t" // H5
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
+ "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
+ "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
+
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t"
+ "punpcklbw %%mm7, %%mm6 \n\t" // L6
+ "psubw %%mm6, %%mm2 \n\t" // L5 - L6
+ "movq (%%"REG_a", %1, 4), %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm6 \n\t" // H6
+ "psubw %%mm6, %%mm3 \n\t" // H5 - H6
+
+ "paddw %%mm0, %%mm0 \n\t" // 2L4
+ "paddw %%mm1, %%mm1 \n\t" // 2H4
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
+
+ "psllw $2, %%mm2 \n\t" // 4L5 - 4L6
+ "psllw $2, %%mm3 \n\t" // 4H5 - 4H6
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
+
+ "movq (%0, %1, 4), %%mm2 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t" // L7
+ "punpckhbw %%mm7, %%mm3 \n\t" // H7
+
+ "paddw %%mm2, %%mm2 \n\t" // 2L7
+ "paddw %%mm3, %%mm3 \n\t" // 2H7
+ "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
+ "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
+
+ "movq (%%"REG_c"), %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
+ "movq 8(%%"REG_c"), %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
+
+#ifdef HAVE_MMX2
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm0, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm1, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm2, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "psubw %%mm3, %%mm6 \n\t"
+ "pmaxsw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
+#else
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm0, %%mm6 \n\t"
+ "pxor %%mm6, %%mm0 \n\t"
+ "psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm1, %%mm6 \n\t"
+ "pxor %%mm6, %%mm1 \n\t"
+ "psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm2, %%mm6 \n\t"
+ "pxor %%mm6, %%mm2 \n\t"
+ "psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm3, %%mm6 \n\t"
+ "pxor %%mm6, %%mm3 \n\t"
+ "psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
+#endif
+
+#ifdef HAVE_MMX2
+ "pminsw %%mm2, %%mm0 \n\t"
+ "pminsw %%mm3, %%mm1 \n\t"
+#else
+ "movq %%mm0, %%mm6 \n\t"
+ "psubusw %%mm2, %%mm6 \n\t"
+ "psubw %%mm6, %%mm0 \n\t"
+ "movq %%mm1, %%mm6 \n\t"
+ "psubusw %%mm3, %%mm6 \n\t"
+ "psubw %%mm6, %%mm1 \n\t"
+#endif
+
+ "movd %2, %%mm2 \n\t" // QP
+ "punpcklbw %%mm7, %%mm2 \n\t"
+
+ "movq %%mm7, %%mm6 \n\t" // 0
+ "pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
+ "pxor %%mm6, %%mm4 \n\t"
+ "psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
+ "pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
+ "pxor %%mm7, %%mm5 \n\t"
+ "psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
+// 100 opcodes
+ "psllw $3, %%mm2 \n\t" // 8QP
+ "movq %%mm2, %%mm3 \n\t" // 8QP
+ "pcmpgtw %%mm4, %%mm2 \n\t"
+ "pcmpgtw %%mm5, %%mm3 \n\t"
+ "pand %%mm2, %%mm4 \n\t"
+ "pand %%mm3, %%mm5 \n\t"
+
+
+ "psubusw %%mm0, %%mm4 \n\t" // hd
+ "psubusw %%mm1, %%mm5 \n\t" // ld
+
+
+ "movq "MANGLE(w05)", %%mm2 \n\t" // 5
+ "pmullw %%mm2, %%mm4 \n\t"
+ "pmullw %%mm2, %%mm5 \n\t"
+ "movq "MANGLE(w20)", %%mm2 \n\t" // 32
+ "paddw %%mm2, %%mm4 \n\t"
+ "paddw %%mm2, %%mm5 \n\t"
+ "psrlw $6, %%mm4 \n\t"
+ "psrlw $6, %%mm5 \n\t"
+
+ "movq 16(%%"REG_c"), %%mm0 \n\t" // L3 - L4
+ "movq 24(%%"REG_c"), %%mm1 \n\t" // H3 - H4
+
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+
+ "pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
+ "pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // |L3-L4|
+ "psubw %%mm3, %%mm1 \n\t" // |H3-H4|
+ "psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
+ "psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
+
+ "pxor %%mm6, %%mm2 \n\t"
+ "pxor %%mm7, %%mm3 \n\t"
+ "pand %%mm2, %%mm4 \n\t"
+ "pand %%mm3, %%mm5 \n\t"
+
+#ifdef HAVE_MMX2
+ "pminsw %%mm0, %%mm4 \n\t"
+ "pminsw %%mm1, %%mm5 \n\t"
+#else
+ "movq %%mm4, %%mm2 \n\t"
+ "psubusw %%mm0, %%mm2 \n\t"
+ "psubw %%mm2, %%mm4 \n\t"
+ "movq %%mm5, %%mm2 \n\t"
+ "psubusw %%mm1, %%mm2 \n\t"
+ "psubw %%mm2, %%mm5 \n\t"
+#endif
+ "pxor %%mm6, %%mm4 \n\t"
+ "pxor %%mm7, %%mm5 \n\t"
+ "psubw %%mm6, %%mm4 \n\t"
+ "psubw %%mm7, %%mm5 \n\t"
+ "packsswb %%mm5, %%mm4 \n\t"
+ "movq %3, %%mm1 \n\t"
+ "pandn %%mm4, %%mm1 \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "paddb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq (%0, %1), %%mm0 \n\t"
+ "psubb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%0, %1) \n\t"
+
+ : "+r" (temp_src)
+ : "r" ((long)step), "m" (c->pQPb), "m"(eq_mask)
+ : "%"REG_a, "%"REG_c
+ );
+ }
+/*if(step==16){
+ STOP_TIMER("step16")
+}else{
+ STOP_TIMER("stepX")
+}*/
+}
+#endif //HAVE_MMX
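The MMX block above is the vertical path of the new do_a_deblock filter: the NEXT/PREV macros maintain ten running sums over a sliding window of lines (the first and last lines are replicated with weight 4, and w04 supplies the +4 rounding bias), and the second loop writes (sums[i] + sums[i+2] + 2*src[i]) >> 4, blended with the original pixels through dc_mask & eq_mask. A scalar sketch of the same arithmetic for one column, reconstructed from the asm comments (the C reference that the diffstat shows being added to postprocess.c presumably follows this pattern; the QP-based border selection is omitted here):

    /* sliding-window vertical low-pass over one 8-pixel column; sketch only */
    int sums[10], i;
    const int first = src[0*step];           /* border lines replicated ...  */
    const int last  = src[7*step];           /* ... beyond both block edges  */
    sums[0] = 4*first + src[0*step] + src[1*step] + src[2*step] + 4;
    sums[1] = sums[0] - first       + src[3*step];
    sums[2] = sums[1] - first       + src[4*step];
    sums[3] = sums[2] - first       + src[5*step];
    sums[4] = sums[3] - first       + src[6*step];
    sums[5] = sums[4] - src[0*step] + src[7*step];
    sums[6] = sums[5] - src[1*step] + last;
    sums[7] = sums[6] - src[2*step] + last;
    sums[8] = sums[7] - src[3*step] + last;
    sums[9] = sums[8] - src[4*step] + last;
    for (i = 0; i < 8; i++)   /* the two +4 biases make this a rounded /16 */
        src[i*step] = (sums[i] + sums[i+2] + 2*src[i*step]) >> 4;

The eq_mask branch that follows applies the usual default-filter correction instead: with d = |2L2 - 5L3 + 5L4 - 2L5| gated by 8*QP, lines 3 and 4 are moved toward each other by roughly MIN((5*MAX(d - MIN(dleft, dright), 0) + 32) >> 6, |L3 - L4|/2), taking the sign of (L3 - L4), where dleft/dright are the same second-derivative measure over lines 0-3 and 4-7.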
static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
QP_STORE_T QPs[], int QPStride, int isColor, PPContext *c);
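In the hunks below, every hard-coded 32-bit register (%%eax, %%edx, %%ecx, %%esp) becomes %%"REG_a", %%"REG_d", etc., and every stride operand gains a (long) cast, so the same asm templates assemble as 32-bit code on x86 and 64-bit code on x86-64, where addresses must live in 64-bit registers. The REG_* names are string macros, and the bare REGa/REGd tokens used inside the copy macros are their unquoted counterparts; presumably they are defined along these lines elsewhere in the patch:

    #ifdef ARCH_X86_64
    #define REG_a  "rax"
    #define REG_c  "rcx"
    #define REG_d  "rdx"
    #define REG_SP "rsp"
    #define REGa   rax
    #define REGd   rdx
    #else
    #define REG_a  "eax"
    #define REG_c  "ecx"
    #define REG_d  "edx"
    #define REG_SP "esp"
    #define REGa   eax
    #define REGd   edx
    #endif

The new REAL_SCALED_CPY / SCALED_CPY indirection exists for the same reason: the '#' stringification operator does not macro-expand its argument, so the extra expansion step is needed to turn REGa into eax/rax before the operand is pasted into the asm string.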
@@ -2628,13 +3196,13 @@ static inline void RENAME(blockCopy)(uint8_t dst[], int dstStride, uint8_t src[]
{
#ifdef HAVE_MMX
asm volatile(
- "movq (%%eax), %%mm2 \n\t" // packedYOffset
- "movq 8(%%eax), %%mm3 \n\t" // packedYScale
- "leal (%2,%4), %%eax \n\t"
- "leal (%3,%5), %%edx \n\t"
+ "movq (%%"REG_a"), %%mm2 \n\t" // packedYOffset
+ "movq 8(%%"REG_a"), %%mm3 \n\t" // packedYScale
+ "lea (%2,%4), %%"REG_a" \n\t"
+ "lea (%3,%5), %%"REG_d" \n\t"
"pxor %%mm4, %%mm4 \n\t"
#ifdef HAVE_MMX2
-#define SCALED_CPY(src1, src2, dst1, dst2) \
+#define REAL_SCALED_CPY(src1, src2, dst1, dst2) \
"movq " #src1 ", %%mm0 \n\t"\
"movq " #src1 ", %%mm5 \n\t"\
"movq " #src2 ", %%mm1 \n\t"\
@@ -2657,7 +3225,7 @@ static inline void RENAME(blockCopy)(uint8_t dst[], int dstStride, uint8_t src[]
"movq %%mm1, " #dst2 " \n\t"\
#else //HAVE_MMX2
-#define SCALED_CPY(src1, src2, dst1, dst2) \
+#define REAL_SCALED_CPY(src1, src2, dst1, dst2) \
"movq " #src1 ", %%mm0 \n\t"\
"movq " #src1 ", %%mm5 \n\t"\
"punpcklbw %%mm4, %%mm0 \n\t"\
@@ -2684,22 +3252,24 @@ static inline void RENAME(blockCopy)(uint8_t dst[], int dstStride, uint8_t src[]
"movq %%mm1, " #dst2 " \n\t"\
#endif //!HAVE_MMX2
+#define SCALED_CPY(src1, src2, dst1, dst2)\
+ REAL_SCALED_CPY(src1, src2, dst1, dst2)
SCALED_CPY((%2) , (%2, %4) , (%3) , (%3, %5))
-SCALED_CPY((%2, %4, 2), (%%eax, %4, 2), (%3, %5, 2), (%%edx, %5, 2))
-SCALED_CPY((%2, %4, 4), (%%eax, %4, 4), (%3, %5, 4), (%%edx, %5, 4))
- "leal (%%eax,%4,4), %%eax \n\t"
- "leal (%%edx,%5,4), %%edx \n\t"
-SCALED_CPY((%%eax, %4), (%%eax, %4, 2), (%%edx, %5), (%%edx, %5, 2))
+SCALED_CPY((%2, %4, 2), (%%REGa, %4, 2), (%3, %5, 2), (%%REGd, %5, 2))
+SCALED_CPY((%2, %4, 4), (%%REGa, %4, 4), (%3, %5, 4), (%%REGd, %5, 4))
+ "lea (%%"REG_a",%4,4), %%"REG_a" \n\t"
+ "lea (%%"REG_d",%5,4), %%"REG_d" \n\t"
+SCALED_CPY((%%REGa, %4), (%%REGa, %4, 2), (%%REGd, %5), (%%REGd, %5, 2))
: "=&a" (packedOffsetAndScale)
: "0" (packedOffsetAndScale),
"r"(src),
"r"(dst),
- "r" (srcStride),
- "r" (dstStride)
- : "%edx"
+ "r" ((long)srcStride),
+ "r" ((long)dstStride)
+ : "%"REG_d
);
#else
for(i=0; i<8; i++)
@@ -2711,27 +3281,30 @@ SCALED_CPY((%%eax, %4), (%%eax, %4, 2), (%%edx, %5), (%%edx, %5, 2))
{
#ifdef HAVE_MMX
asm volatile(
- "leal (%0,%2), %%eax \n\t"
- "leal (%1,%3), %%edx \n\t"
+ "lea (%0,%2), %%"REG_a" \n\t"
+ "lea (%1,%3), %%"REG_d" \n\t"
-#define SIMPLE_CPY(src1, src2, dst1, dst2) \
+#define REAL_SIMPLE_CPY(src1, src2, dst1, dst2) \
"movq " #src1 ", %%mm0 \n\t"\
"movq " #src2 ", %%mm1 \n\t"\
"movq %%mm0, " #dst1 " \n\t"\
"movq %%mm1, " #dst2 " \n\t"\
+#define SIMPLE_CPY(src1, src2, dst1, dst2)\
+ REAL_SIMPLE_CPY(src1, src2, dst1, dst2)
+
SIMPLE_CPY((%0) , (%0, %2) , (%1) , (%1, %3))
-SIMPLE_CPY((%0, %2, 2), (%%eax, %2, 2), (%1, %3, 2), (%%edx, %3, 2))
-SIMPLE_CPY((%0, %2, 4), (%%eax, %2, 4), (%1, %3, 4), (%%edx, %3, 4))
- "leal (%%eax,%2,4), %%eax \n\t"
- "leal (%%edx,%3,4), %%edx \n\t"
-SIMPLE_CPY((%%eax, %2), (%%eax, %2, 2), (%%edx, %3), (%%edx, %3, 2))
+SIMPLE_CPY((%0, %2, 2), (%%REGa, %2, 2), (%1, %3, 2), (%%REGd, %3, 2))
+SIMPLE_CPY((%0, %2, 4), (%%REGa, %2, 4), (%1, %3, 4), (%%REGd, %3, 4))
+ "lea (%%"REG_a",%2,4), %%"REG_a" \n\t"
+ "lea (%%"REG_d",%3,4), %%"REG_d" \n\t"
+SIMPLE_CPY((%%REGa, %2), (%%REGa, %2, 2), (%%REGd, %3), (%%REGd, %3, 2))
: : "r" (src),
"r" (dst),
- "r" (srcStride),
- "r" (dstStride)
- : "%eax", "%edx"
+ "r" ((long)srcStride),
+ "r" ((long)dstStride)
+ : "%"REG_a, "%"REG_d
);
#else
for(i=0; i<8; i++)
@@ -2749,12 +3322,12 @@ static inline void RENAME(duplicate)(uint8_t src[], int stride)
#ifdef HAVE_MMX
asm volatile(
"movq (%0), %%mm0 \n\t"
- "addl %1, %0 \n\t"
+ "add %1, %0 \n\t"
"movq %%mm0, (%0) \n\t"
"movq %%mm0, (%0, %1) \n\t"
"movq %%mm0, (%0, %1, 2) \n\t"
: "+r" (src)
- : "r" (-stride)
+ : "r" ((long)-stride)
);
#else
int i;
@@ -2793,8 +3366,8 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
//FIXME remove
uint64_t * const yHistogram= c.yHistogram;
- uint8_t * const tempSrc= c.tempSrc;
- uint8_t * const tempDst= c.tempDst;
+ uint8_t * const tempSrc= srcStride > 0 ? c.tempSrc : c.tempSrc - 23*srcStride;
+ uint8_t * const tempDst= dstStride > 0 ? c.tempDst : c.tempDst - 23*dstStride;
//const int mbWidth= isColor ? (width+7)>>3 : (width+15)>>4;
#ifdef HAVE_MMX
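The new tempSrc/tempDst initialization above is what makes negative strides (bottom-up frames) work: with stride < 0, line i of the 24-line temporary buffer lies below the base pointer, so the base is rebased to the last line to keep every access inside the allocation. A minimal illustration of the invariant (buf is a hypothetical name for the raw allocation):

    /* for stride < 0, -23*stride is positive: base points at the buffer's
       last line, and base + i*stride stays in bounds for i = 0..23 */
    uint8_t *base = stride > 0 ? buf : buf - 23*stride;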
@@ -2814,7 +3387,8 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
|| (mode & LOWPASS5_DEINT_FILTER)) copyAhead=14;
else if( (mode & V_DEBLOCK)
|| (mode & LINEAR_IPOL_DEINT_FILTER)
- || (mode & MEDIAN_DEINT_FILTER)) copyAhead=13;
+ || (mode & MEDIAN_DEINT_FILTER)
+ || (mode & V_A_DEBLOCK)) copyAhead=13;
else if(mode & V_X1_FILTER) copyAhead=11;
// else if(mode & V_RK1_FILTER) copyAhead=10;
else if(mode & DERING) copyAhead=9;
@@ -2905,22 +3479,22 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
*/
asm(
- "movl %4, %%eax \n\t"
- "shrl $2, %%eax \n\t"
- "andl $6, %%eax \n\t"
- "addl %5, %%eax \n\t"
- "movl %%eax, %%edx \n\t"
- "imul %1, %%eax \n\t"
- "imul %3, %%edx \n\t"
- "prefetchnta 32(%%eax, %0) \n\t"
- "prefetcht0 32(%%edx, %2) \n\t"
- "addl %1, %%eax \n\t"
- "addl %3, %%edx \n\t"
- "prefetchnta 32(%%eax, %0) \n\t"
- "prefetcht0 32(%%edx, %2) \n\t"
- :: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
- "m" (x), "m" (copyAhead)
- : "%eax", "%edx"
+ "mov %4, %%"REG_a" \n\t"
+ "shr $2, %%"REG_a" \n\t"
+ "and $6, %%"REG_a" \n\t"
+ "add %5, %%"REG_a" \n\t"
+ "mov %%"REG_a", %%"REG_d" \n\t"
+ "imul %1, %%"REG_a" \n\t"
+ "imul %3, %%"REG_d" \n\t"
+ "prefetchnta 32(%%"REG_a", %0) \n\t"
+ "prefetcht0 32(%%"REG_d", %2) \n\t"
+ "add %1, %%"REG_a" \n\t"
+ "add %3, %%"REG_d" \n\t"
+ "prefetchnta 32(%%"REG_a", %0) \n\t"
+ "prefetcht0 32(%%"REG_d", %2) \n\t"
+ :: "r" (srcBlock), "r" ((long)srcStride), "r" (dstBlock), "r" ((long)dstStride),
+ "m" ((long)x), "m" ((long)copyAhead)
+ : "%"REG_a, "%"REG_d
);
#elif defined(HAVE_3DNOW)
@@ -2955,8 +3529,8 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
dstBlock+=8;
srcBlock+=8;
}
- if(width==dstStride)
- memcpy(dst, tempDst + 9*dstStride, copyAhead*dstStride);
+ if(width==ABS(dstStride))
+ linecpy(dst, tempDst + 9*dstStride, copyAhead, dstStride);
else
{
int i;
@@ -2978,7 +3552,7 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
uint8_t *tempBlock2= c.tempBlocks + 8;
#endif
int8_t *QPptr= &QPs[(y>>qpVShift)*QPStride];
- int8_t *nonBQPptr= &c.nonBQPTable[(y>>qpVShift)*QPStride];
+ int8_t *nonBQPptr= &c.nonBQPTable[(y>>qpVShift)*ABS(QPStride)];
int QP=0;
/* can we mess with an 8x16 block from srcBlock/dstBlock downwards and 1 line upwards?
if not, then use a temporary buffer */
@@ -2987,19 +3561,19 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
int i;
/* copy lines (copyAhead) to (copyAhead+7) of src; these will be copied to
dst later via blockCopy */
- memcpy(tempSrc + srcStride*copyAhead, srcBlock + srcStride*copyAhead,
- srcStride*MAX(height-y-copyAhead, 0) );
+ linecpy(tempSrc + srcStride*copyAhead, srcBlock + srcStride*copyAhead,
+ MAX(height-y-copyAhead, 0), srcStride);
/* duplicate the last line of src to fill the void up to line (copyAhead+7) */
for(i=MAX(height-y, 8); i<copyAhead+8; i++)
- memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), srcStride);
+ memcpy(tempSrc + srcStride*i, src + srcStride*(height-1), ABS(srcStride));
/* copy up to (copyAhead+1) lines of dst (line -1 to (copyAhead-1)) */
- memcpy(tempDst, dstBlock - dstStride, dstStride*MIN(height-y+1, copyAhead+1) );
+ linecpy(tempDst, dstBlock - dstStride, MIN(height-y+1, copyAhead+1), dstStride);
/* duplicate the last line of dst to fill the void up to line (copyAhead) */
for(i=height-y+1; i<=copyAhead; i++)
- memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), dstStride);
+ memcpy(tempDst + dstStride*i, dst + dstStride*(height-1), ABS(dstStride));
dstBlock= tempDst + dstStride;
srcBlock= tempSrc;
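The memcpy -> linecpy substitutions in this hunk exist for the same reason: with a negative stride, lines*stride is a negative byte count, and the block actually extends downward in memory from the given pointers. The helper added to postprocess_internal.h (10 lines in the diffstat) presumably looks like this sketch:

    /* copy 'lines' consecutive lines, tolerating negative strides; used
       where a whole line equals ABS(stride), so the gaps may be copied too */
    static inline void linecpy(void *dest, void *src, int lines, int stride)
    {
        if (stride > 0) {
            memcpy(dest, src, lines*stride);
        } else {
            /* the pointers reference the top line; the data lies below it */
            memcpy((uint8_t*)dest + (lines-1)*stride,
                   (uint8_t*)src  + (lines-1)*stride,
                   -lines*stride);
        }
    }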
@@ -3051,22 +3625,22 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
*/
asm(
- "movl %4, %%eax \n\t"
- "shrl $2, %%eax \n\t"
- "andl $6, %%eax \n\t"
- "addl %5, %%eax \n\t"
- "movl %%eax, %%edx \n\t"
- "imul %1, %%eax \n\t"
- "imul %3, %%edx \n\t"
- "prefetchnta 32(%%eax, %0) \n\t"
- "prefetcht0 32(%%edx, %2) \n\t"
- "addl %1, %%eax \n\t"
- "addl %3, %%edx \n\t"
- "prefetchnta 32(%%eax, %0) \n\t"
- "prefetcht0 32(%%edx, %2) \n\t"
- :: "r" (srcBlock), "r" (srcStride), "r" (dstBlock), "r" (dstStride),
- "m" (x), "m" (copyAhead)
- : "%eax", "%edx"
+ "mov %4, %%"REG_a" \n\t"
+ "shr $2, %%"REG_a" \n\t"
+ "and $6, %%"REG_a" \n\t"
+ "add %5, %%"REG_a" \n\t"
+ "mov %%"REG_a", %%"REG_d" \n\t"
+ "imul %1, %%"REG_a" \n\t"
+ "imul %3, %%"REG_d" \n\t"
+ "prefetchnta 32(%%"REG_a", %0) \n\t"
+ "prefetcht0 32(%%"REG_d", %2) \n\t"
+ "add %1, %%"REG_a" \n\t"
+ "add %3, %%"REG_d" \n\t"
+ "prefetchnta 32(%%"REG_a", %0) \n\t"
+ "prefetcht0 32(%%"REG_d", %2) \n\t"
+ :: "r" (srcBlock), "r" ((long)srcStride), "r" (dstBlock), "r" ((long)dstStride),
+ "m" ((long)x), "m" ((long)copyAhead)
+ : "%"REG_a, "%"REG_d
);
#elif defined(HAVE_3DNOW)
@@ -3110,6 +3684,8 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
RENAME(doVertLowPass)(dstBlock, stride, &c);
else if(t==2)
RENAME(doVertDefFilter)(dstBlock, stride, &c);
+ }else if(mode & V_A_DEBLOCK){
+ RENAME(do_a_deblock)(dstBlock, stride, 1, &c);
}
}
@@ -3131,6 +3707,8 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
RENAME(doVertLowPass)(tempBlock1, 16, &c);
else if(t==2)
RENAME(doVertDefFilter)(tempBlock1, 16, &c);
+ }else if(mode & H_A_DEBLOCK){
+ RENAME(do_a_deblock)(tempBlock1, 16, 1, &c);
}
RENAME(transpose2)(dstBlock-4, dstStride, tempBlock1 + 4*16);
@@ -3140,12 +3718,29 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
horizX1Filter(dstBlock-4, stride, QP);
else if(mode & H_DEBLOCK)
{
+#ifdef HAVE_ALTIVEC
+ unsigned char __attribute__ ((aligned(16))) tempBlock[272];
+ transpose_16x8_char_toPackedAlign_altivec(tempBlock, dstBlock - (4 + 1), stride);
+
+ const int t=vertClassify_altivec(tempBlock-48, 16, &c);
+ if(t==1) {
+ doVertLowPass_altivec(tempBlock-48, 16, &c);
+ transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride);
+ }
+ else if(t==2) {
+ doVertDefFilter_altivec(tempBlock-48, 16, &c);
+ transpose_8x16_char_fromPackedAlign_altivec(dstBlock - (4 + 1), tempBlock, stride);
+ }
+#else
const int t= RENAME(horizClassify)(dstBlock-4, stride, &c);
if(t==1)
RENAME(doHorizLowPass)(dstBlock-4, stride, &c);
else if(t==2)
RENAME(doHorizDefFilter)(dstBlock-4, stride, &c);
+#endif
+ }else if(mode & H_A_DEBLOCK){
+ RENAME(do_a_deblock)(dstBlock-8, 1, stride, &c);
}
#endif
if(mode & DERING)
@@ -3190,8 +3785,8 @@ static void RENAME(postProcess)(uint8_t src[], int srcStride, uint8_t dst[], int
if(y+15 >= height)
{
uint8_t *dstBlock= &(dst[y*dstStride]);
- if(width==dstStride)
- memcpy(dstBlock, tempDst + dstStride, dstStride*(height-y));
+ if(width==ABS(dstStride))
+ linecpy(dstBlock, tempDst + dstStride, height-y, dstStride);
else
{
int i;