author     Mike Melanson <mike@multimedia.cx>    2006-08-02 07:02:37 +0000
committer  Mike Melanson <mike@multimedia.cx>    2006-08-02 07:02:37 +0000
commit     5f24c84bddd59e2d1744408f2c3694050d7d3d3d (patch)
tree       044d31ff791ca6d0d60b5624bd31554f7da53876 /src/libffmpeg/libavcodec/i386
parent     feaf0357aaa9f80562900ad9c50b9adf44d9b795 (diff)
download   xine-lib-5f24c84bddd59e2d1744408f2c3694050d7d3d3d.tar.gz
           xine-lib-5f24c84bddd59e2d1744408f2c3694050d7d3d3d.tar.bz2
sync to FFmpeg 51.11.0
CVS patchset: 8146
CVS date: 2006/08/02 07:02:37
Diffstat (limited to 'src/libffmpeg/libavcodec/i386')
-rw-r--r--  src/libffmpeg/libavcodec/i386/cputest.c                    |  34
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c  | 261
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_mmx.c                | 205
-rw-r--r--  src/libffmpeg/libavcodec/i386/fft_sse.c                    |  20
-rw-r--r--  src/libffmpeg/libavcodec/i386/h264dsp_mmx.c                | 587
-rw-r--r--  src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c              |   2
-rw-r--r--  src/libffmpeg/libavcodec/i386/mmx.h                        |  16
-rw-r--r--  src/libffmpeg/libavcodec/i386/motion_est_mmx.c             |   2
-rw-r--r--  src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c              |   5
-rw-r--r--  src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c     |   1
10 files changed, 844 insertions, 289 deletions
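
Note: the first hunk below extends mm_support() with SSE3 detection by testing bit 0 of the ECX register returned by CPUID leaf 1 ("if (ecx & 1) rval |= MM_SSE3;"). As a rough standalone sketch of that probe only, with hypothetical helper names and GCC-style inline asm (cputest.c itself uses its own cpuid() macro, which also preserves EBX for PIC builds):

    #include <stdint.h>

    /* Illustrative sketch, not the patch's code: CPUID leaf 1 reports
     * SSE3 support in bit 0 of ECX. */
    static void cpuid_sketch(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
                             uint32_t *ecx, uint32_t *edx)
    {
        __asm__ volatile ("cpuid"
                          : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
                          : "0"(leaf));
    }

    static int has_sse3(void)
    {
        uint32_t eax, ebx, ecx, edx;
        cpuid_sketch(1, &eax, &ebx, &ecx, &edx);
        return ecx & 1;   /* CPUID.01H:ECX bit 0 = SSE3 */
    }
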
diff --git a/src/libffmpeg/libavcodec/i386/cputest.c b/src/libffmpeg/libavcodec/i386/cputest.c index 64656c65a..a66bdbe98 100644 --- a/src/libffmpeg/libavcodec/i386/cputest.c +++ b/src/libffmpeg/libavcodec/i386/cputest.c @@ -64,6 +64,8 @@ int mm_support(void) rval |= MM_MMXEXT | MM_SSE; if (std_caps & (1<<26)) rval |= MM_SSE2; + if (ecx & 1) + rval |= MM_SSE3; } cpuid(0x80000000, max_ext_level, ebx, ecx, edx); @@ -76,38 +78,10 @@ int mm_support(void) rval |= MM_3DNOWEXT; if (ext_caps & (1<<23)) rval |= MM_MMX; - } - - cpuid(0, eax, ebx, ecx, edx); - if ( ebx == 0x68747541 && - edx == 0x69746e65 && - ecx == 0x444d4163) { - /* AMD */ - if(ext_caps & (1<<22)) - rval |= MM_MMXEXT; - } else if (ebx == 0x746e6543 && - edx == 0x48727561 && - ecx == 0x736c7561) { /* "CentaurHauls" */ - /* VIA C3 */ - if(ext_caps & (1<<24)) - rval |= MM_MMXEXT; - } else if (ebx == 0x69727943 && - edx == 0x736e4978 && - ecx == 0x64616574) { - /* Cyrix Section */ - /* See if extended CPUID level 80000001 is supported */ - /* The value of CPUID/80000001 for the 6x86MX is undefined - according to the Cyrix CPU Detection Guide (Preliminary - Rev. 1.01 table 1), so we'll check the value of eax for - CPUID/0 to see if standard CPUID level 2 is supported. - According to the table, the only CPU which supports level - 2 is also the only one which supports extended CPUID levels. - */ - if (eax < 2) - return rval; - if (ext_caps & (1<<24)) + if (ext_caps & (1<<22)) rval |= MM_MMXEXT; } + #if 0 av_log(NULL, AV_LOG_DEBUG, "%s%s%s%s%s%s\n", (rval&MM_MMX) ? "MMX ":"", diff --git a/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c b/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c index d52938ccf..b49c880a7 100644 --- a/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c +++ b/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c @@ -37,112 +37,56 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1* assert(x<8 && y<8 && x>=0 && y>=0); - if(y==0) + if(y==0 || x==0) { - /* horizontal filter only */ - asm volatile("movd %0, %%mm5\n\t" - "punpcklwd %%mm5, %%mm5\n\t" - "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */ - "movq %1, %%mm4\n\t" - "pxor %%mm7, %%mm7\n\t" - "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-x */ - : : "rm" (x), "m" (ff_pw_8)); + /* 1 dimensional filter only */ + const int dxy = x ? 
1 : stride; + + asm volatile( + "movd %0, %%mm5\n\t" + "movq %1, %%mm4\n\t" + "punpcklwd %%mm5, %%mm5\n\t" + "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */ + "movq %%mm4, %%mm6\n\t" + "pxor %%mm7, %%mm7\n\t" + "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-x */ + "psrlw $1, %%mm6\n\t" /* mm6 = 4 */ + :: "rm"(x+y), "m"(ff_pw_8)); for(i=0; i<h; i++) { asm volatile( /* mm0 = src[0..7], mm1 = src[1..8] */ "movq %0, %%mm0\n\t" - "movq %1, %%mm1\n\t" - : : "m" (src[0]), "m" (src[1])); + "movq %1, %%mm2\n\t" + :: "m"(src[0]), "m"(src[dxy])); asm volatile( - /* [mm2,mm3] = A * src[0..7] */ - "movq %%mm0, %%mm2\n\t" - "punpcklbw %%mm7, %%mm2\n\t" - "pmullw %%mm4, %%mm2\n\t" - "movq %%mm0, %%mm3\n\t" - "punpckhbw %%mm7, %%mm3\n\t" - "pmullw %%mm4, %%mm3\n\t" - - /* [mm2,mm3] += B * src[1..8] */ - "movq %%mm1, %%mm0\n\t" + /* [mm0,mm1] = A * src[0..7] */ + /* [mm2,mm3] = B * src[1..8] */ + "movq %%mm0, %%mm1\n\t" + "movq %%mm2, %%mm3\n\t" "punpcklbw %%mm7, %%mm0\n\t" - "pmullw %%mm5, %%mm0\n\t" "punpckhbw %%mm7, %%mm1\n\t" - "pmullw %%mm5, %%mm1\n\t" - "paddw %%mm0, %%mm2\n\t" - "paddw %%mm1, %%mm3\n\t" - - /* dst[0..7] = pack(([mm2,mm3] + 32) >> 6) */ - "paddw %1, %%mm2\n\t" - "paddw %1, %%mm3\n\t" - "psrlw $3, %%mm2\n\t" - "psrlw $3, %%mm3\n\t" - "packuswb %%mm3, %%mm2\n\t" - H264_CHROMA_OP(%0, %%mm2) - "movq %%mm2, %0\n\t" - : "=m" (dst[0]) : "m" (ff_pw_4)); - - src += stride; - dst += stride; - } - return; - } - - if(x==0) - { - /* vertical filter only */ - asm volatile("movd %0, %%mm6\n\t" - "punpcklwd %%mm6, %%mm6\n\t" - "punpckldq %%mm6, %%mm6\n\t" /* mm6 = C = y */ - "movq %1, %%mm4\n\t" - "pxor %%mm7, %%mm7\n\t" - "psubw %%mm6, %%mm4\n\t" /* mm4 = A = 8-y */ - : : "rm" (y), "m" (ff_pw_8)); - - asm volatile( - /* mm0 = src[0..7] */ - "movq %0, %%mm0\n\t" - : : "m" (src[0])); - - for(i=0; i<h; i++) { - asm volatile( - /* [mm2,mm3] = A * src[0..7] */ - "movq %mm0, %mm2\n\t" - "punpcklbw %mm7, %mm2\n\t" - "pmullw %mm4, %mm2\n\t" - "movq %mm0, %mm3\n\t" - "punpckhbw %mm7, %mm3\n\t" - "pmullw %mm4, %mm3\n\t"); + "punpcklbw %%mm7, %%mm2\n\t" + "punpckhbw %%mm7, %%mm3\n\t" + "pmullw %%mm4, %%mm0\n\t" + "pmullw %%mm4, %%mm1\n\t" + "pmullw %%mm5, %%mm2\n\t" + "pmullw %%mm5, %%mm3\n\t" + + /* dst[0..7] = (A * src[0..7] + B * src[1..8] + 4) >> 3 */ + "paddw %%mm6, %%mm0\n\t" + "paddw %%mm6, %%mm1\n\t" + "paddw %%mm2, %%mm0\n\t" + "paddw %%mm3, %%mm1\n\t" + "psrlw $3, %%mm0\n\t" + "psrlw $3, %%mm1\n\t" + "packuswb %%mm1, %%mm0\n\t" + H264_CHROMA_OP(%0, %%mm0) + "movq %%mm0, %0\n\t" + : "=m" (dst[0])); src += stride; - asm volatile( - /* mm0 = src[0..7] */ - "movq %0, %%mm0\n\t" - : : "m" (src[0])); - - asm volatile( - /* [mm2,mm3] += C * src[0..7] */ - "movq %mm0, %mm1\n\t" - "punpcklbw %mm7, %mm1\n\t" - "pmullw %mm6, %mm1\n\t" - "paddw %mm1, %mm2\n\t" - "movq %mm0, %mm5\n\t" - "punpckhbw %mm7, %mm5\n\t" - "pmullw %mm6, %mm5\n\t" - "paddw %mm5, %mm3\n\t"); - - asm volatile( - /* dst[0..7] = pack(([mm2,mm3] + 32) >> 6) */ - "paddw %1, %%mm2\n\t" - "paddw %1, %%mm3\n\t" - "psrlw $3, %%mm2\n\t" - "psrlw $3, %%mm3\n\t" - "packuswb %%mm3, %%mm2\n\t" - H264_CHROMA_OP(%0, %%mm2) - "movq %%mm2, %0\n\t" - : "=m" (dst[0]) : "m" (ff_pw_4)); - dst += stride; } return; @@ -177,57 +121,53 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1* : : "m" (src[0]), "m" (src[1])); for(i=0; i<h; i++) { + src += stride; + asm volatile( - /* [mm2,mm3] = A * src[0..7] */ + /* mm2 = A * src[0..3] + B * src[1..4] */ + /* mm3 = A * src[4..7] + B * src[5..8] */ "movq %%mm0, %%mm2\n\t" + "movq %%mm1, %%mm3\n\t" 
+ "punpckhbw %%mm7, %%mm0\n\t" + "punpcklbw %%mm7, %%mm1\n\t" "punpcklbw %%mm7, %%mm2\n\t" - "pmullw %0, %%mm2\n\t" - "movq %%mm0, %%mm3\n\t" "punpckhbw %%mm7, %%mm3\n\t" - "pmullw %0, %%mm3\n\t" + "pmullw %0, %%mm0\n\t" + "pmullw %0, %%mm2\n\t" + "pmullw %%mm5, %%mm1\n\t" + "pmullw %%mm5, %%mm3\n\t" + "paddw %%mm1, %%mm2\n\t" + "paddw %%mm0, %%mm3\n\t" + : : "m" (AA)); - /* [mm2,mm3] += B * src[1..8] */ - "movq %%mm1, %%mm0\n\t" + asm volatile( + /* [mm2,mm3] += C * src[0..7] */ + "movq %0, %%mm0\n\t" + "movq %%mm0, %%mm1\n\t" "punpcklbw %%mm7, %%mm0\n\t" - "pmullw %%mm5, %%mm0\n\t" "punpckhbw %%mm7, %%mm1\n\t" - "pmullw %%mm5, %%mm1\n\t" + "pmullw %%mm6, %%mm0\n\t" + "pmullw %%mm6, %%mm1\n\t" "paddw %%mm0, %%mm2\n\t" "paddw %%mm1, %%mm3\n\t" - : : "m" (AA)); + : : "m" (src[0])); - src += stride; asm volatile( - /* mm0 = src[0..7], mm1 = src[1..8] */ - "movq %0, %%mm0\n\t" + /* [mm2,mm3] += D * src[1..8] */ "movq %1, %%mm1\n\t" - : : "m" (src[0]), "m" (src[1])); - - asm volatile( - /* [mm2,mm3] += C * src[0..7] */ - "movq %mm0, %mm4\n\t" - "punpcklbw %mm7, %mm4\n\t" - "pmullw %mm6, %mm4\n\t" - "paddw %mm4, %mm2\n\t" - "movq %mm0, %mm4\n\t" - "punpckhbw %mm7, %mm4\n\t" - "pmullw %mm6, %mm4\n\t" - "paddw %mm4, %mm3\n\t"); - - asm volatile( - /* [mm2,mm3] += D * src[1..8] */ - "movq %%mm1, %%mm4\n\t" - "punpcklbw %%mm7, %%mm4\n\t" - "pmullw %0, %%mm4\n\t" - "paddw %%mm4, %%mm2\n\t" + "movq %%mm1, %%mm0\n\t" "movq %%mm1, %%mm4\n\t" + "punpcklbw %%mm7, %%mm0\n\t" "punpckhbw %%mm7, %%mm4\n\t" - "pmullw %0, %%mm4\n\t" + "pmullw %2, %%mm0\n\t" + "pmullw %2, %%mm4\n\t" + "paddw %%mm0, %%mm2\n\t" "paddw %%mm4, %%mm3\n\t" - : : "m" (DD)); + "movq %0, %%mm0\n\t" + : : "m" (src[0]), "m" (src[1]), "m" (DD)); asm volatile( - /* dst[0..7] = pack(([mm2,mm3] + 32) >> 6) */ + /* dst[0..7] = ([mm2,mm3] + 32) >> 6 */ "paddw %1, %%mm2\n\t" "paddw %1, %%mm3\n\t" "psrlw $6, %%mm2\n\t" @@ -240,7 +180,7 @@ static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1* } } -static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) +static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) { DECLARE_ALIGNED_8(uint64_t, AA); DECLARE_ALIGNED_8(uint64_t, DD); @@ -319,3 +259,66 @@ static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1* dst += stride; } } + +#ifdef H264_CHROMA_MC2_TMPL +static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) +{ + int CD=((1<<16)-1)*x*y + 8*y; + int AB=((8<<16)-8)*x + 64 - CD; + int i; + + asm volatile( + /* mm5 = {A,B,A,B} */ + /* mm6 = {C,D,C,D} */ + "movd %0, %%mm5\n\t" + "movd %1, %%mm6\n\t" + "punpckldq %%mm5, %%mm5\n\t" + "punpckldq %%mm6, %%mm6\n\t" + "pxor %%mm7, %%mm7\n\t" + :: "r"(AB), "r"(CD)); + + asm volatile( + /* mm0 = src[0,1,1,2] */ + "movd %0, %%mm0\n\t" + "punpcklbw %%mm7, %%mm0\n\t" + "pshufw $0x94, %%mm0, %%mm0\n\t" + :: "m"(src[0])); + + for(i=0; i<h; i++) { + asm volatile( + /* mm1 = A * src[0,1] + B * src[1,2] */ + "movq %%mm0, %%mm1\n\t" + "pmaddwd %%mm5, %%mm1\n\t" + ::); + + src += stride; + asm volatile( + /* mm0 = src[0,1,1,2] */ + "movd %0, %%mm0\n\t" + "punpcklbw %%mm7, %%mm0\n\t" + "pshufw $0x94, %%mm0, %%mm0\n\t" + :: "m"(src[0])); + + asm volatile( + /* mm1 += C * src[0,1] + D * src[1,2] */ + "movq %%mm0, %%mm2\n\t" + "pmaddwd %%mm6, %%mm2\n\t" + "paddw %%mm2, %%mm1\n\t" + ::); + + asm volatile( + /* dst[0,1] = pack((mm1 + 32) >> 6) */ + 
"paddw %1, %%mm1\n\t" + "psrlw $6, %%mm1\n\t" + "packssdw %%mm7, %%mm1\n\t" + "packuswb %%mm7, %%mm1\n\t" + /* writes garbage to the right of dst. + * ok because partitions are processed from left to right. */ + H264_CHROMA_OP4(%0, %%mm1, %%mm3) + "movd %%mm1, %0\n\t" + : "=m" (dst[0]) : "m" (ff_pw_32)); + dst += stride; + } +} +#endif + diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c index 7d69859a6..ec6b2ad1a 100644 --- a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c +++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c @@ -23,6 +23,7 @@ #include "../dsputil.h" #include "../simple_idct.h" #include "../mpegvideo.h" +#include "x86_cpu.h" #include "mmx.h" //#undef NDEBUG @@ -186,6 +187,11 @@ static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xF #undef DEF #undef PAVGB +#define SBUTTERFLY(a,b,t,n)\ + "movq " #a ", " #t " \n\t" /* abcd */\ + "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\ + "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\ + /***********************************/ /* standard MMX */ @@ -1522,11 +1528,6 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t "pmaxsw " #z ", " #a " \n\t"\ "paddusw " #a ", " #sum " \n\t" -#define SBUTTERFLY(a,b,t,n)\ - "movq " #a ", " #t " \n\t" /* abcd */\ - "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\ - "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\ - #define TRANSPOSE4(a,b,c,d,t)\ SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\ SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\ @@ -2403,6 +2404,124 @@ static void just_return() { return; } c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\ c->avg_ ## postfix1 = avg_ ## postfix2; +static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, + int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){ + const int w = 8; + const int ix = ox>>(16+shift); + const int iy = oy>>(16+shift); + const int oxs = ox>>4; + const int oys = oy>>4; + const int dxxs = dxx>>4; + const int dxys = dxy>>4; + const int dyxs = dyx>>4; + const int dyys = dyy>>4; + const uint16_t r4[4] = {r,r,r,r}; + const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys}; + const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys}; + const uint64_t shift2 = 2*shift; + uint8_t edge_buf[(h+1)*stride]; + int x, y; + + const int dxw = (dxx-(1<<(16+shift)))*(w-1); + const int dyh = (dyy-(1<<(16+shift)))*(h-1); + const int dxh = dxy*(h-1); + const int dyw = dyx*(w-1); + if( // non-constant fullpel offset (3% of blocks) + (ox^(ox+dxw) | ox^(ox+dxh) | ox^(ox+dxw+dxh) | + oy^(oy+dyw) | oy^(oy+dyh) | oy^(oy+dyw+dyh)) >> (16+shift) + // uses more than 16 bits of subpel mv (only at huge resolution) + || (dxx|dxy|dyx|dyy)&15 ) + { + //FIXME could still use mmx for some of the rows + ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height); + return; + } + + src += ix + iy*stride; + if( (unsigned)ix >= width-w || + (unsigned)iy >= height-h ) + { + ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height); + src = edge_buf; + } + + asm volatile( + "movd %0, %%mm6 \n\t" + "pxor %%mm7, %%mm7 \n\t" + "punpcklwd %%mm6, %%mm6 \n\t" + "punpcklwd %%mm6, %%mm6 \n\t" + :: "r"(1<<shift) + ); + + for(x=0; x<w; x+=4){ + uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0), + oxs - dxys + dxxs*(x+1), + oxs - dxys + dxxs*(x+2), + oxs - dxys + dxxs*(x+3) }; + uint16_t dy4[4] = { oys - dyys + dyxs*(x+0), + oys - dyys + dyxs*(x+1), + oys - dyys + dyxs*(x+2), + oys - dyys + dyxs*(x+3) }; + + for(y=0; y<h; y++){ + asm 
volatile( + "movq %0, %%mm4 \n\t" + "movq %1, %%mm5 \n\t" + "paddw %2, %%mm4 \n\t" + "paddw %3, %%mm5 \n\t" + "movq %%mm4, %0 \n\t" + "movq %%mm5, %1 \n\t" + "psrlw $12, %%mm4 \n\t" + "psrlw $12, %%mm5 \n\t" + : "+m"(*dx4), "+m"(*dy4) + : "m"(*dxy4), "m"(*dyy4) + ); + + asm volatile( + "movq %%mm6, %%mm2 \n\t" + "movq %%mm6, %%mm1 \n\t" + "psubw %%mm4, %%mm2 \n\t" + "psubw %%mm5, %%mm1 \n\t" + "movq %%mm2, %%mm0 \n\t" + "movq %%mm4, %%mm3 \n\t" + "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy) + "pmullw %%mm5, %%mm3 \n\t" // dx*dy + "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy + "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy) + + "movd %4, %%mm5 \n\t" + "movd %3, %%mm4 \n\t" + "punpcklbw %%mm7, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy + "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy + + "movd %2, %%mm5 \n\t" + "movd %1, %%mm4 \n\t" + "punpcklbw %%mm7, %%mm5 \n\t" + "punpcklbw %%mm7, %%mm4 \n\t" + "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy) + "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy) + "paddw %5, %%mm1 \n\t" + "paddw %%mm3, %%mm2 \n\t" + "paddw %%mm1, %%mm0 \n\t" + "paddw %%mm2, %%mm0 \n\t" + + "psrlw %6, %%mm0 \n\t" + "packuswb %%mm0, %%mm0 \n\t" + "movd %%mm0, %0 \n\t" + + : "=m"(dst[x+y*stride]) + : "m"(src[0]), "m"(src[1]), + "m"(src[stride]), "m"(src[stride+1]), + "m"(*r4), "m"(shift2) + ); + src += stride; + } + src += 4-h*stride; + } +} + static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){ long i=0; @@ -2489,8 +2608,36 @@ static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){ } } +#define PREFETCH(name, op) \ +void name(void *mem, int stride, int h){\ + const uint8_t *p= mem;\ + do{\ + asm volatile(#op" %0" :: "m"(*p));\ + p+= stride;\ + }while(--h);\ +} +PREFETCH(prefetch_mmx2, prefetcht0) +PREFETCH(prefetch_3dnow, prefetch) +#undef PREFETCH + #include "h264dsp_mmx.c" +/* AVS specific */ +void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx); + +void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { + put_pixels8_mmx(dst, src, stride, 8); +} +void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { + avg_pixels8_mmx(dst, src, stride, 8); +} +void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { + put_pixels16_mmx(dst, src, stride, 16); +} +void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { + avg_pixels16_mmx(dst, src, stride, 16); +} + /* external functions, from idct_mmx.c */ void ff_mmx_idct(DCTELEM *block); void ff_mmxext_idct(DCTELEM *block); @@ -2564,6 +2711,17 @@ static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block) } #endif +#ifdef CONFIG_SNOW_ENCODER +extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width); +extern void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width); +extern void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width); +extern void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width); +extern void ff_snow_inner_add_yblock_sse2(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, + int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); +extern void ff_snow_inner_add_yblock_mmx(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, + int src_x, int src_y, int src_stride, slice_buffer * sb, int 
add, uint8_t * dst8); +#endif + void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) { mm_flags = mm_support(); @@ -2622,6 +2780,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->idct = ff_mmx_idct; } c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; +#if 0 }else if(idct_algo==FF_IDCT_VP3){ if(mm_flags & MM_SSE2){ c->idct_put= ff_vp3_idct_put_sse2; @@ -2635,6 +2794,9 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->idct = ff_vp3_idct_mmx; c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM; } +#endif + }else if(idct_algo==FF_IDCT_CAVS){ + c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; #ifdef CONFIG_GPL }else if(idct_algo==FF_IDCT_XVIDMMX){ if(mm_flags & MM_MMXEXT){ @@ -2702,6 +2864,8 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx; c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx; + c->gmc= gmc_mmx; + c->add_bytes= add_bytes_mmx; #ifdef CONFIG_ENCODERS c->diff_bytes= diff_bytes_mmx; @@ -2732,7 +2896,14 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx; c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx; + c->h264_idct_dc_add= + c->h264_idct_add= ff_h264_idct_add_mmx; + c->h264_idct8_dc_add= + c->h264_idct8_add= ff_h264_idct8_add_mmx; + if (mm_flags & MM_MMXEXT) { + c->prefetch = prefetch_mmx2; + c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; @@ -2753,7 +2924,8 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->vsad[4]= vsad_intra16_mmx2; #endif //CONFIG_ENCODERS - c->h264_idct_add= ff_h264_idct_add_mmx2; + c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2; + c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2; if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; @@ -2831,6 +3003,8 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2; c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2; + c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2; + c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2; c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2; c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2; c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2; @@ -2856,10 +3030,16 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2; c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2; +#ifdef CONFIG_CAVS_DECODER + ff_cavsdsp_init_mmx2(c, avctx); +#endif + #ifdef CONFIG_ENCODERS c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2; #endif //CONFIG_ENCODERS } else if (mm_flags & MM_3DNOW) { + c->prefetch = prefetch_3dnow; + c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow; @@ -2944,6 +3124,19 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow; c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow; } + +#ifdef CONFIG_SNOW_ENCODER + if(mm_flags & MM_SSE2){ + c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2; + c->vertical_compose97i = ff_snow_vertical_compose97i_sse2; + c->inner_add_yblock = ff_snow_inner_add_yblock_sse2; + } + else{ + c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx; + c->vertical_compose97i = ff_snow_vertical_compose97i_mmx; + 
c->inner_add_yblock = ff_snow_inner_add_yblock_mmx; + } +#endif } #ifdef CONFIG_ENCODERS diff --git a/src/libffmpeg/libavcodec/i386/fft_sse.c b/src/libffmpeg/libavcodec/i386/fft_sse.c index 54851fb94..631848265 100644 --- a/src/libffmpeg/libavcodec/i386/fft_sse.c +++ b/src/libffmpeg/libavcodec/i386/fft_sse.c @@ -23,14 +23,14 @@ #include <xmmintrin.h> -static const float p1p1p1m1[4] __attribute__((aligned(16))) = - { 1.0, 1.0, 1.0, -1.0 }; +static const int p1p1p1m1[4] __attribute__((aligned(16))) = + { 0, 0, 0, 1 << 31 }; -static const float p1p1m1p1[4] __attribute__((aligned(16))) = - { 1.0, 1.0, -1.0, 1.0 }; +static const int p1p1m1p1[4] __attribute__((aligned(16))) = + { 0, 0, 1 << 31, 0 }; -static const float p1p1m1m1[4] __attribute__((aligned(16))) = - { 1.0, 1.0, -1.0, -1.0 }; +static const int p1p1m1m1[4] __attribute__((aligned(16))) = + { 0, 0, 1 << 31, 1 << 31 }; #if 0 static void print_v4sf(const char *str, __m128 a) @@ -58,7 +58,6 @@ void ff_fft_calc_sse(FFTContext *s, FFTComplex *z) r = (__m128 *)&z[0]; c1 = *(__m128 *)p1p1m1m1; - c2 = *(__m128 *)p1p1p1m1; if (s->inverse) c2 = *(__m128 *)p1p1m1p1; else @@ -68,19 +67,20 @@ void ff_fft_calc_sse(FFTContext *s, FFTComplex *z) do { a = r[0]; b = _mm_shuffle_ps(a, a, _MM_SHUFFLE(1, 0, 3, 2)); - a = _mm_mul_ps(a, c1); + a = _mm_xor_ps(a, c1); /* do the pass 0 butterfly */ a = _mm_add_ps(a, b); a1 = r[1]; b = _mm_shuffle_ps(a1, a1, _MM_SHUFFLE(1, 0, 3, 2)); - a1 = _mm_mul_ps(a1, c1); + a1 = _mm_xor_ps(a1, c1); /* do the pass 0 butterfly */ b = _mm_add_ps(a1, b); /* multiply third by -i */ + /* by toggling the sign bit */ b = _mm_shuffle_ps(b, b, _MM_SHUFFLE(2, 3, 1, 0)); - b = _mm_mul_ps(b, c2); + b = _mm_xor_ps(b, c2); /* do the pass 1 butterfly */ r[0] = _mm_add_ps(a, b); diff --git a/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c b/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c index 8ab58f389..ac4ad6401 100644 --- a/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c +++ b/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c @@ -46,11 +46,6 @@ SUMSUBD2_AB( s13, d13, t )\ SUMSUB_BADC( d13, s02, s13, d02 ) -#define SBUTTERFLY(a,b,t,n)\ - "movq " #a ", " #t " \n\t" /* abcd */\ - "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\ - "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\ - #define TRANSPOSE4(a,b,c,d,t)\ SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\ SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\ @@ -65,7 +60,7 @@ "packuswb "#z", "#p" \n\t"\ "movd "#p", (%0) \n\t" -void ff_h264_idct_add_mmx2(uint8_t *dst, int16_t *block, int stride) +static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride) { /* Load dct coeffs */ asm volatile( @@ -104,6 +99,208 @@ void ff_h264_idct_add_mmx2(uint8_t *dst, int16_t *block, int stride) ); } +static inline void h264_idct8_1d(int16_t *block) +{ + asm volatile( + "movq 112(%0), %%mm7 \n\t" + "movq 80(%0), %%mm5 \n\t" + "movq 48(%0), %%mm3 \n\t" + "movq 16(%0), %%mm1 \n\t" + + "movq %%mm7, %%mm4 \n\t" + "movq %%mm3, %%mm6 \n\t" + "movq %%mm5, %%mm0 \n\t" + "movq %%mm7, %%mm2 \n\t" + "psraw $1, %%mm4 \n\t" + "psraw $1, %%mm6 \n\t" + "psubw %%mm7, %%mm0 \n\t" + "psubw %%mm6, %%mm2 \n\t" + "psubw %%mm4, %%mm0 \n\t" + "psubw %%mm3, %%mm2 \n\t" + "psubw %%mm3, %%mm0 \n\t" + "paddw %%mm1, %%mm2 \n\t" + + "movq %%mm5, %%mm4 \n\t" + "movq %%mm1, %%mm6 \n\t" + "psraw $1, %%mm4 \n\t" + "psraw $1, %%mm6 \n\t" + "paddw %%mm5, %%mm4 \n\t" + "paddw %%mm1, %%mm6 \n\t" + "paddw %%mm7, %%mm4 \n\t" + "paddw %%mm5, %%mm6 \n\t" + "psubw %%mm1, %%mm4 \n\t" + "paddw %%mm3, %%mm6 \n\t" + + "movq %%mm0, %%mm1 \n\t" + "movq %%mm4, %%mm3 \n\t" + 
"movq %%mm2, %%mm5 \n\t" + "movq %%mm6, %%mm7 \n\t" + "psraw $2, %%mm6 \n\t" + "psraw $2, %%mm3 \n\t" + "psraw $2, %%mm5 \n\t" + "psraw $2, %%mm0 \n\t" + "paddw %%mm6, %%mm1 \n\t" + "paddw %%mm2, %%mm3 \n\t" + "psubw %%mm4, %%mm5 \n\t" + "psubw %%mm0, %%mm7 \n\t" + + "movq 32(%0), %%mm2 \n\t" + "movq 96(%0), %%mm6 \n\t" + "movq %%mm2, %%mm4 \n\t" + "movq %%mm6, %%mm0 \n\t" + "psraw $1, %%mm4 \n\t" + "psraw $1, %%mm6 \n\t" + "psubw %%mm0, %%mm4 \n\t" + "paddw %%mm2, %%mm6 \n\t" + + "movq (%0), %%mm2 \n\t" + "movq 64(%0), %%mm0 \n\t" + SUMSUB_BA( %%mm0, %%mm2 ) + SUMSUB_BA( %%mm6, %%mm0 ) + SUMSUB_BA( %%mm4, %%mm2 ) + SUMSUB_BA( %%mm7, %%mm6 ) + SUMSUB_BA( %%mm5, %%mm4 ) + SUMSUB_BA( %%mm3, %%mm2 ) + SUMSUB_BA( %%mm1, %%mm0 ) + :: "r"(block) + ); +} + +static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride) +{ + int i; + int16_t __attribute__ ((aligned(8))) b2[64]; + + block[0] += 32; + + for(i=0; i<2; i++){ + uint64_t tmp; + + h264_idct8_1d(block+4*i); + + asm volatile( + "movq %%mm7, %0 \n\t" + TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 ) + "movq %%mm0, 8(%1) \n\t" + "movq %%mm6, 24(%1) \n\t" + "movq %%mm7, 40(%1) \n\t" + "movq %%mm4, 56(%1) \n\t" + "movq %0, %%mm7 \n\t" + TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 ) + "movq %%mm7, (%1) \n\t" + "movq %%mm1, 16(%1) \n\t" + "movq %%mm0, 32(%1) \n\t" + "movq %%mm3, 48(%1) \n\t" + : "=m"(tmp) + : "r"(b2+32*i) + : "memory" + ); + } + + for(i=0; i<2; i++){ + h264_idct8_1d(b2+4*i); + + asm volatile( + "psraw $6, %%mm7 \n\t" + "psraw $6, %%mm6 \n\t" + "psraw $6, %%mm5 \n\t" + "psraw $6, %%mm4 \n\t" + "psraw $6, %%mm3 \n\t" + "psraw $6, %%mm2 \n\t" + "psraw $6, %%mm1 \n\t" + "psraw $6, %%mm0 \n\t" + + "movq %%mm7, (%0) \n\t" + "movq %%mm5, 16(%0) \n\t" + "movq %%mm3, 32(%0) \n\t" + "movq %%mm1, 48(%0) \n\t" + "movq %%mm0, 64(%0) \n\t" + "movq %%mm2, 80(%0) \n\t" + "movq %%mm4, 96(%0) \n\t" + "movq %%mm6, 112(%0) \n\t" + :: "r"(b2+4*i) + : "memory" + ); + } + + add_pixels_clamped_mmx(b2, dst, stride); +} + +static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride) +{ + int dc = (block[0] + 32) >> 6; + asm volatile( + "movd %0, %%mm0 \n\t" + "pshufw $0, %%mm0, %%mm0 \n\t" + "pxor %%mm1, %%mm1 \n\t" + "psubw %%mm0, %%mm1 \n\t" + "packuswb %%mm0, %%mm0 \n\t" + "packuswb %%mm1, %%mm1 \n\t" + ::"r"(dc) + ); + asm volatile( + "movd %0, %%mm2 \n\t" + "movd %1, %%mm3 \n\t" + "movd %2, %%mm4 \n\t" + "movd %3, %%mm5 \n\t" + "paddusb %%mm0, %%mm2 \n\t" + "paddusb %%mm0, %%mm3 \n\t" + "paddusb %%mm0, %%mm4 \n\t" + "paddusb %%mm0, %%mm5 \n\t" + "psubusb %%mm1, %%mm2 \n\t" + "psubusb %%mm1, %%mm3 \n\t" + "psubusb %%mm1, %%mm4 \n\t" + "psubusb %%mm1, %%mm5 \n\t" + "movd %%mm2, %0 \n\t" + "movd %%mm3, %1 \n\t" + "movd %%mm4, %2 \n\t" + "movd %%mm5, %3 \n\t" + :"+m"(*(uint32_t*)(dst+0*stride)), + "+m"(*(uint32_t*)(dst+1*stride)), + "+m"(*(uint32_t*)(dst+2*stride)), + "+m"(*(uint32_t*)(dst+3*stride)) + ); +} + +static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride) +{ + int dc = (block[0] + 32) >> 6; + int y; + asm volatile( + "movd %0, %%mm0 \n\t" + "pshufw $0, %%mm0, %%mm0 \n\t" + "pxor %%mm1, %%mm1 \n\t" + "psubw %%mm0, %%mm1 \n\t" + "packuswb %%mm0, %%mm0 \n\t" + "packuswb %%mm1, %%mm1 \n\t" + ::"r"(dc) + ); + for(y=2; y--; dst += 4*stride){ + asm volatile( + "movq %0, %%mm2 \n\t" + "movq %1, %%mm3 \n\t" + "movq %2, %%mm4 \n\t" + "movq %3, %%mm5 \n\t" + "paddusb %%mm0, %%mm2 \n\t" + "paddusb %%mm0, %%mm3 \n\t" + "paddusb %%mm0, %%mm4 \n\t" + "paddusb %%mm0, %%mm5 \n\t" + "psubusb %%mm1, %%mm2 
\n\t" + "psubusb %%mm1, %%mm3 \n\t" + "psubusb %%mm1, %%mm4 \n\t" + "psubusb %%mm1, %%mm5 \n\t" + "movq %%mm2, %0 \n\t" + "movq %%mm3, %1 \n\t" + "movq %%mm4, %2 \n\t" + "movq %%mm5, %3 \n\t" + :"+m"(*(uint64_t*)(dst+0*stride)), + "+m"(*(uint64_t*)(dst+1*stride)), + "+m"(*(uint64_t*)(dst+2*stride)), + "+m"(*(uint64_t*)(dst+3*stride)) + ); + } +} + /***********************************/ /* deblocking */ @@ -441,6 +638,50 @@ static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, i : "memory"\ );\ }\ +static void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ + int h=4;\ + asm volatile(\ + "pxor %%mm7, %%mm7 \n\t"\ + "movq %0, %%mm4 \n\t"\ + "movq %1, %%mm5 \n\t"\ + :: "m"(ff_pw_5), "m"(ff_pw_16)\ + );\ + do{\ + asm volatile(\ + "movd -1(%0), %%mm1 \n\t"\ + "movd (%0), %%mm2 \n\t"\ + "movd 1(%0), %%mm3 \n\t"\ + "movd 2(%0), %%mm0 \n\t"\ + "punpcklbw %%mm7, %%mm1 \n\t"\ + "punpcklbw %%mm7, %%mm2 \n\t"\ + "punpcklbw %%mm7, %%mm3 \n\t"\ + "punpcklbw %%mm7, %%mm0 \n\t"\ + "paddw %%mm0, %%mm1 \n\t"\ + "paddw %%mm3, %%mm2 \n\t"\ + "movd -2(%0), %%mm0 \n\t"\ + "movd 3(%0), %%mm3 \n\t"\ + "punpcklbw %%mm7, %%mm0 \n\t"\ + "punpcklbw %%mm7, %%mm3 \n\t"\ + "paddw %%mm3, %%mm0 \n\t"\ + "psllw $2, %%mm2 \n\t"\ + "psubw %%mm1, %%mm2 \n\t"\ + "pmullw %%mm4, %%mm2 \n\t"\ + "paddw %%mm5, %%mm0 \n\t"\ + "paddw %%mm2, %%mm0 \n\t"\ + "movd (%2), %%mm3 \n\t"\ + "psraw $5, %%mm0 \n\t"\ + "packuswb %%mm0, %%mm0 \n\t"\ + PAVGB" %%mm3, %%mm0 \n\t"\ + OP(%%mm0, (%1),%%mm6, d)\ + "add %4, %0 \n\t"\ + "add %4, %1 \n\t"\ + "add %3, %2 \n\t"\ + : "+a"(src), "+c"(dst), "+d"(src2)\ + : "D"((long)src2Stride), "S"((long)dstStride)\ + : "memory"\ + );\ + }while(--h);\ +}\ static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ src -= 2*srcStride;\ asm volatile(\ @@ -591,11 +832,74 @@ static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, i );\ }\ \ -static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - int h= 2;\ +static void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ + int h=8;\ + asm volatile(\ + "pxor %%mm7, %%mm7 \n\t"\ + "movq %0, %%mm6 \n\t"\ + :: "m"(ff_pw_5)\ + );\ + do{\ + asm volatile(\ + "movq (%0), %%mm0 \n\t"\ + "movq 1(%0), %%mm2 \n\t"\ + "movq %%mm0, %%mm1 \n\t"\ + "movq %%mm2, %%mm3 \n\t"\ + "punpcklbw %%mm7, %%mm0 \n\t"\ + "punpckhbw %%mm7, %%mm1 \n\t"\ + "punpcklbw %%mm7, %%mm2 \n\t"\ + "punpckhbw %%mm7, %%mm3 \n\t"\ + "paddw %%mm2, %%mm0 \n\t"\ + "paddw %%mm3, %%mm1 \n\t"\ + "psllw $2, %%mm0 \n\t"\ + "psllw $2, %%mm1 \n\t"\ + "movq -1(%0), %%mm2 \n\t"\ + "movq 2(%0), %%mm4 \n\t"\ + "movq %%mm2, %%mm3 \n\t"\ + "movq %%mm4, %%mm5 \n\t"\ + "punpcklbw %%mm7, %%mm2 \n\t"\ + "punpckhbw %%mm7, %%mm3 \n\t"\ + "punpcklbw %%mm7, %%mm4 \n\t"\ + "punpckhbw %%mm7, %%mm5 \n\t"\ + "paddw %%mm4, %%mm2 \n\t"\ + "paddw %%mm3, %%mm5 \n\t"\ + "psubw %%mm2, %%mm0 \n\t"\ + "psubw %%mm5, %%mm1 \n\t"\ + "pmullw %%mm6, %%mm0 \n\t"\ + "pmullw %%mm6, %%mm1 \n\t"\ + "movd -2(%0), %%mm2 \n\t"\ + "movd 7(%0), %%mm5 \n\t"\ + "punpcklbw %%mm7, %%mm2 \n\t"\ + "punpcklbw %%mm7, %%mm5 \n\t"\ + "paddw %%mm3, %%mm2 \n\t"\ + "paddw %%mm5, %%mm4 \n\t"\ + "movq %5, %%mm5 \n\t"\ + "paddw %%mm5, %%mm2 \n\t"\ + "paddw %%mm5, %%mm4 \n\t"\ + "paddw %%mm2, %%mm0 \n\t"\ + "paddw %%mm4, %%mm1 \n\t"\ + "psraw $5, %%mm0 \n\t"\ + "psraw $5, %%mm1 \n\t"\ + 
"movq (%2), %%mm4 \n\t"\ + "packuswb %%mm1, %%mm0 \n\t"\ + PAVGB" %%mm4, %%mm0 \n\t"\ + OP(%%mm0, (%1),%%mm5, q)\ + "add %4, %0 \n\t"\ + "add %4, %1 \n\t"\ + "add %3, %2 \n\t"\ + : "+a"(src), "+c"(dst), "+d"(src2)\ + : "D"((long)src2Stride), "S"((long)dstStride),\ + "m"(ff_pw_16)\ + : "memory"\ + );\ + }while(--h);\ +}\ +\ +static inline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ + int w= 2;\ src -= 2*srcStride;\ \ - while(h--){\ + while(w--){\ asm volatile(\ "pxor %%mm7, %%mm7 \n\t"\ "movd (%0), %%mm0 \n\t"\ @@ -626,13 +930,29 @@ static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, i : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ : "memory"\ );\ - src += 4-13*srcStride;\ - dst += 4-8*dstStride;\ + if(h==16){\ + asm volatile(\ + QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ + QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ + QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ + QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ + QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ + QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ + QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ + QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ + \ + : "+a"(src), "+c"(dst)\ + : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ + : "memory"\ + );\ + }\ + src += 4-(h+5)*srcStride;\ + dst += 4-h*dstStride;\ }\ }\ -static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ - int h=8;\ - int w=4;\ +static inline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\ + int h = size;\ + int w = (size+8)>>2;\ src -= 2*srcStride+2;\ while(w--){\ asm volatile(\ @@ -652,23 +972,40 @@ static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, "punpcklbw %%mm7, %%mm2 \n\t"\ "punpcklbw %%mm7, %%mm3 \n\t"\ "punpcklbw %%mm7, %%mm4 \n\t"\ - QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*4)\ - QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*4)\ - QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*4)\ - QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*4)\ - QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*8*4)\ - QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*8*4)\ - QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*8*4)\ - QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*8*4)\ - \ + QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\ + QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\ + QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\ + QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\ + QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\ + QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\ + QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\ + QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\ : "+a"(src)\ : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\ : "memory"\ );\ + if(size==16){\ + asm volatile(\ + QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\ + QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\ + QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\ + QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\ + 
QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\ + QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\ + QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\ + QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\ + : "+a"(src)\ + : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\ + : "memory"\ + );\ + }\ tmp += 4;\ - src += 4 - 13*srcStride;\ + src += 4 - (size+5)*srcStride;\ }\ - tmp -= 4*4;\ + tmp -= size+8;\ + w = size>>4;\ + do{\ + h = size;\ asm volatile(\ "movq %4, %%mm6 \n\t"\ "1: \n\t"\ @@ -702,7 +1039,7 @@ static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, "psraw $6, %%mm3 \n\t"\ "packuswb %%mm3, %%mm0 \n\t"\ OP(%%mm0, (%1),%%mm7, q)\ - "add $32, %0 \n\t"\ + "add $48, %0 \n\t"\ "add %3, %1 \n\t"\ "decl %2 \n\t"\ " jnz 1b \n\t"\ @@ -710,14 +1047,17 @@ static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, : "S"((long)dstStride), "m"(ff_pw_32)\ : "memory"\ );\ + tmp += 8 - size*24;\ + dst += 8 - size*dstStride;\ + }while(w--);\ +}\ +\ +static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ + OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\ }\ static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ - OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ - src += 8*srcStride;\ - dst += 8*dstStride;\ - OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ - OPNAME ## h264_qpel8_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ + OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\ + OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ }\ \ static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ @@ -729,14 +1069,88 @@ static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ }\ \ -static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ - OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\ - OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\ - src += 8*srcStride;\ +static void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ + OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ + OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ + src += 8*dstStride;\ dst += 8*dstStride;\ - OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride);\ - OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(dst+8, tmp , src+8, dstStride, tmpStride, srcStride);\ + src2 += 8*src2Stride;\ + OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ + OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ +}\ +\ +static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ + OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\ +}\ 
+\ +static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ + OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\ +}\ +\ +static void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ +{\ + asm volatile(\ + "movq %5, %%mm6 \n\t"\ + "movq (%1), %%mm0 \n\t"\ + "movq 24(%1), %%mm1 \n\t"\ + "paddw %%mm6, %%mm0 \n\t"\ + "paddw %%mm6, %%mm1 \n\t"\ + "psraw $5, %%mm0 \n\t"\ + "psraw $5, %%mm1 \n\t"\ + "packuswb %%mm0, %%mm0 \n\t"\ + "packuswb %%mm1, %%mm1 \n\t"\ + PAVGB" (%0), %%mm0 \n\t"\ + PAVGB" (%0,%3), %%mm1 \n\t"\ + OP(%%mm0, (%2), %%mm4, d)\ + OP(%%mm1, (%2,%4), %%mm5, d)\ + "lea (%0,%3,2), %0 \n\t"\ + "lea (%2,%4,2), %2 \n\t"\ + "movq 48(%1), %%mm0 \n\t"\ + "movq 72(%1), %%mm1 \n\t"\ + "paddw %%mm6, %%mm0 \n\t"\ + "paddw %%mm6, %%mm1 \n\t"\ + "psraw $5, %%mm0 \n\t"\ + "psraw $5, %%mm1 \n\t"\ + "packuswb %%mm0, %%mm0 \n\t"\ + "packuswb %%mm1, %%mm1 \n\t"\ + PAVGB" (%0), %%mm0 \n\t"\ + PAVGB" (%0,%3), %%mm1 \n\t"\ + OP(%%mm0, (%2), %%mm4, d)\ + OP(%%mm1, (%2,%4), %%mm5, d)\ + :"+a"(src8), "+c"(src16), "+d"(dst)\ + :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\ + :"memory");\ +}\ +static void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ +{\ + asm volatile(\ + "movq %0, %%mm6 \n\t"\ + ::"m"(ff_pw_16)\ + );\ + while(h--){\ + asm volatile(\ + "movq (%1), %%mm0 \n\t"\ + "movq 8(%1), %%mm1 \n\t"\ + "paddw %%mm6, %%mm0 \n\t"\ + "paddw %%mm6, %%mm1 \n\t"\ + "psraw $5, %%mm0 \n\t"\ + "psraw $5, %%mm1 \n\t"\ + "packuswb %%mm1, %%mm0 \n\t"\ + PAVGB" (%0), %%mm0 \n\t"\ + OP(%%mm0, (%2), %%mm5, q)\ + ::"a"(src8), "c"(src16), "d"(dst)\ + :"memory");\ + src8 += src8Stride;\ + src16 += 24;\ + dst += dstStride;\ + }\ }\ +static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ +{\ + OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\ + OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\ +}\ + #define H264_MC(OPNAME, SIZE, MMX) \ static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ @@ -744,10 +1158,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8];\ - uint8_t * const half= (uint8_t*)temp;\ - put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ @@ -755,10 +1166,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t * }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8];\ - uint8_t * const half= (uint8_t*)temp;\ - put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(half, src, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+1, half, stride, stride, SIZE);\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\ }\ \ static void 
OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ @@ -780,89 +1188,72 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t * }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/4];\ - uint8_t * const halfH= (uint8_t*)temp;\ - uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ - put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\ + uint64_t temp[SIZE*SIZE/8];\ + uint8_t * const halfV= (uint8_t*)temp;\ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/4];\ - uint8_t * const halfH= (uint8_t*)temp;\ - uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ - put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\ + uint64_t temp[SIZE*SIZE/8];\ + uint8_t * const halfV= (uint8_t*)temp;\ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/4];\ - uint8_t * const halfH= (uint8_t*)temp;\ - uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ - put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\ + uint64_t temp[SIZE*SIZE/8];\ + uint8_t * const halfV= (uint8_t*)temp;\ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/4];\ - uint8_t * const halfH= (uint8_t*)temp;\ - uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ - put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\ + uint64_t temp[SIZE*SIZE/8];\ + uint8_t * const halfV= (uint8_t*)temp;\ put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfV, stride, SIZE, SIZE);\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4];\ + uint64_t temp[SIZE*(SIZE<8?12:24)/4];\ int16_t * const tmp= (int16_t*)temp;\ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\ - uint8_t * const halfH= (uint8_t*)temp;\ - uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ - int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ - put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src, SIZE, stride);\ + uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ + uint8_t * const halfHV= (uint8_t*)temp;\ + int16_t * const tmp= ((int16_t*)temp) + 
SIZE*SIZE/2;\ put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\ - uint8_t * const halfH= (uint8_t*)temp;\ - uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ - int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ - put_h264_qpel ## SIZE ## _h_lowpass_ ## MMX(halfH, src + stride, SIZE, stride);\ + uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ + uint8_t * const halfHV= (uint8_t*)temp;\ + int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\ put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfH, halfHV, stride, SIZE, SIZE);\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\ - uint8_t * const halfV= (uint8_t*)temp;\ - uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ - int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ - put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ - put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\ + uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ + int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\ + uint8_t * const halfHV= ((uint8_t*)temp);\ + put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4];\ - uint8_t * const halfV= (uint8_t*)temp;\ - uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ - int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ - put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ - put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, halfV, halfHV, stride, SIZE, SIZE);\ + uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ + int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\ + uint8_t * const halfHV= ((uint8_t*)temp);\ + put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\ }\ -#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t" #define AVG_3DNOW_OP(a,b,temp, size) \ "mov" #size " " #b ", " #temp " \n\t"\ "pavgusb " #temp ", " #a " \n\t"\ @@ -872,10 +1263,14 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t * "pavgb " #temp ", " #a " \n\t"\ "mov" #size " " #a ", " #b " \n\t" +#define PAVGB "pavgusb" QPEL_H264(put_, PUT_OP, 3dnow) QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow) +#undef PAVGB +#define PAVGB "pavgb" QPEL_H264(put_, PUT_OP, mmx2) QPEL_H264(avg_, AVG_MMX2_OP, mmx2) +#undef PAVGB H264_MC(put_, 4, 3dnow) H264_MC(put_, 8, 3dnow) @@ -895,12 +1290,14 @@ H264_MC(avg_, 16,mmx2) #define H264_CHROMA_OP4(S,D,T) 
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx #define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx +#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2 #define H264_CHROMA_MC8_MV0 put_pixels8_mmx #include "dsputil_h264_template_mmx.c" #undef H264_CHROMA_OP #undef H264_CHROMA_OP4 #undef H264_CHROMA_MC8_TMPL #undef H264_CHROMA_MC4_TMPL +#undef H264_CHROMA_MC2_TMPL #undef H264_CHROMA_MC8_MV0 #define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t" @@ -908,12 +1305,14 @@ H264_MC(avg_, 16,mmx2) "pavgb " #T ", " #D " \n\t" #define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2 #define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2 +#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2 #define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2 #include "dsputil_h264_template_mmx.c" #undef H264_CHROMA_OP #undef H264_CHROMA_OP4 #undef H264_CHROMA_MC8_TMPL #undef H264_CHROMA_MC4_TMPL +#undef H264_CHROMA_MC2_TMPL #undef H264_CHROMA_MC8_MV0 #define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t" diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c b/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c index 7bc6f5f78..a55d4ea07 100644 --- a/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c +++ b/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c @@ -20,7 +20,7 @@ // * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA // // * -// * $Id: idct_mmx_xvid.c,v 1.2 2006/02/05 14:11:36 miguelfreitas Exp $ +// * $Id: idct_mmx_xvid.c,v 1.3 2006/08/02 07:02:41 tmmm Exp $ // * // ***************************************************************************/ diff --git a/src/libffmpeg/libavcodec/i386/mmx.h b/src/libffmpeg/libavcodec/i386/mmx.h index df4620e0a..eab051341 100644 --- a/src/libffmpeg/libavcodec/i386/mmx.h +++ b/src/libffmpeg/libavcodec/i386/mmx.h @@ -5,22 +5,6 @@ #ifndef AVCODEC_I386MMX_H #define AVCODEC_I386MMX_H -#ifdef ARCH_X86_64 -# define REG_a "rax" -# define REG_b "rbx" -# define REG_c "rcx" -# define REG_d "rdx" -# define REG_D "rdi" -# define REG_S "rsi" -#else -# define REG_a "eax" -# define REG_b "ebx" -# define REG_c "ecx" -# define REG_d "edx" -# define REG_D "edi" -# define REG_S "esi" -#endif - /* * The type of an value that fits in an MMX register (note that long * long constant values MUST be suffixed by LL and unsigned long long diff --git a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c b/src/libffmpeg/libavcodec/i386/motion_est_mmx.c index c14b79384..edcabcf38 100644 --- a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c +++ b/src/libffmpeg/libavcodec/i386/motion_est_mmx.c @@ -20,7 +20,7 @@ * mostly by Michael Niedermayer <michaelni@gmx.at> */ #include "../dsputil.h" -#include "mmx.h" +#include "x86_cpu.h" static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={ 0x0000000000000000ULL, diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c index f83df3a19..c00a602bd 100644 --- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c +++ b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c @@ -23,7 +23,7 @@ #include "../dsputil.h" #include "../mpegvideo.h" #include "../avcodec.h" -#include "mmx.h" +#include "x86_cpu.h" extern uint8_t zigzag_direct_noperm[64]; extern uint16_t inv_zigzag_direct16[64]; @@ -699,7 +699,8 @@ void MPV_common_init_mmx(MpegEncContext *s) s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx; s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx; s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx; - s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx; + if(!(s->flags & 
CODEC_FLAG_BITEXACT)) + s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx; s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx; draw_edges = draw_edges_mmx; diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c index 2c50df232..de2ef08e5 100644 --- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c +++ b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c @@ -19,6 +19,7 @@ */ #undef SPREADW #undef PMAXW +#undef PMAX #ifdef HAVE_MMX2 #define SPREADW(a) "pshufw $0, " #a ", " #a " \n\t" #define PMAXW(a,b) "pmaxsw " #a ", " #b " \n\t" |
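
Notes on two of the hunks above. First, the fft_sse.c hunk replaces _mm_mul_ps against vectors of +/-1.0 with _mm_xor_ps against sign-bit masks: toggling the IEEE-754 sign bit negates a float, so selective negation needs no multiply. A minimal sketch of the trick (hypothetical names; the mask is written as an unsigned constant here, which avoids the signed overflow in the patch's 1 << 31):

    #include <xmmintrin.h>

    /* Negate lanes 2 and 3 of a vector by XORing their sign bits,
     * the same trick ff_fft_calc_sse now uses for its butterflies.
     * Mask corresponds to the patch's p1p1m1m1 = {1, 1, -1, -1}. */
    static const unsigned int m_p1p1m1m1[4] __attribute__((aligned(16))) =
        { 0, 0, 0x80000000u, 0x80000000u };

    static __m128 negate_high_pair(__m128 a)
    {
        return _mm_xor_ps(a, *(const __m128 *)m_p1p1m1m1);
    }

Second, the new ff_h264_idct_dc_add_mmx2 applies a signed DC offset using only unsigned saturating byte ops: it precomputes p = clip(dc, 0, 255) and n = clip(-dc, 0, 255) via packuswb, then computes dst = sat_sub(sat_add(dst, p), n), which equals clip(dst + dc, 0, 255). A scalar reference of the intended result (hypothetical helper, assuming C99):

    #include <stdint.h>

    static uint8_t clip_uint8(int x) { return x < 0 ? 0 : x > 255 ? 255 : x; }

    /* Scalar equivalent of the 4x4 DC-only IDCT add; the MMX2 version
     * produces the same clipped sum with paddusb/psubusb. */
    static void idct_dc_add_ref(uint8_t *dst, const int16_t *block, int stride)
    {
        int dc = (block[0] + 32) >> 6;
        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)
                dst[y*stride + x] = clip_uint8(dst[y*stride + x] + dc);
    }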