Diffstat (limited to 'src/libffmpeg/libavcodec/i386')
22 files changed, 0 insertions, 13560 deletions
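The first file removed below, cputest.c, probes the CPU's SIMD capabilities: it toggles the ID bit (bit 21) of EFLAGS to confirm that the CPUID instruction exists, then reads the standard and extended feature leaves for the MMX/SSE/3DNow! bits. For reference, here is a minimal standalone sketch of the same probe, assuming GCC or Clang on x86 and using the compiler's <cpuid.h> wrapper (which performs the same EFLAGS ID-bit test on 32-bit targets) instead of the hand-written inline asm; the HAS_* flag names are illustrative, not the deleted code's MM_* constants, and the SSE3/SSSE3 ECX checks of the original are omitted for brevity.

#include <cpuid.h>   /* GCC/Clang helper wrapping the CPUID instruction */
#include <stdio.h>

enum { HAS_MMX = 1, HAS_SSE = 2, HAS_SSE2 = 4, HAS_3DNOW = 8 };

static int cpu_flags(void)
{
    unsigned eax, ebx, ecx, edx;
    int rval = 0;

    /* Standard leaf 1: same EDX feature bits cputest.c tests.
     * (The deleted code additionally maps the SSE bit to MMXEXT.) */
    if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
        if (edx & (1u << 23)) rval |= HAS_MMX;
        if (edx & (1u << 25)) rval |= HAS_SSE;
        if (edx & (1u << 26)) rval |= HAS_SSE2;
    }
    /* Extended leaf 0x80000001: AMD 3DNow! in EDX bit 31. */
    if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) {
        if (edx & (1u << 31)) rval |= HAS_3DNOW;
    }
    return rval;
}

int main(void)
{
    printf("cpu flags = 0x%x\n", cpu_flags());
    return 0;
}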
diff --git a/src/libffmpeg/libavcodec/i386/Makefile.am b/src/libffmpeg/libavcodec/i386/Makefile.am deleted file mode 100644 index 039c50d61..000000000 --- a/src/libffmpeg/libavcodec/i386/Makefile.am +++ /dev/null @@ -1,57 +0,0 @@ -include $(top_srcdir)/misc/Makefile.common - -# -fomit-frame-pointer is always needed. it might cause debug to not -# work, but at least it compiles. -AM_CFLAGS = -fomit-frame-pointer -fno-strict-aliasing -# CFLAGS is here to filter out -funroll-loops because it causes bad -# behavior of libavcodec -CFLAGS := `echo @CFLAGS@ | sed -e 's/-funroll-loops//g'` -AM_CPPFLAGS = $(LIBFFMPEG_CPPFLAGS) -I$(top_srcdir)/src/libffmpeg/libavutil -I$(top_srcdir)/src/libffmpeg -I$(top_builddir)/src/libffmpeg - -# Avoid "can't find register" failures with -O1 and higher -dsputil_mmx.o dsputil_mmx.lo: CFLAGS=$(shell echo @CFLAGS@ | sed -e 's/-funroll-loops//g; s/$$/ -Os/') -if ARCH_X86_32 -dsputil_mmx.o dsputil_mmx.lo: CFLAGS+=-prefer-non-pic -endif - -# Avoid errors on (at least) amd64 with -O0 -fdct_mmx.o fdct_mmx.lo: CFLAGS=`echo @CFLAGS@ | sed -e 's/^/-Os /; s/-O0\?\s/-Os /g'` - -# Avoid errors with -O0 -mpegvideo_mmx.o mpegvideo_mmx.lo: CFLAGS=`echo @CFLAGS@ | sed -e 's/-O0\?\s/-Os /g'` - -ASFLAGS = - -noinst_LTLIBRARIES = libavcodec_mmx.la - -libavcodec_mmx_src = \ - cputest.c \ - dsputil_mmx.c \ - fdct_mmx.c \ - fft_3dn.c \ - fft_3dn2.c \ - fft_sse.c \ - idct_mmx.c \ - idct_mmx_xvid.c \ - motion_est_mmx.c \ - mpegvideo_mmx.c \ - simple_idct_mmx.c \ - vp3dsp_mmx.c \ - vp3dsp_sse2.c - -libavcodec_mmx_dummy = libavcodec_mmx_dummy.c - -EXTRA_DIST = \ - $(libavcodec_mmx_dummy) \ - $(libavcodec_mmx_src) \ - dsputil_h264_template_mmx.c \ - h264dsp_mmx.c \ - mpegvideo_mmx_template.c - -if HAVE_MMX -mmx_modules = $(libavcodec_mmx_src) -endif - -libavcodec_mmx_la_SOURCES = $(mmx_modules) $(libavcodec_mmx_dummy) - -noinst_HEADERS = dsputil_mmx_avg.h dsputil_mmx_rnd.h mathops.h mmx.h diff --git a/src/libffmpeg/libavcodec/i386/cputest.c b/src/libffmpeg/libavcodec/i386/cputest.c deleted file mode 100644 index 0705ab3e5..000000000 --- a/src/libffmpeg/libavcodec/i386/cputest.c +++ /dev/null @@ -1,130 +0,0 @@ -/* - * CPU detection code, extracted from mmx.h - * (c)1997-99 by H. Dietz and R. Fisher - * Converted to C and improved by Fabrice Bellard. - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include <stdlib.h> -#include "../dsputil.h" - -#undef printf - -#ifdef ARCH_X86_64 -# define REG_b "rbx" -# define REG_S "rsi" -#else -# define REG_b "ebx" -# define REG_S "esi" -#endif - -/* ebx saving is necessary for PIC. 
gcc seems unable to see it alone */ -#define cpuid(index,eax,ebx,ecx,edx)\ - __asm __volatile\ - ("mov %%"REG_b", %%"REG_S"\n\t"\ - "cpuid\n\t"\ - "xchg %%"REG_b", %%"REG_S\ - : "=a" (eax), "=S" (ebx),\ - "=c" (ecx), "=d" (edx)\ - : "0" (index)); - -/* Function to test if multimedia instructions are supported... */ -int mm_support(void) -{ - int rval = 0; - int eax, ebx, ecx, edx; - int max_std_level, max_ext_level, std_caps=0, ext_caps=0; - long a, c; - - __asm__ __volatile__ ( - /* See if CPUID instruction is supported ... */ - /* ... Get copies of EFLAGS into eax and ecx */ - "pushf\n\t" - "pop %0\n\t" - "mov %0, %1\n\t" - - /* ... Toggle the ID bit in one copy and store */ - /* to the EFLAGS reg */ - "xor $0x200000, %0\n\t" - "push %0\n\t" - "popf\n\t" - - /* ... Get the (hopefully modified) EFLAGS */ - "pushf\n\t" - "pop %0\n\t" - : "=a" (a), "=c" (c) - : - : "cc" - ); - - if (a == c) - return 0; /* CPUID not supported */ - - cpuid(0, max_std_level, ebx, ecx, edx); - - if(max_std_level >= 1){ - cpuid(1, eax, ebx, ecx, std_caps); - if (std_caps & (1<<23)) - rval |= MM_MMX; - if (std_caps & (1<<25)) - rval |= MM_MMXEXT | MM_SSE; - if (std_caps & (1<<26)) - rval |= MM_SSE2; - if (ecx & 1) - rval |= MM_SSE3; - if (ecx & 0x00000200 ) - rval |= MM_SSSE3; - } - - cpuid(0x80000000, max_ext_level, ebx, ecx, edx); - - if(max_ext_level >= 0x80000001){ - cpuid(0x80000001, eax, ebx, ecx, ext_caps); - if (ext_caps & (1<<31)) - rval |= MM_3DNOW; - if (ext_caps & (1<<30)) - rval |= MM_3DNOWEXT; - if (ext_caps & (1<<23)) - rval |= MM_MMX; - if (ext_caps & (1<<22)) - rval |= MM_MMXEXT; - } - -#if 0 - av_log(NULL, AV_LOG_DEBUG, "%s%s%s%s%s%s%s%s\n", - (rval&MM_MMX) ? "MMX ":"", - (rval&MM_MMXEXT) ? "MMX2 ":"", - (rval&MM_SSE) ? "SSE ":"", - (rval&MM_SSE2) ? "SSE2 ":"", - (rval&MM_SSE3) ? "SSE3 ":"", - (rval&MM_SSSE3) ? "SSSE3 ":"", - (rval&MM_3DNOW) ? "3DNow ":"", - (rval&MM_3DNOWEXT) ? "3DNowExt ":""); -#endif - return rval; -} - -#ifdef __TEST__ -int main ( void ) -{ - int mm_flags; - mm_flags = mm_support(); - printf("mm_support = 0x%08X\n",mm_flags); - return 0; -} -#endif diff --git a/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c b/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c deleted file mode 100644 index e09a1007e..000000000 --- a/src/libffmpeg/libavcodec/i386/dsputil_h264_template_mmx.c +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>, - * Loren Merritt - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * MMX optimized version of (put|avg)_h264_chroma_mc8. 
- * H264_CHROMA_MC8_TMPL must be defined to the desired function name - * H264_CHROMA_OP must be defined to empty for put and pavgb/pavgusb for avg - * H264_CHROMA_MC8_MV0 must be defined to a (put|avg)_pixels8 function - */ -static void H264_CHROMA_MC8_TMPL(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) -{ - DECLARE_ALIGNED_8(uint64_t, AA); - DECLARE_ALIGNED_8(uint64_t, DD); - int i; - - if(y==0 && x==0) { - /* no filter needed */ - H264_CHROMA_MC8_MV0(dst, src, stride, h); - return; - } - - assert(x<8 && y<8 && x>=0 && y>=0); - - if(y==0 || x==0) - { - /* 1 dimensional filter only */ - const int dxy = x ? 1 : stride; - - asm volatile( - "movd %0, %%mm5\n\t" - "movq %1, %%mm4\n\t" - "punpcklwd %%mm5, %%mm5\n\t" - "punpckldq %%mm5, %%mm5\n\t" /* mm5 = B = x */ - "movq %%mm4, %%mm6\n\t" - "pxor %%mm7, %%mm7\n\t" - "psubw %%mm5, %%mm4\n\t" /* mm4 = A = 8-x */ - "psrlw $1, %%mm6\n\t" /* mm6 = 4 */ - :: "rm"(x+y), "m"(ff_pw_8)); - - for(i=0; i<h; i++) { - asm volatile( - /* mm0 = src[0..7], mm1 = src[1..8] */ - "movq %0, %%mm0\n\t" - "movq %1, %%mm2\n\t" - :: "m"(src[0]), "m"(src[dxy])); - - asm volatile( - /* [mm0,mm1] = A * src[0..7] */ - /* [mm2,mm3] = B * src[1..8] */ - "movq %%mm0, %%mm1\n\t" - "movq %%mm2, %%mm3\n\t" - "punpcklbw %%mm7, %%mm0\n\t" - "punpckhbw %%mm7, %%mm1\n\t" - "punpcklbw %%mm7, %%mm2\n\t" - "punpckhbw %%mm7, %%mm3\n\t" - "pmullw %%mm4, %%mm0\n\t" - "pmullw %%mm4, %%mm1\n\t" - "pmullw %%mm5, %%mm2\n\t" - "pmullw %%mm5, %%mm3\n\t" - - /* dst[0..7] = (A * src[0..7] + B * src[1..8] + 4) >> 3 */ - "paddw %%mm6, %%mm0\n\t" - "paddw %%mm6, %%mm1\n\t" - "paddw %%mm2, %%mm0\n\t" - "paddw %%mm3, %%mm1\n\t" - "psrlw $3, %%mm0\n\t" - "psrlw $3, %%mm1\n\t" - "packuswb %%mm1, %%mm0\n\t" - H264_CHROMA_OP(%0, %%mm0) - "movq %%mm0, %0\n\t" - : "=m" (dst[0])); - - src += stride; - dst += stride; - } - return; - } - - /* general case, bilinear */ - asm volatile("movd %2, %%mm4\n\t" - "movd %3, %%mm6\n\t" - "punpcklwd %%mm4, %%mm4\n\t" - "punpcklwd %%mm6, %%mm6\n\t" - "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */ - "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */ - "movq %%mm4, %%mm5\n\t" - "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */ - "psllw $3, %%mm5\n\t" - "psllw $3, %%mm6\n\t" - "movq %%mm5, %%mm7\n\t" - "paddw %%mm6, %%mm7\n\t" - "movq %%mm4, %1\n\t" /* DD = x * y */ - "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */ - "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */ - "paddw %4, %%mm4\n\t" - "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */ - "pxor %%mm7, %%mm7\n\t" - "movq %%mm4, %0\n\t" - : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64)); - - asm volatile( - /* mm0 = src[0..7], mm1 = src[1..8] */ - "movq %0, %%mm0\n\t" - "movq %1, %%mm1\n\t" - : : "m" (src[0]), "m" (src[1])); - - for(i=0; i<h; i++) { - src += stride; - - asm volatile( - /* mm2 = A * src[0..3] + B * src[1..4] */ - /* mm3 = A * src[4..7] + B * src[5..8] */ - "movq %%mm0, %%mm2\n\t" - "movq %%mm1, %%mm3\n\t" - "punpckhbw %%mm7, %%mm0\n\t" - "punpcklbw %%mm7, %%mm1\n\t" - "punpcklbw %%mm7, %%mm2\n\t" - "punpckhbw %%mm7, %%mm3\n\t" - "pmullw %0, %%mm0\n\t" - "pmullw %0, %%mm2\n\t" - "pmullw %%mm5, %%mm1\n\t" - "pmullw %%mm5, %%mm3\n\t" - "paddw %%mm1, %%mm2\n\t" - "paddw %%mm0, %%mm3\n\t" - : : "m" (AA)); - - asm volatile( - /* [mm2,mm3] += C * src[0..7] */ - "movq %0, %%mm0\n\t" - "movq %%mm0, %%mm1\n\t" - "punpcklbw %%mm7, %%mm0\n\t" - "punpckhbw %%mm7, %%mm1\n\t" - "pmullw %%mm6, %%mm0\n\t" - "pmullw %%mm6, %%mm1\n\t" - "paddw %%mm0, %%mm2\n\t" - "paddw 
%%mm1, %%mm3\n\t" - : : "m" (src[0])); - - asm volatile( - /* [mm2,mm3] += D * src[1..8] */ - "movq %1, %%mm1\n\t" - "movq %%mm1, %%mm0\n\t" - "movq %%mm1, %%mm4\n\t" - "punpcklbw %%mm7, %%mm0\n\t" - "punpckhbw %%mm7, %%mm4\n\t" - "pmullw %2, %%mm0\n\t" - "pmullw %2, %%mm4\n\t" - "paddw %%mm0, %%mm2\n\t" - "paddw %%mm4, %%mm3\n\t" - "movq %0, %%mm0\n\t" - : : "m" (src[0]), "m" (src[1]), "m" (DD)); - - asm volatile( - /* dst[0..7] = ([mm2,mm3] + 32) >> 6 */ - "paddw %1, %%mm2\n\t" - "paddw %1, %%mm3\n\t" - "psrlw $6, %%mm2\n\t" - "psrlw $6, %%mm3\n\t" - "packuswb %%mm3, %%mm2\n\t" - H264_CHROMA_OP(%0, %%mm2) - "movq %%mm2, %0\n\t" - : "=m" (dst[0]) : "m" (ff_pw_32)); - dst+= stride; - } -} - -static void H264_CHROMA_MC4_TMPL(uint8_t *dst/*align 4*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) -{ - DECLARE_ALIGNED_8(uint64_t, AA); - DECLARE_ALIGNED_8(uint64_t, DD); - int i; - - /* no special case for mv=(0,0) in 4x*, since it's much less common than in 8x*. - * could still save a few cycles, but maybe not worth the complexity. */ - - assert(x<8 && y<8 && x>=0 && y>=0); - - asm volatile("movd %2, %%mm4\n\t" - "movd %3, %%mm6\n\t" - "punpcklwd %%mm4, %%mm4\n\t" - "punpcklwd %%mm6, %%mm6\n\t" - "punpckldq %%mm4, %%mm4\n\t" /* mm4 = x words */ - "punpckldq %%mm6, %%mm6\n\t" /* mm6 = y words */ - "movq %%mm4, %%mm5\n\t" - "pmullw %%mm6, %%mm4\n\t" /* mm4 = x * y */ - "psllw $3, %%mm5\n\t" - "psllw $3, %%mm6\n\t" - "movq %%mm5, %%mm7\n\t" - "paddw %%mm6, %%mm7\n\t" - "movq %%mm4, %1\n\t" /* DD = x * y */ - "psubw %%mm4, %%mm5\n\t" /* mm5 = B = 8x - xy */ - "psubw %%mm4, %%mm6\n\t" /* mm6 = C = 8y - xy */ - "paddw %4, %%mm4\n\t" - "psubw %%mm7, %%mm4\n\t" /* mm4 = A = xy - (8x+8y) + 64 */ - "pxor %%mm7, %%mm7\n\t" - "movq %%mm4, %0\n\t" - : "=m" (AA), "=m" (DD) : "rm" (x), "rm" (y), "m" (ff_pw_64)); - - asm volatile( - /* mm0 = src[0..3], mm1 = src[1..4] */ - "movd %0, %%mm0\n\t" - "movd %1, %%mm1\n\t" - "punpcklbw %%mm7, %%mm0\n\t" - "punpcklbw %%mm7, %%mm1\n\t" - : : "m" (src[0]), "m" (src[1])); - - for(i=0; i<h; i++) { - asm volatile( - /* mm2 = A * src[0..3] + B * src[1..4] */ - "movq %%mm0, %%mm2\n\t" - "pmullw %0, %%mm2\n\t" - "pmullw %%mm5, %%mm1\n\t" - "paddw %%mm1, %%mm2\n\t" - : : "m" (AA)); - - src += stride; - asm volatile( - /* mm0 = src[0..3], mm1 = src[1..4] */ - "movd %0, %%mm0\n\t" - "movd %1, %%mm1\n\t" - "punpcklbw %%mm7, %%mm0\n\t" - "punpcklbw %%mm7, %%mm1\n\t" - : : "m" (src[0]), "m" (src[1])); - - asm volatile( - /* mm2 += C * src[0..3] + D * src[1..4] */ - "movq %%mm0, %%mm3\n\t" - "movq %%mm1, %%mm4\n\t" - "pmullw %%mm6, %%mm3\n\t" - "pmullw %0, %%mm4\n\t" - "paddw %%mm3, %%mm2\n\t" - "paddw %%mm4, %%mm2\n\t" - : : "m" (DD)); - - asm volatile( - /* dst[0..3] = pack((mm2 + 32) >> 6) */ - "paddw %1, %%mm2\n\t" - "psrlw $6, %%mm2\n\t" - "packuswb %%mm7, %%mm2\n\t" - H264_CHROMA_OP4(%0, %%mm2, %%mm3) - "movd %%mm2, %0\n\t" - : "=m" (dst[0]) : "m" (ff_pw_32)); - dst += stride; - } -} - -#ifdef H264_CHROMA_MC2_TMPL -static void H264_CHROMA_MC2_TMPL(uint8_t *dst/*align 2*/, uint8_t *src/*align 1*/, int stride, int h, int x, int y) -{ - int CD=((1<<16)-1)*x*y + 8*y; - int AB=((8<<16)-8)*x + 64 - CD; - int i; - - asm volatile( - /* mm5 = {A,B,A,B} */ - /* mm6 = {C,D,C,D} */ - "movd %0, %%mm5\n\t" - "movd %1, %%mm6\n\t" - "punpckldq %%mm5, %%mm5\n\t" - "punpckldq %%mm6, %%mm6\n\t" - "pxor %%mm7, %%mm7\n\t" - :: "r"(AB), "r"(CD)); - - asm volatile( - /* mm0 = src[0,1,1,2] */ - "movd %0, %%mm0\n\t" - "punpcklbw %%mm7, %%mm0\n\t" - "pshufw $0x94, %%mm0, %%mm0\n\t" - :: 
"m"(src[0])); - - for(i=0; i<h; i++) { - asm volatile( - /* mm1 = A * src[0,1] + B * src[1,2] */ - "movq %%mm0, %%mm1\n\t" - "pmaddwd %%mm5, %%mm1\n\t" - ::); - - src += stride; - asm volatile( - /* mm0 = src[0,1,1,2] */ - "movd %0, %%mm0\n\t" - "punpcklbw %%mm7, %%mm0\n\t" - "pshufw $0x94, %%mm0, %%mm0\n\t" - :: "m"(src[0])); - - asm volatile( - /* mm1 += C * src[0,1] + D * src[1,2] */ - "movq %%mm0, %%mm2\n\t" - "pmaddwd %%mm6, %%mm2\n\t" - "paddw %%mm2, %%mm1\n\t" - ::); - - asm volatile( - /* dst[0,1] = pack((mm1 + 32) >> 6) */ - "paddw %1, %%mm1\n\t" - "psrlw $6, %%mm1\n\t" - "packssdw %%mm7, %%mm1\n\t" - "packuswb %%mm7, %%mm1\n\t" - /* writes garbage to the right of dst. - * ok because partitions are processed from left to right. */ - H264_CHROMA_OP4(%0, %%mm1, %%mm3) - "movd %%mm1, %0\n\t" - : "=m" (dst[0]) : "m" (ff_pw_32)); - dst += stride; - } -} -#endif - diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c deleted file mode 100644 index 27c0c678c..000000000 --- a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c +++ /dev/null @@ -1,3540 +0,0 @@ -/* - * MMX optimized DSP utils - * Copyright (c) 2000, 2001 Fabrice Bellard. - * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - * MMX optimization by Nick Kurshev <nickols_k@mail.ru> - */ - -#include "../dsputil.h" -#include "../simple_idct.h" -#include "../mpegvideo.h" -#include "x86_cpu.h" -#include "mmx.h" - -//#undef NDEBUG -//#include <assert.h> - -extern void ff_idct_xvid_mmx(short *block); -extern void ff_idct_xvid_mmx2(short *block); - -int mm_flags; /* multimedia extension flags */ - -/* pixel operations */ -static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL; -static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL; -static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL; - -static const uint64_t ff_pdw_80000000[2] attribute_used __attribute__ ((aligned(16))) = -{0x8000000080000000ULL, 0x8000000080000000ULL}; - -static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL; -static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL; -static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL; -static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL; -static const uint64_t ff_pw_8 attribute_used __attribute__ ((aligned(8))) = 0x0008000800080008ULL; -static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL; -static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL; -static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL; -static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL; - -static const uint64_t ff_pb_1 attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL; -static const uint64_t ff_pb_3 attribute_used __attribute__ ((aligned(8))) = 0x0303030303030303ULL; -static const uint64_t ff_pb_7 attribute_used __attribute__ ((aligned(8))) = 0x0707070707070707ULL; -static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL; -static const uint64_t ff_pb_A1 attribute_used __attribute__ ((aligned(8))) = 0xA1A1A1A1A1A1A1A1ULL; -static const uint64_t ff_pb_5F attribute_used __attribute__ ((aligned(8))) = 0x5F5F5F5F5F5F5F5FULL; -static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL; - -#define JUMPALIGN() __asm __volatile (ASMALIGN(3)::) -#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::) - -#define MOVQ_WONE(regd) \ - __asm __volatile ( \ - "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ - "psrlw $15, %%" #regd ::) - -#define MOVQ_BFE(regd) \ - __asm __volatile ( \ - "pcmpeqd %%" #regd ", %%" #regd " \n\t"\ - "paddb %%" #regd ", %%" #regd " \n\t" ::) - -#ifndef PIC -#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone)) -#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo)) -#else -// for shared library it's better to use this way for accessing constants -// pcmpeqd -> -1 -#define MOVQ_BONE(regd) \ - __asm __volatile ( \ - "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ - "psrlw $15, %%" #regd " \n\t" \ - "packuswb %%" #regd ", %%" #regd " \n\t" ::) - -#define 
MOVQ_WTWO(regd) \ - __asm __volatile ( \ - "pcmpeqd %%" #regd ", %%" #regd " \n\t" \ - "psrlw $15, %%" #regd " \n\t" \ - "psllw $1, %%" #regd " \n\t"::) - -#endif - -// using regr as temporary and for the output result -// first argument is unmodifed and second is trashed -// regfe is supposed to contain 0xfefefefefefefefe -#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \ - "movq " #rega ", " #regr " \n\t"\ - "pand " #regb ", " #regr " \n\t"\ - "pxor " #rega ", " #regb " \n\t"\ - "pand " #regfe "," #regb " \n\t"\ - "psrlq $1, " #regb " \n\t"\ - "paddb " #regb ", " #regr " \n\t" - -#define PAVGB_MMX(rega, regb, regr, regfe) \ - "movq " #rega ", " #regr " \n\t"\ - "por " #regb ", " #regr " \n\t"\ - "pxor " #rega ", " #regb " \n\t"\ - "pand " #regfe "," #regb " \n\t"\ - "psrlq $1, " #regb " \n\t"\ - "psubb " #regb ", " #regr " \n\t" - -// mm6 is supposed to contain 0xfefefefefefefefe -#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \ - "movq " #rega ", " #regr " \n\t"\ - "movq " #regc ", " #regp " \n\t"\ - "pand " #regb ", " #regr " \n\t"\ - "pand " #regd ", " #regp " \n\t"\ - "pxor " #rega ", " #regb " \n\t"\ - "pxor " #regc ", " #regd " \n\t"\ - "pand %%mm6, " #regb " \n\t"\ - "pand %%mm6, " #regd " \n\t"\ - "psrlq $1, " #regb " \n\t"\ - "psrlq $1, " #regd " \n\t"\ - "paddb " #regb ", " #regr " \n\t"\ - "paddb " #regd ", " #regp " \n\t" - -#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \ - "movq " #rega ", " #regr " \n\t"\ - "movq " #regc ", " #regp " \n\t"\ - "por " #regb ", " #regr " \n\t"\ - "por " #regd ", " #regp " \n\t"\ - "pxor " #rega ", " #regb " \n\t"\ - "pxor " #regc ", " #regd " \n\t"\ - "pand %%mm6, " #regb " \n\t"\ - "pand %%mm6, " #regd " \n\t"\ - "psrlq $1, " #regd " \n\t"\ - "psrlq $1, " #regb " \n\t"\ - "psubb " #regb ", " #regr " \n\t"\ - "psubb " #regd ", " #regp " \n\t" - -/***********************************/ -/* MMX no rounding */ -#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx -#define SET_RND MOVQ_WONE -#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f) -#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e) - -#include "dsputil_mmx_rnd.h" - -#undef DEF -#undef SET_RND -#undef PAVGBP -#undef PAVGB -/***********************************/ -/* MMX rounding */ - -#define DEF(x, y) x ## _ ## y ##_mmx -#define SET_RND MOVQ_WTWO -#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f) -#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e) - -#include "dsputil_mmx_rnd.h" - -#undef DEF -#undef SET_RND -#undef PAVGBP -#undef PAVGB - -/***********************************/ -/* 3Dnow specific */ - -#define DEF(x) x ## _3dnow -/* for Athlons PAVGUSB is prefered */ -#define PAVGB "pavgusb" - -#include "dsputil_mmx_avg.h" - -#undef DEF -#undef PAVGB - -/***********************************/ -/* MMX2 specific */ - -#define DEF(x) x ## _mmx2 - -/* Introduced only in MMX2 set */ -#define PAVGB "pavgb" - -#include "dsputil_mmx_avg.h" - -#undef DEF -#undef PAVGB - -#define SBUTTERFLY(a,b,t,n)\ - "movq " #a ", " #t " \n\t" /* abcd */\ - "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\ - "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\ - -/***********************************/ -/* standard MMX */ - -#ifdef CONFIG_ENCODERS -static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size) -{ - asm volatile( - "mov $-128, %%"REG_a" \n\t" - "pxor %%mm7, %%mm7 \n\t" - ASMALIGN(4) - "1: \n\t" - "movq (%0), %%mm0 \n\t" - "movq (%0, %2), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpckhbw 
%%mm7, %%mm1 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "movq %%mm0, (%1, %%"REG_a") \n\t" - "movq %%mm1, 8(%1, %%"REG_a") \n\t" - "movq %%mm2, 16(%1, %%"REG_a") \n\t" - "movq %%mm3, 24(%1, %%"REG_a") \n\t" - "add %3, %0 \n\t" - "add $32, %%"REG_a" \n\t" - "js 1b \n\t" - : "+r" (pixels) - : "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2) - : "%"REG_a - ); -} - -static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride) -{ - asm volatile( - "pxor %%mm7, %%mm7 \n\t" - "mov $-128, %%"REG_a" \n\t" - ASMALIGN(4) - "1: \n\t" - "movq (%0), %%mm0 \n\t" - "movq (%1), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "psubw %%mm2, %%mm0 \n\t" - "psubw %%mm3, %%mm1 \n\t" - "movq %%mm0, (%2, %%"REG_a") \n\t" - "movq %%mm1, 8(%2, %%"REG_a") \n\t" - "add %3, %0 \n\t" - "add %3, %1 \n\t" - "add $16, %%"REG_a" \n\t" - "jnz 1b \n\t" - : "+r" (s1), "+r" (s2) - : "r" (block+64), "r" ((long)stride) - : "%"REG_a - ); -} -#endif //CONFIG_ENCODERS - -void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) -{ - const DCTELEM *p; - uint8_t *pix; - - /* read the pixels */ - p = block; - pix = pixels; - /* unrolled loop */ - __asm __volatile( - "movq %3, %%mm0 \n\t" - "movq 8%3, %%mm1 \n\t" - "movq 16%3, %%mm2 \n\t" - "movq 24%3, %%mm3 \n\t" - "movq 32%3, %%mm4 \n\t" - "movq 40%3, %%mm5 \n\t" - "movq 48%3, %%mm6 \n\t" - "movq 56%3, %%mm7 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "packuswb %%mm3, %%mm2 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "packuswb %%mm7, %%mm6 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm2, (%0, %1) \n\t" - "movq %%mm4, (%0, %1, 2) \n\t" - "movq %%mm6, (%0, %2) \n\t" - ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p) - :"memory"); - pix += line_size*4; - p += 32; - - // if here would be an exact copy of the code above - // compiler would generate some very strange code - // thus using "r" - __asm __volatile( - "movq (%3), %%mm0 \n\t" - "movq 8(%3), %%mm1 \n\t" - "movq 16(%3), %%mm2 \n\t" - "movq 24(%3), %%mm3 \n\t" - "movq 32(%3), %%mm4 \n\t" - "movq 40(%3), %%mm5 \n\t" - "movq 48(%3), %%mm6 \n\t" - "movq 56(%3), %%mm7 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "packuswb %%mm3, %%mm2 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "packuswb %%mm7, %%mm6 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm2, (%0, %1) \n\t" - "movq %%mm4, (%0, %1, 2) \n\t" - "movq %%mm6, (%0, %2) \n\t" - ::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p) - :"memory"); -} - -static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) = - { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 }; - -void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) -{ - int i; - - movq_m2r(*vector128, mm1); - for (i = 0; i < 8; i++) { - movq_m2r(*(block), mm0); - packsswb_m2r(*(block + 4), mm0); - block += 8; - paddb_r2r(mm1, mm0); - movq_r2m(mm0, *pixels); - pixels += line_size; - } -} - -void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size) -{ - const DCTELEM *p; - uint8_t *pix; - int i; - - /* read the pixels */ - p = block; - pix = pixels; - MOVQ_ZERO(mm7); - i = 4; - do { - __asm __volatile( - "movq (%2), %%mm0 \n\t" - "movq 8(%2), %%mm1 \n\t" - "movq 16(%2), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "movq %0, %%mm4 \n\t" - "movq %1, %%mm6 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - 
"punpckhbw %%mm7, %%mm5 \n\t" - "paddsw %%mm4, %%mm0 \n\t" - "paddsw %%mm5, %%mm1 \n\t" - "movq %%mm6, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm6 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddsw %%mm6, %%mm2 \n\t" - "paddsw %%mm5, %%mm3 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "packuswb %%mm3, %%mm2 \n\t" - "movq %%mm0, %0 \n\t" - "movq %%mm2, %1 \n\t" - :"+m"(*pix), "+m"(*(pix+line_size)) - :"r"(p) - :"memory"); - pix += line_size*2; - p += 16; - } while (--i); -} - -static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ASMALIGN(3) - "1: \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%1, %3), %%mm1 \n\t" - "movd %%mm0, (%2) \n\t" - "movd %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%1, %3), %%mm1 \n\t" - "movd %%mm0, (%2) \n\t" - "movd %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - : "+g"(h), "+r" (pixels), "+r" (block) - : "r"((long)line_size) - : "%"REG_a, "memory" - ); -} - -static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - : "+g"(h), "+r" (pixels), "+r" (block) - : "r"((long)line_size) - : "%"REG_a, "memory" - ); -} - -static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm4 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1, %3), %%mm5 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm4, 8(%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm4 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1, %3), %%mm5 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm4, 8(%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - : "+g"(h), "+r" (pixels), "+r" (block) - : "r"((long)line_size) - : "%"REG_a, "memory" - ); -} - -static void clear_blocks_mmx(DCTELEM *blocks) -{ - __asm __volatile( - "pxor %%mm7, %%mm7 \n\t" - "mov $-128*6, %%"REG_a" \n\t" - "1: \n\t" - "movq %%mm7, (%0, %%"REG_a") \n\t" - "movq %%mm7, 8(%0, %%"REG_a") \n\t" - "movq %%mm7, 16(%0, %%"REG_a") \n\t" - "movq %%mm7, 24(%0, %%"REG_a") \n\t" - "add $32, %%"REG_a" \n\t" - " js 1b \n\t" - : : "r" (((uint8_t *)blocks)+128*6) - : "%"REG_a - ); -} - -#ifdef CONFIG_ENCODERS -static int pix_sum16_mmx(uint8_t * pix, int line_size){ - const int h=16; - int sum; - long index= -line_size*h; - - __asm __volatile( - "pxor %%mm7, %%mm7 \n\t" - "pxor %%mm6, %%mm6 \n\t" - "1: \n\t" - "movq (%2, %1), %%mm0 \n\t" - "movq (%2, %1), %%mm1 \n\t" - "movq 8(%2, %1), %%mm2 \n\t" - "movq 8(%2, %1), %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "paddw %%mm0, %%mm1 \n\t" - "paddw %%mm2, %%mm3 \n\t" - 
"paddw %%mm1, %%mm3 \n\t" - "paddw %%mm3, %%mm6 \n\t" - "add %3, %1 \n\t" - " js 1b \n\t" - "movq %%mm6, %%mm5 \n\t" - "psrlq $32, %%mm6 \n\t" - "paddw %%mm5, %%mm6 \n\t" - "movq %%mm6, %%mm5 \n\t" - "psrlq $16, %%mm6 \n\t" - "paddw %%mm5, %%mm6 \n\t" - "movd %%mm6, %0 \n\t" - "andl $0xFFFF, %0 \n\t" - : "=&r" (sum), "+r" (index) - : "r" (pix - index), "r" ((long)line_size) - ); - - return sum; -} -#endif //CONFIG_ENCODERS - -static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){ - long i=0; - asm volatile( - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq (%2, %0), %%mm1 \n\t" - "paddb %%mm0, %%mm1 \n\t" - "movq %%mm1, (%2, %0) \n\t" - "movq 8(%1, %0), %%mm0 \n\t" - "movq 8(%2, %0), %%mm1 \n\t" - "paddb %%mm0, %%mm1 \n\t" - "movq %%mm1, 8(%2, %0) \n\t" - "add $16, %0 \n\t" - "cmp %3, %0 \n\t" - " jb 1b \n\t" - : "+r" (i) - : "r"(src), "r"(dst), "r"((long)w-15) - ); - for(; i<w; i++) - dst[i+0] += src[i+0]; -} - -#define H263_LOOP_FILTER \ - "pxor %%mm7, %%mm7 \n\t"\ - "movq %0, %%mm0 \n\t"\ - "movq %0, %%mm1 \n\t"\ - "movq %3, %%mm2 \n\t"\ - "movq %3, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpckhbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpckhbw %%mm7, %%mm3 \n\t"\ - "psubw %%mm2, %%mm0 \n\t"\ - "psubw %%mm3, %%mm1 \n\t"\ - "movq %1, %%mm2 \n\t"\ - "movq %1, %%mm3 \n\t"\ - "movq %2, %%mm4 \n\t"\ - "movq %2, %%mm5 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpckhbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm4 \n\t"\ - "punpckhbw %%mm7, %%mm5 \n\t"\ - "psubw %%mm2, %%mm4 \n\t"\ - "psubw %%mm3, %%mm5 \n\t"\ - "psllw $2, %%mm4 \n\t"\ - "psllw $2, %%mm5 \n\t"\ - "paddw %%mm0, %%mm4 \n\t"\ - "paddw %%mm1, %%mm5 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "pcmpgtw %%mm4, %%mm6 \n\t"\ - "pcmpgtw %%mm5, %%mm7 \n\t"\ - "pxor %%mm6, %%mm4 \n\t"\ - "pxor %%mm7, %%mm5 \n\t"\ - "psubw %%mm6, %%mm4 \n\t"\ - "psubw %%mm7, %%mm5 \n\t"\ - "psrlw $3, %%mm4 \n\t"\ - "psrlw $3, %%mm5 \n\t"\ - "packuswb %%mm5, %%mm4 \n\t"\ - "packsswb %%mm7, %%mm6 \n\t"\ - "pxor %%mm7, %%mm7 \n\t"\ - "movd %4, %%mm2 \n\t"\ - "punpcklbw %%mm2, %%mm2 \n\t"\ - "punpcklbw %%mm2, %%mm2 \n\t"\ - "punpcklbw %%mm2, %%mm2 \n\t"\ - "psubusb %%mm4, %%mm2 \n\t"\ - "movq %%mm2, %%mm3 \n\t"\ - "psubusb %%mm4, %%mm3 \n\t"\ - "psubb %%mm3, %%mm2 \n\t"\ - "movq %1, %%mm3 \n\t"\ - "movq %2, %%mm4 \n\t"\ - "pxor %%mm6, %%mm3 \n\t"\ - "pxor %%mm6, %%mm4 \n\t"\ - "paddusb %%mm2, %%mm3 \n\t"\ - "psubusb %%mm2, %%mm4 \n\t"\ - "pxor %%mm6, %%mm3 \n\t"\ - "pxor %%mm6, %%mm4 \n\t"\ - "paddusb %%mm2, %%mm2 \n\t"\ - "packsswb %%mm1, %%mm0 \n\t"\ - "pcmpgtb %%mm0, %%mm7 \n\t"\ - "pxor %%mm7, %%mm0 \n\t"\ - "psubb %%mm7, %%mm0 \n\t"\ - "movq %%mm0, %%mm1 \n\t"\ - "psubusb %%mm2, %%mm0 \n\t"\ - "psubb %%mm0, %%mm1 \n\t"\ - "pand %5, %%mm1 \n\t"\ - "psrlw $2, %%mm1 \n\t"\ - "pxor %%mm7, %%mm1 \n\t"\ - "psubb %%mm7, %%mm1 \n\t"\ - "movq %0, %%mm5 \n\t"\ - "movq %3, %%mm6 \n\t"\ - "psubb %%mm1, %%mm5 \n\t"\ - "paddb %%mm1, %%mm6 \n\t" - -static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){ - const int strength= ff_h263_loop_filter_strength[qscale]; - - asm volatile( - - H263_LOOP_FILTER - - "movq %%mm3, %1 \n\t" - "movq %%mm4, %2 \n\t" - "movq %%mm5, %0 \n\t" - "movq %%mm6, %3 \n\t" - : "+m" (*(uint64_t*)(src - 2*stride)), - "+m" (*(uint64_t*)(src - 1*stride)), - "+m" (*(uint64_t*)(src + 0*stride)), - "+m" (*(uint64_t*)(src + 1*stride)) - : "g" (2*strength), "m"(ff_pb_FC) - ); -} - -static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){ - asm volatile( //FIXME could save 
1 instruction if done as 8x4 ... - "movd %4, %%mm0 \n\t" - "movd %5, %%mm1 \n\t" - "movd %6, %%mm2 \n\t" - "movd %7, %%mm3 \n\t" - "punpcklbw %%mm1, %%mm0 \n\t" - "punpcklbw %%mm3, %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "punpcklwd %%mm2, %%mm0 \n\t" - "punpckhwd %%mm2, %%mm1 \n\t" - "movd %%mm0, %0 \n\t" - "punpckhdq %%mm0, %%mm0 \n\t" - "movd %%mm0, %1 \n\t" - "movd %%mm1, %2 \n\t" - "punpckhdq %%mm1, %%mm1 \n\t" - "movd %%mm1, %3 \n\t" - - : "=m" (*(uint32_t*)(dst + 0*dst_stride)), - "=m" (*(uint32_t*)(dst + 1*dst_stride)), - "=m" (*(uint32_t*)(dst + 2*dst_stride)), - "=m" (*(uint32_t*)(dst + 3*dst_stride)) - : "m" (*(uint32_t*)(src + 0*src_stride)), - "m" (*(uint32_t*)(src + 1*src_stride)), - "m" (*(uint32_t*)(src + 2*src_stride)), - "m" (*(uint32_t*)(src + 3*src_stride)) - ); -} - -static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){ - const int strength= ff_h263_loop_filter_strength[qscale]; - uint64_t temp[4] __attribute__ ((aligned(8))); - uint8_t *btemp= (uint8_t*)temp; - - src -= 2; - - transpose4x4(btemp , src , 8, stride); - transpose4x4(btemp+4, src + 4*stride, 8, stride); - asm volatile( - H263_LOOP_FILTER // 5 3 4 6 - - : "+m" (temp[0]), - "+m" (temp[1]), - "+m" (temp[2]), - "+m" (temp[3]) - : "g" (2*strength), "m"(ff_pb_FC) - ); - - asm volatile( - "movq %%mm5, %%mm1 \n\t" - "movq %%mm4, %%mm0 \n\t" - "punpcklbw %%mm3, %%mm5 \n\t" - "punpcklbw %%mm6, %%mm4 \n\t" - "punpckhbw %%mm3, %%mm1 \n\t" - "punpckhbw %%mm6, %%mm0 \n\t" - "movq %%mm5, %%mm3 \n\t" - "movq %%mm1, %%mm6 \n\t" - "punpcklwd %%mm4, %%mm5 \n\t" - "punpcklwd %%mm0, %%mm1 \n\t" - "punpckhwd %%mm4, %%mm3 \n\t" - "punpckhwd %%mm0, %%mm6 \n\t" - "movd %%mm5, (%0) \n\t" - "punpckhdq %%mm5, %%mm5 \n\t" - "movd %%mm5, (%0,%2) \n\t" - "movd %%mm3, (%0,%2,2) \n\t" - "punpckhdq %%mm3, %%mm3 \n\t" - "movd %%mm3, (%0,%3) \n\t" - "movd %%mm1, (%1) \n\t" - "punpckhdq %%mm1, %%mm1 \n\t" - "movd %%mm1, (%1,%2) \n\t" - "movd %%mm6, (%1,%2,2) \n\t" - "punpckhdq %%mm6, %%mm6 \n\t" - "movd %%mm6, (%1,%3) \n\t" - :: "r" (src), - "r" (src + 4*stride), - "r" ((long) stride ), - "r" ((long)(3*stride)) - ); -} - -#ifdef CONFIG_ENCODERS -static int pix_norm1_mmx(uint8_t *pix, int line_size) { - int tmp; - asm volatile ( - "movl $16,%%ecx\n" - "pxor %%mm0,%%mm0\n" - "pxor %%mm7,%%mm7\n" - "1:\n" - "movq (%0),%%mm2\n" /* mm2 = pix[0-7] */ - "movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */ - - "movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */ - - "punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */ - "punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */ - - "movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */ - "punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */ - "punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */ - - "pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */ - "pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */ - - "pmaddwd %%mm3,%%mm3\n" - "pmaddwd %%mm4,%%mm4\n" - - "paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2, - pix2^2+pix3^2+pix6^2+pix7^2) */ - "paddd %%mm3,%%mm4\n" - "paddd %%mm2,%%mm7\n" - - "add %2, %0\n" - "paddd %%mm4,%%mm7\n" - "dec %%ecx\n" - "jnz 1b\n" - - "movq %%mm7,%%mm1\n" - "psrlq $32, %%mm7\n" /* shift hi dword to lo */ - "paddd %%mm7,%%mm1\n" - "movd %%mm1,%1\n" - : "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" ); - return tmp; -} - -static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { - int tmp; - asm volatile ( - "movl %4,%%ecx\n" - "shr $1,%%ecx\n" - "pxor %%mm0,%%mm0\n" /* mm0 = 0 */ - "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */ - 
"1:\n" - "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */ - "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */ - "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */ - "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */ - - /* todo: mm1-mm2, mm3-mm4 */ - /* algo: substract mm1 from mm2 with saturation and vice versa */ - /* OR the results to get absolute difference */ - "movq %%mm1,%%mm5\n" - "movq %%mm3,%%mm6\n" - "psubusb %%mm2,%%mm1\n" - "psubusb %%mm4,%%mm3\n" - "psubusb %%mm5,%%mm2\n" - "psubusb %%mm6,%%mm4\n" - - "por %%mm1,%%mm2\n" - "por %%mm3,%%mm4\n" - - /* now convert to 16-bit vectors so we can square them */ - "movq %%mm2,%%mm1\n" - "movq %%mm4,%%mm3\n" - - "punpckhbw %%mm0,%%mm2\n" - "punpckhbw %%mm0,%%mm4\n" - "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */ - "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */ - - "pmaddwd %%mm2,%%mm2\n" - "pmaddwd %%mm4,%%mm4\n" - "pmaddwd %%mm1,%%mm1\n" - "pmaddwd %%mm3,%%mm3\n" - - "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */ - "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */ - - "paddd %%mm2,%%mm1\n" - "paddd %%mm4,%%mm3\n" - "paddd %%mm1,%%mm7\n" - "paddd %%mm3,%%mm7\n" - - "decl %%ecx\n" - "jnz 1b\n" - - "movq %%mm7,%%mm1\n" - "psrlq $32, %%mm7\n" /* shift hi dword to lo */ - "paddd %%mm7,%%mm1\n" - "movd %%mm1,%2\n" - : "+r" (pix1), "+r" (pix2), "=r"(tmp) - : "r" ((long)line_size) , "m" (h) - : "%ecx"); - return tmp; -} - -static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { - int tmp; - asm volatile ( - "movl %4,%%ecx\n" - "pxor %%mm0,%%mm0\n" /* mm0 = 0 */ - "pxor %%mm7,%%mm7\n" /* mm7 holds the sum */ - "1:\n" - "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */ - "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */ - "movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */ - "movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */ - - /* todo: mm1-mm2, mm3-mm4 */ - /* algo: substract mm1 from mm2 with saturation and vice versa */ - /* OR the results to get absolute difference */ - "movq %%mm1,%%mm5\n" - "movq %%mm3,%%mm6\n" - "psubusb %%mm2,%%mm1\n" - "psubusb %%mm4,%%mm3\n" - "psubusb %%mm5,%%mm2\n" - "psubusb %%mm6,%%mm4\n" - - "por %%mm1,%%mm2\n" - "por %%mm3,%%mm4\n" - - /* now convert to 16-bit vectors so we can square them */ - "movq %%mm2,%%mm1\n" - "movq %%mm4,%%mm3\n" - - "punpckhbw %%mm0,%%mm2\n" - "punpckhbw %%mm0,%%mm4\n" - "punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */ - "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */ - - "pmaddwd %%mm2,%%mm2\n" - "pmaddwd %%mm4,%%mm4\n" - "pmaddwd %%mm1,%%mm1\n" - "pmaddwd %%mm3,%%mm3\n" - - "add %3,%0\n" - "add %3,%1\n" - - "paddd %%mm2,%%mm1\n" - "paddd %%mm4,%%mm3\n" - "paddd %%mm1,%%mm7\n" - "paddd %%mm3,%%mm7\n" - - "decl %%ecx\n" - "jnz 1b\n" - - "movq %%mm7,%%mm1\n" - "psrlq $32, %%mm7\n" /* shift hi dword to lo */ - "paddd %%mm7,%%mm1\n" - "movd %%mm1,%2\n" - : "+r" (pix1), "+r" (pix2), "=r"(tmp) - : "r" ((long)line_size) , "m" (h) - : "%ecx"); - return tmp; -} - -static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { - int tmp; - asm volatile ( - "shr $1,%2\n" - "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */ - "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */ - "1:\n" - "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */ - "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */ - "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */ - "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */ - - /* todo: mm1-mm2, mm3-mm4 */ - /* algo: substract mm1 from mm2 with saturation and vice versa */ - /* OR the results to get absolute difference */ - "movdqa 
%%xmm1,%%xmm5\n" - "movdqa %%xmm3,%%xmm6\n" - "psubusb %%xmm2,%%xmm1\n" - "psubusb %%xmm4,%%xmm3\n" - "psubusb %%xmm5,%%xmm2\n" - "psubusb %%xmm6,%%xmm4\n" - - "por %%xmm1,%%xmm2\n" - "por %%xmm3,%%xmm4\n" - - /* now convert to 16-bit vectors so we can square them */ - "movdqa %%xmm2,%%xmm1\n" - "movdqa %%xmm4,%%xmm3\n" - - "punpckhbw %%xmm0,%%xmm2\n" - "punpckhbw %%xmm0,%%xmm4\n" - "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */ - "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */ - - "pmaddwd %%xmm2,%%xmm2\n" - "pmaddwd %%xmm4,%%xmm4\n" - "pmaddwd %%xmm1,%%xmm1\n" - "pmaddwd %%xmm3,%%xmm3\n" - - "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */ - "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */ - - "paddd %%xmm2,%%xmm1\n" - "paddd %%xmm4,%%xmm3\n" - "paddd %%xmm1,%%xmm7\n" - "paddd %%xmm3,%%xmm7\n" - - "decl %2\n" - "jnz 1b\n" - - "movdqa %%xmm7,%%xmm1\n" - "psrldq $8, %%xmm7\n" /* shift hi qword to lo */ - "paddd %%xmm1,%%xmm7\n" - "movdqa %%xmm7,%%xmm1\n" - "psrldq $4, %%xmm7\n" /* shift hi dword to lo */ - "paddd %%xmm1,%%xmm7\n" - "movd %%xmm7,%3\n" - : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp) - : "r" ((long)line_size)); - return tmp; -} - -static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) { - int tmp; - asm volatile ( - "movl %3,%%ecx\n" - "pxor %%mm7,%%mm7\n" - "pxor %%mm6,%%mm6\n" - - "movq (%0),%%mm0\n" - "movq %%mm0, %%mm1\n" - "psllq $8, %%mm0\n" - "psrlq $8, %%mm1\n" - "psrlq $8, %%mm0\n" - "movq %%mm0, %%mm2\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm0\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm2\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm0\n" - "psubw %%mm3, %%mm2\n" - - "add %2,%0\n" - - "movq (%0),%%mm4\n" - "movq %%mm4, %%mm1\n" - "psllq $8, %%mm4\n" - "psrlq $8, %%mm1\n" - "psrlq $8, %%mm4\n" - "movq %%mm4, %%mm5\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm4\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm5\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm4\n" - "psubw %%mm3, %%mm5\n" - "psubw %%mm4, %%mm0\n" - "psubw %%mm5, %%mm2\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm0, %%mm3\n\t" - "pcmpgtw %%mm2, %%mm1\n\t" - "pxor %%mm3, %%mm0\n" - "pxor %%mm1, %%mm2\n" - "psubw %%mm3, %%mm0\n" - "psubw %%mm1, %%mm2\n" - "paddw %%mm0, %%mm2\n" - "paddw %%mm2, %%mm6\n" - - "add %2,%0\n" - "1:\n" - - "movq (%0),%%mm0\n" - "movq %%mm0, %%mm1\n" - "psllq $8, %%mm0\n" - "psrlq $8, %%mm1\n" - "psrlq $8, %%mm0\n" - "movq %%mm0, %%mm2\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm0\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm2\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm0\n" - "psubw %%mm3, %%mm2\n" - "psubw %%mm0, %%mm4\n" - "psubw %%mm2, %%mm5\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm4, %%mm3\n\t" - "pcmpgtw %%mm5, %%mm1\n\t" - "pxor %%mm3, %%mm4\n" - "pxor %%mm1, %%mm5\n" - "psubw %%mm3, %%mm4\n" - "psubw %%mm1, %%mm5\n" - "paddw %%mm4, %%mm5\n" - "paddw %%mm5, %%mm6\n" - - "add %2,%0\n" - - "movq (%0),%%mm4\n" - "movq %%mm4, %%mm1\n" - "psllq $8, %%mm4\n" - "psrlq $8, %%mm1\n" - "psrlq $8, %%mm4\n" - "movq %%mm4, %%mm5\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm4\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm5\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm4\n" - "psubw %%mm3, %%mm5\n" - "psubw %%mm4, %%mm0\n" - "psubw %%mm5, %%mm2\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm0, %%mm3\n\t" - "pcmpgtw %%mm2, %%mm1\n\t" - "pxor %%mm3, %%mm0\n" - "pxor %%mm1, %%mm2\n" - "psubw %%mm3, %%mm0\n" - 
"psubw %%mm1, %%mm2\n" - "paddw %%mm0, %%mm2\n" - "paddw %%mm2, %%mm6\n" - - "add %2,%0\n" - "subl $2, %%ecx\n" - " jnz 1b\n" - - "movq %%mm6, %%mm0\n" - "punpcklwd %%mm7,%%mm0\n" - "punpckhwd %%mm7,%%mm6\n" - "paddd %%mm0, %%mm6\n" - - "movq %%mm6,%%mm0\n" - "psrlq $32, %%mm6\n" - "paddd %%mm6,%%mm0\n" - "movd %%mm0,%1\n" - : "+r" (pix1), "=r"(tmp) - : "r" ((long)line_size) , "g" (h-2) - : "%ecx"); - return tmp; -} - -static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) { - int tmp; - uint8_t * pix= pix1; - asm volatile ( - "movl %3,%%ecx\n" - "pxor %%mm7,%%mm7\n" - "pxor %%mm6,%%mm6\n" - - "movq (%0),%%mm0\n" - "movq 1(%0),%%mm1\n" - "movq %%mm0, %%mm2\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm0\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm2\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm0\n" - "psubw %%mm3, %%mm2\n" - - "add %2,%0\n" - - "movq (%0),%%mm4\n" - "movq 1(%0),%%mm1\n" - "movq %%mm4, %%mm5\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm4\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm5\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm4\n" - "psubw %%mm3, %%mm5\n" - "psubw %%mm4, %%mm0\n" - "psubw %%mm5, %%mm2\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm0, %%mm3\n\t" - "pcmpgtw %%mm2, %%mm1\n\t" - "pxor %%mm3, %%mm0\n" - "pxor %%mm1, %%mm2\n" - "psubw %%mm3, %%mm0\n" - "psubw %%mm1, %%mm2\n" - "paddw %%mm0, %%mm2\n" - "paddw %%mm2, %%mm6\n" - - "add %2,%0\n" - "1:\n" - - "movq (%0),%%mm0\n" - "movq 1(%0),%%mm1\n" - "movq %%mm0, %%mm2\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm0\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm2\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm0\n" - "psubw %%mm3, %%mm2\n" - "psubw %%mm0, %%mm4\n" - "psubw %%mm2, %%mm5\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm4, %%mm3\n\t" - "pcmpgtw %%mm5, %%mm1\n\t" - "pxor %%mm3, %%mm4\n" - "pxor %%mm1, %%mm5\n" - "psubw %%mm3, %%mm4\n" - "psubw %%mm1, %%mm5\n" - "paddw %%mm4, %%mm5\n" - "paddw %%mm5, %%mm6\n" - - "add %2,%0\n" - - "movq (%0),%%mm4\n" - "movq 1(%0),%%mm1\n" - "movq %%mm4, %%mm5\n" - "movq %%mm1, %%mm3\n" - "punpcklbw %%mm7,%%mm4\n" - "punpcklbw %%mm7,%%mm1\n" - "punpckhbw %%mm7,%%mm5\n" - "punpckhbw %%mm7,%%mm3\n" - "psubw %%mm1, %%mm4\n" - "psubw %%mm3, %%mm5\n" - "psubw %%mm4, %%mm0\n" - "psubw %%mm5, %%mm2\n" - "pxor %%mm3, %%mm3\n" - "pxor %%mm1, %%mm1\n" - "pcmpgtw %%mm0, %%mm3\n\t" - "pcmpgtw %%mm2, %%mm1\n\t" - "pxor %%mm3, %%mm0\n" - "pxor %%mm1, %%mm2\n" - "psubw %%mm3, %%mm0\n" - "psubw %%mm1, %%mm2\n" - "paddw %%mm0, %%mm2\n" - "paddw %%mm2, %%mm6\n" - - "add %2,%0\n" - "subl $2, %%ecx\n" - " jnz 1b\n" - - "movq %%mm6, %%mm0\n" - "punpcklwd %%mm7,%%mm0\n" - "punpckhwd %%mm7,%%mm6\n" - "paddd %%mm0, %%mm6\n" - - "movq %%mm6,%%mm0\n" - "psrlq $32, %%mm6\n" - "paddd %%mm6,%%mm0\n" - "movd %%mm0,%1\n" - : "+r" (pix1), "=r"(tmp) - : "r" ((long)line_size) , "g" (h-2) - : "%ecx"); - return tmp + hf_noise8_mmx(pix+8, line_size, h); -} - -static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { - MpegEncContext *c = p; - int score1, score2; - - if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h); - else score1 = sse16_mmx(c, pix1, pix2, line_size, h); - score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h); - - if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight; - else return score1 + FFABS(score2)*8; -} - -static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { - MpegEncContext *c = p; - 
int score1= sse8_mmx(c, pix1, pix2, line_size, h); - int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h); - - if(c) return score1 + FFABS(score2)*c->avctx->nsse_weight; - else return score1 + FFABS(score2)*8; -} - -static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { - int tmp; - - assert( (((int)pix) & 7) == 0); - assert((line_size &7) ==0); - -#define SUM(in0, in1, out0, out1) \ - "movq (%0), %%mm2\n"\ - "movq 8(%0), %%mm3\n"\ - "add %2,%0\n"\ - "movq %%mm2, " #out0 "\n"\ - "movq %%mm3, " #out1 "\n"\ - "psubusb " #in0 ", %%mm2\n"\ - "psubusb " #in1 ", %%mm3\n"\ - "psubusb " #out0 ", " #in0 "\n"\ - "psubusb " #out1 ", " #in1 "\n"\ - "por %%mm2, " #in0 "\n"\ - "por %%mm3, " #in1 "\n"\ - "movq " #in0 ", %%mm2\n"\ - "movq " #in1 ", %%mm3\n"\ - "punpcklbw %%mm7, " #in0 "\n"\ - "punpcklbw %%mm7, " #in1 "\n"\ - "punpckhbw %%mm7, %%mm2\n"\ - "punpckhbw %%mm7, %%mm3\n"\ - "paddw " #in1 ", " #in0 "\n"\ - "paddw %%mm3, %%mm2\n"\ - "paddw %%mm2, " #in0 "\n"\ - "paddw " #in0 ", %%mm6\n" - - - asm volatile ( - "movl %3,%%ecx\n" - "pxor %%mm6,%%mm6\n" - "pxor %%mm7,%%mm7\n" - "movq (%0),%%mm0\n" - "movq 8(%0),%%mm1\n" - "add %2,%0\n" - "subl $2, %%ecx\n" - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - "1:\n" - - SUM(%%mm4, %%mm5, %%mm0, %%mm1) - - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - - "subl $2, %%ecx\n" - "jnz 1b\n" - - "movq %%mm6,%%mm0\n" - "psrlq $32, %%mm6\n" - "paddw %%mm6,%%mm0\n" - "movq %%mm0,%%mm6\n" - "psrlq $16, %%mm0\n" - "paddw %%mm6,%%mm0\n" - "movd %%mm0,%1\n" - : "+r" (pix), "=r"(tmp) - : "r" ((long)line_size) , "m" (h) - : "%ecx"); - return tmp & 0xFFFF; -} -#undef SUM - -static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) { - int tmp; - - assert( (((int)pix) & 7) == 0); - assert((line_size &7) ==0); - -#define SUM(in0, in1, out0, out1) \ - "movq (%0), " #out0 "\n"\ - "movq 8(%0), " #out1 "\n"\ - "add %2,%0\n"\ - "psadbw " #out0 ", " #in0 "\n"\ - "psadbw " #out1 ", " #in1 "\n"\ - "paddw " #in1 ", " #in0 "\n"\ - "paddw " #in0 ", %%mm6\n" - - asm volatile ( - "movl %3,%%ecx\n" - "pxor %%mm6,%%mm6\n" - "pxor %%mm7,%%mm7\n" - "movq (%0),%%mm0\n" - "movq 8(%0),%%mm1\n" - "add %2,%0\n" - "subl $2, %%ecx\n" - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - "1:\n" - - SUM(%%mm4, %%mm5, %%mm0, %%mm1) - - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - - "subl $2, %%ecx\n" - "jnz 1b\n" - - "movd %%mm6,%1\n" - : "+r" (pix), "=r"(tmp) - : "r" ((long)line_size) , "m" (h) - : "%ecx"); - return tmp; -} -#undef SUM - -static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { - int tmp; - - assert( (((int)pix1) & 7) == 0); - assert( (((int)pix2) & 7) == 0); - assert((line_size &7) ==0); - -#define SUM(in0, in1, out0, out1) \ - "movq (%0),%%mm2\n"\ - "movq (%1)," #out0 "\n"\ - "movq 8(%0),%%mm3\n"\ - "movq 8(%1)," #out1 "\n"\ - "add %3,%0\n"\ - "add %3,%1\n"\ - "psubb " #out0 ", %%mm2\n"\ - "psubb " #out1 ", %%mm3\n"\ - "pxor %%mm7, %%mm2\n"\ - "pxor %%mm7, %%mm3\n"\ - "movq %%mm2, " #out0 "\n"\ - "movq %%mm3, " #out1 "\n"\ - "psubusb " #in0 ", %%mm2\n"\ - "psubusb " #in1 ", %%mm3\n"\ - "psubusb " #out0 ", " #in0 "\n"\ - "psubusb " #out1 ", " #in1 "\n"\ - "por %%mm2, " #in0 "\n"\ - "por %%mm3, " #in1 "\n"\ - "movq " #in0 ", %%mm2\n"\ - "movq " #in1 ", %%mm3\n"\ - "punpcklbw %%mm7, " #in0 "\n"\ - "punpcklbw %%mm7, " #in1 "\n"\ - "punpckhbw %%mm7, %%mm2\n"\ - "punpckhbw %%mm7, %%mm3\n"\ - "paddw " #in1 ", " #in0 "\n"\ - "paddw %%mm3, %%mm2\n"\ - "paddw %%mm2, " #in0 "\n"\ - "paddw " #in0 ", %%mm6\n" - - - asm 
volatile ( - "movl %4,%%ecx\n" - "pxor %%mm6,%%mm6\n" - "pcmpeqw %%mm7,%%mm7\n" - "psllw $15, %%mm7\n" - "packsswb %%mm7, %%mm7\n" - "movq (%0),%%mm0\n" - "movq (%1),%%mm2\n" - "movq 8(%0),%%mm1\n" - "movq 8(%1),%%mm3\n" - "add %3,%0\n" - "add %3,%1\n" - "subl $2, %%ecx\n" - "psubb %%mm2, %%mm0\n" - "psubb %%mm3, %%mm1\n" - "pxor %%mm7, %%mm0\n" - "pxor %%mm7, %%mm1\n" - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - "1:\n" - - SUM(%%mm4, %%mm5, %%mm0, %%mm1) - - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - - "subl $2, %%ecx\n" - "jnz 1b\n" - - "movq %%mm6,%%mm0\n" - "psrlq $32, %%mm6\n" - "paddw %%mm6,%%mm0\n" - "movq %%mm0,%%mm6\n" - "psrlq $16, %%mm0\n" - "paddw %%mm6,%%mm0\n" - "movd %%mm0,%2\n" - : "+r" (pix1), "+r" (pix2), "=r"(tmp) - : "r" ((long)line_size) , "m" (h) - : "%ecx"); - return tmp & 0x7FFF; -} -#undef SUM - -static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { - int tmp; - - assert( (((int)pix1) & 7) == 0); - assert( (((int)pix2) & 7) == 0); - assert((line_size &7) ==0); - -#define SUM(in0, in1, out0, out1) \ - "movq (%0)," #out0 "\n"\ - "movq (%1),%%mm2\n"\ - "movq 8(%0)," #out1 "\n"\ - "movq 8(%1),%%mm3\n"\ - "add %3,%0\n"\ - "add %3,%1\n"\ - "psubb %%mm2, " #out0 "\n"\ - "psubb %%mm3, " #out1 "\n"\ - "pxor %%mm7, " #out0 "\n"\ - "pxor %%mm7, " #out1 "\n"\ - "psadbw " #out0 ", " #in0 "\n"\ - "psadbw " #out1 ", " #in1 "\n"\ - "paddw " #in1 ", " #in0 "\n"\ - "paddw " #in0 ", %%mm6\n" - - asm volatile ( - "movl %4,%%ecx\n" - "pxor %%mm6,%%mm6\n" - "pcmpeqw %%mm7,%%mm7\n" - "psllw $15, %%mm7\n" - "packsswb %%mm7, %%mm7\n" - "movq (%0),%%mm0\n" - "movq (%1),%%mm2\n" - "movq 8(%0),%%mm1\n" - "movq 8(%1),%%mm3\n" - "add %3,%0\n" - "add %3,%1\n" - "subl $2, %%ecx\n" - "psubb %%mm2, %%mm0\n" - "psubb %%mm3, %%mm1\n" - "pxor %%mm7, %%mm0\n" - "pxor %%mm7, %%mm1\n" - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - "1:\n" - - SUM(%%mm4, %%mm5, %%mm0, %%mm1) - - SUM(%%mm0, %%mm1, %%mm4, %%mm5) - - "subl $2, %%ecx\n" - "jnz 1b\n" - - "movd %%mm6,%2\n" - : "+r" (pix1), "+r" (pix2), "=r"(tmp) - : "r" ((long)line_size) , "m" (h) - : "%ecx"); - return tmp; -} -#undef SUM - -static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){ - long i=0; - asm volatile( - "1: \n\t" - "movq (%2, %0), %%mm0 \n\t" - "movq (%1, %0), %%mm1 \n\t" - "psubb %%mm0, %%mm1 \n\t" - "movq %%mm1, (%3, %0) \n\t" - "movq 8(%2, %0), %%mm0 \n\t" - "movq 8(%1, %0), %%mm1 \n\t" - "psubb %%mm0, %%mm1 \n\t" - "movq %%mm1, 8(%3, %0) \n\t" - "add $16, %0 \n\t" - "cmp %4, %0 \n\t" - " jb 1b \n\t" - : "+r" (i) - : "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15) - ); - for(; i<w; i++) - dst[i+0] = src1[i+0]-src2[i+0]; -} - -static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){ - long i=0; - uint8_t l, lt; - - asm volatile( - "1: \n\t" - "movq -1(%1, %0), %%mm0 \n\t" // LT - "movq (%1, %0), %%mm1 \n\t" // T - "movq -1(%2, %0), %%mm2 \n\t" // L - "movq (%2, %0), %%mm3 \n\t" // X - "movq %%mm2, %%mm4 \n\t" // L - "psubb %%mm0, %%mm2 \n\t" - "paddb %%mm1, %%mm2 \n\t" // L + T - LT - "movq %%mm4, %%mm5 \n\t" // L - "pmaxub %%mm1, %%mm4 \n\t" // max(T, L) - "pminub %%mm5, %%mm1 \n\t" // min(T, L) - "pminub %%mm2, %%mm4 \n\t" - "pmaxub %%mm1, %%mm4 \n\t" - "psubb %%mm4, %%mm3 \n\t" // dst - pred - "movq %%mm3, (%3, %0) \n\t" - "add $8, %0 \n\t" - "cmp %4, %0 \n\t" - " jb 1b \n\t" - : "+r" (i) - : "r"(src1), "r"(src2), "r"(dst), "r"((long)w) - ); - - l= *left; - lt= *left_top; - - dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF); - - 
*left_top= src1[w-1]; - *left = src2[w-1]; -} - -#define LBUTTERFLY2(a1,b1,a2,b2)\ - "paddw " #b1 ", " #a1 " \n\t"\ - "paddw " #b2 ", " #a2 " \n\t"\ - "paddw " #b1 ", " #b1 " \n\t"\ - "paddw " #b2 ", " #b2 " \n\t"\ - "psubw " #a1 ", " #b1 " \n\t"\ - "psubw " #a2 ", " #b2 " \n\t" - -#define HADAMARD48\ - LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\ - LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\ - LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\ - LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\ - LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\ - LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\ - -#define MMABS(a,z)\ - "pxor " #z ", " #z " \n\t"\ - "pcmpgtw " #a ", " #z " \n\t"\ - "pxor " #z ", " #a " \n\t"\ - "psubw " #z ", " #a " \n\t" - -#define MMABS_SUM(a,z, sum)\ - "pxor " #z ", " #z " \n\t"\ - "pcmpgtw " #a ", " #z " \n\t"\ - "pxor " #z ", " #a " \n\t"\ - "psubw " #z ", " #a " \n\t"\ - "paddusw " #a ", " #sum " \n\t" - -#define MMABS_MMX2(a,z)\ - "pxor " #z ", " #z " \n\t"\ - "psubw " #a ", " #z " \n\t"\ - "pmaxsw " #z ", " #a " \n\t" - -#define MMABS_SUM_MMX2(a,z, sum)\ - "pxor " #z ", " #z " \n\t"\ - "psubw " #a ", " #z " \n\t"\ - "pmaxsw " #z ", " #a " \n\t"\ - "paddusw " #a ", " #sum " \n\t" - -#define TRANSPOSE4(a,b,c,d,t)\ - SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\ - SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\ - SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\ - SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */ - -#define LOAD4(o, a, b, c, d)\ - "movq "#o"(%1), " #a " \n\t"\ - "movq "#o"+16(%1), " #b " \n\t"\ - "movq "#o"+32(%1), " #c " \n\t"\ - "movq "#o"+48(%1), " #d " \n\t" - -#define STORE4(o, a, b, c, d)\ - "movq "#a", "#o"(%1) \n\t"\ - "movq "#b", "#o"+16(%1) \n\t"\ - "movq "#c", "#o"+32(%1) \n\t"\ - "movq "#d", "#o"+48(%1) \n\t"\ - -static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){ - DECLARE_ALIGNED_8(uint64_t, temp[16]); - int sum=0; - - assert(h==8); - - diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride); - - asm volatile( - LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3) - LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7) - - HADAMARD48 - - "movq %%mm7, 112(%1) \n\t" - - TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7) - STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2) - - "movq 112(%1), %%mm7 \n\t" - TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0) - STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6) - - LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3) - LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7) - - HADAMARD48 - - "movq %%mm7, 120(%1) \n\t" - - TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7) - STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2) - - "movq 120(%1), %%mm7 \n\t" - TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0) - "movq %%mm7, %%mm5 \n\t"//FIXME remove - "movq %%mm6, %%mm7 \n\t" - "movq %%mm0, %%mm6 \n\t" -// STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove - - LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3) -// LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7) - - HADAMARD48 - "movq %%mm7, 64(%1) \n\t" - MMABS(%%mm0, %%mm7) - MMABS_SUM(%%mm1, %%mm7, %%mm0) - MMABS_SUM(%%mm2, %%mm7, %%mm0) - MMABS_SUM(%%mm3, %%mm7, %%mm0) - MMABS_SUM(%%mm4, %%mm7, %%mm0) - MMABS_SUM(%%mm5, %%mm7, %%mm0) - MMABS_SUM(%%mm6, %%mm7, %%mm0) - "movq 64(%1), %%mm1 \n\t" - MMABS_SUM(%%mm1, %%mm7, %%mm0) - "movq %%mm0, 64(%1) \n\t" - - LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3) - LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7) - - HADAMARD48 - "movq %%mm7, (%1) \n\t" - MMABS(%%mm0, %%mm7) - MMABS_SUM(%%mm1, %%mm7, %%mm0) - MMABS_SUM(%%mm2, %%mm7, %%mm0) - MMABS_SUM(%%mm3, %%mm7, %%mm0) - MMABS_SUM(%%mm4, %%mm7, %%mm0) - MMABS_SUM(%%mm5, %%mm7, %%mm0) - MMABS_SUM(%%mm6, %%mm7, %%mm0) - "movq (%1), %%mm1 \n\t" 
- MMABS_SUM(%%mm1, %%mm7, %%mm0) - "movq 64(%1), %%mm1 \n\t" - MMABS_SUM(%%mm1, %%mm7, %%mm0) - - "movq %%mm0, %%mm1 \n\t" - "psrlq $32, %%mm0 \n\t" - "paddusw %%mm1, %%mm0 \n\t" - "movq %%mm0, %%mm1 \n\t" - "psrlq $16, %%mm0 \n\t" - "paddusw %%mm1, %%mm0 \n\t" - "movd %%mm0, %0 \n\t" - - : "=r" (sum) - : "r"(temp) - ); - return sum&0xFFFF; -} - -static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){ - DECLARE_ALIGNED_8(uint64_t, temp[16]); - int sum=0; - - assert(h==8); - - diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride); - - asm volatile( - LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3) - LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7) - - HADAMARD48 - - "movq %%mm7, 112(%1) \n\t" - - TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7) - STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2) - - "movq 112(%1), %%mm7 \n\t" - TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0) - STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6) - - LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3) - LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7) - - HADAMARD48 - - "movq %%mm7, 120(%1) \n\t" - - TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7) - STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2) - - "movq 120(%1), %%mm7 \n\t" - TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0) - "movq %%mm7, %%mm5 \n\t"//FIXME remove - "movq %%mm6, %%mm7 \n\t" - "movq %%mm0, %%mm6 \n\t" -// STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove - - LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3) -// LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7) - - HADAMARD48 - "movq %%mm7, 64(%1) \n\t" - MMABS_MMX2(%%mm0, %%mm7) - MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0) - "movq 64(%1), %%mm1 \n\t" - MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) - "movq %%mm0, 64(%1) \n\t" - - LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3) - LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7) - - HADAMARD48 - "movq %%mm7, (%1) \n\t" - MMABS_MMX2(%%mm0, %%mm7) - MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0) - MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0) - "movq (%1), %%mm1 \n\t" - MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) - "movq 64(%1), %%mm1 \n\t" - MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0) - - "pshufw $0x0E, %%mm0, %%mm1 \n\t" - "paddusw %%mm1, %%mm0 \n\t" - "pshufw $0x01, %%mm0, %%mm1 \n\t" - "paddusw %%mm1, %%mm0 \n\t" - "movd %%mm0, %0 \n\t" - - : "=r" (sum) - : "r"(temp) - ); - return sum&0xFFFF; -} - - -WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx) -WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2) -#endif //CONFIG_ENCODERS - -#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d) -#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d) - -#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\ - "paddw " #m4 ", " #m3 " \n\t" /* x1 */\ - "movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\ - "pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\ - "movq "#in7", " #m3 " \n\t" /* d */\ - "movq "#in0", %%mm5 \n\t" /* D */\ - "paddw " #m3 ", %%mm5 \n\t" /* x4 */\ - "psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\ - "movq "#in1", %%mm5 \n\t" /* C */\ - "movq "#in2", %%mm6 \n\t" /* B */\ - "paddw " #m6 ", %%mm5 \n\t" /* x3 */\ - "paddw " #m5 ", %%mm6 \n\t" /* x2 */\ - "paddw %%mm6, %%mm6 \n\t" /* 2x2 */\ - "psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\ - "pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\ - "paddw " #rnd ", 
%%mm4 \n\t" /* x2 */\ - "paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\ - "psraw $5, %%mm5 \n\t"\ - "packuswb %%mm5, %%mm5 \n\t"\ - OP(%%mm5, out, %%mm7, d) - -#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\ -static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ - uint64_t temp;\ -\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\ - "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\ - "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\ - "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\ - "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\ - "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\ - "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\ - "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\ - "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\ - "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\ - "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\ - "psllq $24, %%mm4 \n\t" /* 000ABCDE */\ - "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\ - "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\ - "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\ - "paddw %%mm3, %%mm5 \n\t" /* b */\ - "paddw %%mm2, %%mm6 \n\t" /* c */\ - "paddw %%mm5, %%mm5 \n\t" /* 2b */\ - "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\ - "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\ - "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\ - "paddw %%mm4, %%mm0 \n\t" /* a */\ - "paddw %%mm1, %%mm5 \n\t" /* d */\ - "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\ - "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\ - "paddw %6, %%mm6 \n\t"\ - "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ - "psraw $5, %%mm0 \n\t"\ - "movq %%mm0, %5 \n\t"\ - /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\ - \ - "movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\ - "movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\ - "movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\ - "psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\ - "psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\ - "punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\ - "punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\ - "paddw %%mm0, %%mm2 \n\t" /* b */\ - "paddw %%mm5, %%mm3 \n\t" /* c */\ - "paddw %%mm2, %%mm2 \n\t" /* 2b */\ - "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\ - "movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\ - "psrlq $24, %%mm6 \n\t" /* IJKLM000 */\ - "punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\ - "punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\ - "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\ - "paddw %%mm2, %%mm1 \n\t" /* a */\ - "paddw %%mm6, %%mm4 \n\t" /* d */\ - "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\ - "psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\ - "paddw %6, %%mm1 \n\t"\ - "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\ - "psraw $5, %%mm3 \n\t"\ - "movq %5, %%mm1 \n\t"\ - "packuswb %%mm3, %%mm1 \n\t"\ - OP_MMX2(%%mm1, (%1),%%mm4, q)\ - /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\ - \ - "movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\ - "movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\ - "movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\ - "psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\ - "psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\ - "punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\ - "punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\ - "paddw %%mm1, %%mm5 \n\t" /* b */\ - "paddw %%mm4, %%mm0 \n\t" /* c */\ - "paddw %%mm5, %%mm5 \n\t" /* 2b */\ - "psubw %%mm5, %%mm0 \n\t" /* c - 2b */\ - "movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\ - "psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\ - "pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\ - "punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\ - "paddw %%mm3, %%mm2 \n\t" /* d */\ - "psubw 
%%mm2, %%mm0 \n\t" /* -6b + 3c - d */\ - "movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\ - "punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\ - "punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\ - "paddw %%mm2, %%mm6 \n\t" /* a */\ - "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\ - "paddw %6, %%mm0 \n\t"\ - "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ - "psraw $5, %%mm0 \n\t"\ - /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\ - \ - "paddw %%mm5, %%mm3 \n\t" /* a */\ - "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\ - "paddw %%mm4, %%mm6 \n\t" /* b */\ - "pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\ - "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\ - "paddw %%mm1, %%mm4 \n\t" /* c */\ - "paddw %%mm2, %%mm5 \n\t" /* d */\ - "paddw %%mm6, %%mm6 \n\t" /* 2b */\ - "psubw %%mm6, %%mm4 \n\t" /* c - 2b */\ - "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\ - "pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\ - "psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\ - "paddw %6, %%mm4 \n\t"\ - "paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\ - "psraw $5, %%mm4 \n\t"\ - "packuswb %%mm4, %%mm0 \n\t"\ - OP_MMX2(%%mm0, 8(%1), %%mm4, q)\ - \ - "add %3, %0 \n\t"\ - "add %4, %1 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - : "+a"(src), "+c"(dst), "+m"(h)\ - : "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\ - : "memory"\ - );\ -}\ -\ -static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ - int i;\ - int16_t temp[16];\ - /* quick HACK, XXX FIXME MUST be optimized */\ - for(i=0; i<h; i++)\ - {\ - temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\ - temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\ - temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\ - temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\ - temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\ - temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\ - temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\ - temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\ - temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\ - temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\ - temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\ - temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\ - temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\ - temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\ - temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\ - temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\ - asm volatile(\ - "movq (%0), %%mm0 \n\t"\ - "movq 8(%0), %%mm1 \n\t"\ - "paddw %2, %%mm0 \n\t"\ - "paddw %2, %%mm1 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "psraw $5, %%mm1 \n\t"\ - "packuswb %%mm1, %%mm0 \n\t"\ - OP_3DNOW(%%mm0, (%1), %%mm1, q)\ - "movq 16(%0), %%mm0 \n\t"\ - "movq 24(%0), %%mm1 \n\t"\ - "paddw %2, %%mm0 \n\t"\ - "paddw %2, 
%%mm1 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "psraw $5, %%mm1 \n\t"\ - "packuswb %%mm1, %%mm0 \n\t"\ - OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\ - :: "r"(temp), "r"(dst), "m"(ROUNDER)\ - : "memory"\ - );\ - dst+=dstStride;\ - src+=srcStride;\ - }\ -}\ -\ -static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ - uint64_t temp;\ -\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t" /* ABCDEFGH */\ - "movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\ - "movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\ - "punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\ - "punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\ - "pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\ - "pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\ - "movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\ - "movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\ - "psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\ - "psllq $16, %%mm3 \n\t" /* 00ABCDEF */\ - "psllq $24, %%mm4 \n\t" /* 000ABCDE */\ - "punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\ - "punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\ - "punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\ - "paddw %%mm3, %%mm5 \n\t" /* b */\ - "paddw %%mm2, %%mm6 \n\t" /* c */\ - "paddw %%mm5, %%mm5 \n\t" /* 2b */\ - "psubw %%mm5, %%mm6 \n\t" /* c - 2b */\ - "pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\ - "pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\ - "paddw %%mm4, %%mm0 \n\t" /* a */\ - "paddw %%mm1, %%mm5 \n\t" /* d */\ - "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\ - "psubw %%mm5, %%mm0 \n\t" /* 20a - d */\ - "paddw %6, %%mm6 \n\t"\ - "paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\ - "psraw $5, %%mm0 \n\t"\ - /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\ - \ - "movd 5(%0), %%mm5 \n\t" /* FGHI */\ - "punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\ - "pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\ - "paddw %%mm5, %%mm1 \n\t" /* a */\ - "paddw %%mm6, %%mm2 \n\t" /* b */\ - "pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\ - "pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\ - "paddw %%mm6, %%mm3 \n\t" /* c */\ - "paddw %%mm5, %%mm4 \n\t" /* d */\ - "paddw %%mm2, %%mm2 \n\t" /* 2b */\ - "psubw %%mm2, %%mm3 \n\t" /* c - 2b */\ - "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\ - "pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\ - "psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\ - "paddw %6, %%mm1 \n\t"\ - "paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\ - "psraw $5, %%mm3 \n\t"\ - "packuswb %%mm3, %%mm0 \n\t"\ - OP_MMX2(%%mm0, (%1), %%mm4, q)\ - \ - "add %3, %0 \n\t"\ - "add %4, %1 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - : "+a"(src), "+c"(dst), "+m"(h)\ - : "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\ - : "memory"\ - );\ -}\ -\ -static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ - int i;\ - int16_t temp[8];\ - /* quick HACK, XXX FIXME MUST be optimized */\ - for(i=0; i<h; i++)\ - {\ - temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\ - temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\ - temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\ - temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\ - temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\ - temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 
3]+src[ 8])*3 - (src[ 2]+src[ 8]);\ - temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\ - temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\ - asm volatile(\ - "movq (%0), %%mm0 \n\t"\ - "movq 8(%0), %%mm1 \n\t"\ - "paddw %2, %%mm0 \n\t"\ - "paddw %2, %%mm1 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "psraw $5, %%mm1 \n\t"\ - "packuswb %%mm1, %%mm0 \n\t"\ - OP_3DNOW(%%mm0, (%1), %%mm1, q)\ - :: "r"(temp), "r"(dst), "m"(ROUNDER)\ - :"memory"\ - );\ - dst+=dstStride;\ - src+=srcStride;\ - }\ -} - -#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\ -\ -static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - uint64_t temp[17*4];\ - uint64_t *temp_ptr= temp;\ - int count= 17;\ -\ - /*FIXME unroll */\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t"\ - "movq (%0), %%mm1 \n\t"\ - "movq 8(%0), %%mm2 \n\t"\ - "movq 8(%0), %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpckhbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpckhbw %%mm7, %%mm3 \n\t"\ - "movq %%mm0, (%1) \n\t"\ - "movq %%mm1, 17*8(%1) \n\t"\ - "movq %%mm2, 2*17*8(%1) \n\t"\ - "movq %%mm3, 3*17*8(%1) \n\t"\ - "add $8, %1 \n\t"\ - "add %3, %0 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - : "+r" (src), "+r" (temp_ptr), "+r"(count)\ - : "r" ((long)srcStride)\ - : "memory"\ - );\ - \ - temp_ptr= temp;\ - count=4;\ - \ -/*FIXME reorder for speed */\ - asm volatile(\ - /*"pxor %%mm7, %%mm7 \n\t"*/\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t"\ - "movq 8(%0), %%mm1 \n\t"\ - "movq 16(%0), %%mm2 \n\t"\ - "movq 24(%0), %%mm3 \n\t"\ - QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\ - QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\ - \ - QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\ - QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\ - QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\ - QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\ - QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\ - \ - QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\ - "add %4, %1 \n\t" \ - QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\ - QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\ - \ - "add $136, %0 \n\t"\ - "add %6, %1 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - \ - : "+r"(temp_ptr), "+r"(dst), "+g"(count)\ - : "r"((long)dstStride), 
"r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\ - :"memory"\ - );\ -}\ -\ -static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - uint64_t temp[9*2];\ - uint64_t *temp_ptr= temp;\ - int count= 9;\ -\ - /*FIXME unroll */\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t"\ - "movq (%0), %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpckhbw %%mm7, %%mm1 \n\t"\ - "movq %%mm0, (%1) \n\t"\ - "movq %%mm1, 9*8(%1) \n\t"\ - "add $8, %1 \n\t"\ - "add %3, %0 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - : "+r" (src), "+r" (temp_ptr), "+r"(count)\ - : "r" ((long)srcStride)\ - : "memory"\ - );\ - \ - temp_ptr= temp;\ - count=2;\ - \ -/*FIXME reorder for speed */\ - asm volatile(\ - /*"pxor %%mm7, %%mm7 \n\t"*/\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t"\ - "movq 8(%0), %%mm1 \n\t"\ - "movq 16(%0), %%mm2 \n\t"\ - "movq 24(%0), %%mm3 \n\t"\ - QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\ - QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\ - \ - QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\ - \ - QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\ - "add %4, %1 \n\t"\ - QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\ - QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\ - \ - "add $72, %0 \n\t"\ - "add %6, %1 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - \ - : "+r"(temp_ptr), "+r"(dst), "+g"(count)\ - : "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\ - : "memory"\ - );\ -}\ -\ -static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## pixels8_mmx(dst, src, stride, 8);\ -}\ -\ -static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[8];\ - uint8_t * const half= (uint8_t*)temp;\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\ - OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\ -}\ -\ -static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\ -}\ -\ -static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[8];\ - uint8_t * const half= (uint8_t*)temp;\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\ - OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\ -}\ -\ -static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[8];\ - uint8_t * const half= (uint8_t*)temp;\ - put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\ - OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\ -}\ -\ -static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\ -}\ -\ -static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[8];\ - 
uint8_t * const half= (uint8_t*)temp;\ - put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\ - OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\ -}\ -static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[8 + 9];\ - uint8_t * const halfH= ((uint8_t*)half) + 64;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ - put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\ - put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\ -}\ -static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[8 + 9];\ - uint8_t * const halfH= ((uint8_t*)half) + 64;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ - put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\ - put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\ -}\ -static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[8 + 9];\ - uint8_t * const halfH= ((uint8_t*)half) + 64;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ - put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\ - put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\ -}\ -static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[8 + 9];\ - uint8_t * const halfH= ((uint8_t*)half) + 64;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ - put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\ - put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\ -}\ -static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[8 + 9];\ - uint8_t * const halfH= ((uint8_t*)half) + 64;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ - put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\ -}\ -static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[8 + 9];\ - uint8_t * const halfH= ((uint8_t*)half) + 64;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ - put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\ - OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\ -}\ -static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[8 + 9];\ - uint8_t * const halfH= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ - put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\ - OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ -}\ -static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[8 + 9];\ - uint8_t * const halfH= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, 
src, 8, stride, 9);\ - put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\ - OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ -}\ -static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[9];\ - uint8_t * const halfH= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\ - OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\ -}\ -static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## pixels16_mmx(dst, src, stride, 16);\ -}\ -\ -static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[32];\ - uint8_t * const half= (uint8_t*)temp;\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\ - OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\ -}\ -\ -static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\ -}\ -\ -static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[32];\ - uint8_t * const half= (uint8_t*)temp;\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\ - OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\ -}\ -\ -static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[32];\ - uint8_t * const half= (uint8_t*)temp;\ - put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\ - OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\ -}\ -\ -static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\ -}\ -\ -static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[32];\ - uint8_t * const half= (uint8_t*)temp;\ - put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\ - OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\ -}\ -static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[16*2 + 17*2];\ - uint8_t * const halfH= ((uint8_t*)half) + 256;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ - put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ - OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ -}\ -static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[16*2 + 17*2];\ - uint8_t * const halfH= ((uint8_t*)half) + 256;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ - put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ - OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ -}\ -static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[16*2 + 17*2];\ - uint8_t * const halfH= ((uint8_t*)half) + 256;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ - put ## RND ## mpeg4_qpel16_v_lowpass_ ## 
MMX(halfHV, halfH, 16, 16);\ - OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ -}\ -static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[16*2 + 17*2];\ - uint8_t * const halfH= ((uint8_t*)half) + 256;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ - put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ - OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ -}\ -static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[16*2 + 17*2];\ - uint8_t * const halfH= ((uint8_t*)half) + 256;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ - OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\ -}\ -static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[16*2 + 17*2];\ - uint8_t * const halfH= ((uint8_t*)half) + 256;\ - uint8_t * const halfHV= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\ - OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\ -}\ -static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[17*2];\ - uint8_t * const halfH= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\ - OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ -}\ -static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[17*2];\ - uint8_t * const halfH= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\ - OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ -}\ -static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t half[17*2];\ - uint8_t * const halfH= ((uint8_t*)half);\ - put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\ - OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\ -} - -#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t" -#define AVG_3DNOW_OP(a,b,temp, size) \ -"mov" #size " " #b ", " #temp " \n\t"\ -"pavgusb " #temp ", " #a " \n\t"\ -"mov" #size " " #a ", " #b " \n\t" -#define AVG_MMX2_OP(a,b,temp, size) \ -"mov" #size " " #b ", " #temp " \n\t"\ -"pavgb " #temp ", " #a " \n\t"\ -"mov" #size " " #a ", " #b " \n\t" - -QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP) -QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP) -QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP) -QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow) -QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow) -QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow) -QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2) -QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2) -QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2) - -/***********************************/ -/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */ - -#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\ 
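/* Reviewer note: these "2tap" variants approximate quarter-pel         \
   positions with bilinear blends instead of the 6-tap MPEG-4 filter,   \
   trading accuracy for speed (hence the "only for -lavdopts fast"      \
   remark above). Half-pel phases simply reuse the existing hpel        \
   x2/y2/xy2 kernels named by HPEL; e.g. the X=2 case reduces, per      \
   pixel, to roughly dst[i] = (src[i] + src[i+1] + 1) >> 1. The _L3     \
   cases blend three shifted source positions.                          */\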
-static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\ -} -#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\ -static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\ -} - -#define QPEL_2TAP(OPNAME, SIZE, MMX)\ -QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\ -QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\ -QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\ -static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\ - OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\ -static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\ - OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\ -static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\ - OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\ -static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\ -}\ -static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\ -}\ -QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0, 1, 0)\ -QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1, -1, 0)\ -QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0, stride, 0)\ -QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride, -stride, 0)\ -QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0, stride, 1)\ -QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1, stride, -1)\ -QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride, -stride, 1)\ -QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\ - -QPEL_2TAP(put_, 16, mmx2) -QPEL_2TAP(avg_, 16, mmx2) -QPEL_2TAP(put_, 8, mmx2) -QPEL_2TAP(avg_, 8, mmx2) -QPEL_2TAP(put_, 16, 3dnow) -QPEL_2TAP(avg_, 16, 3dnow) -QPEL_2TAP(put_, 8, 3dnow) -QPEL_2TAP(avg_, 8, 3dnow) - - -#if 0 -static void just_return() { return; } -#endif - -#define SET_QPEL_FUNC(postfix1, postfix2) \ - c->put_ ## postfix1 = put_ ## postfix2;\ - c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\ - c->avg_ ## postfix1 = avg_ ## postfix2; - -static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, - int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){ - const int w = 8; - const int ix = ox>>(16+shift); - const int iy = oy>>(16+shift); - const int oxs = ox>>4; - const int oys = oy>>4; - const int dxxs = dxx>>4; - const int dxys = dxy>>4; - const int dyxs = dyx>>4; - const int dyys = dyy>>4; - const uint16_t r4[4] = {r,r,r,r}; - const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys}; - const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys}; - const uint64_t shift2 = 2*shift; - uint8_t edge_buf[(h+1)*stride]; - int x, y; - - const int dxw = (dxx-(1<<(16+shift)))*(w-1); - const int dyh = (dyy-(1<<(16+shift)))*(h-1); - const int dxh = dxy*(h-1); - const int dyw = dyx*(w-1); - if( // non-constant fullpel offset (3% of blocks) - (ox^(ox+dxw) | ox^(ox+dxh) | ox^(ox+dxw+dxh) | - oy^(oy+dyw) | oy^(oy+dyh) | oy^(oy+dyw+dyh)) >> (16+shift) - // uses more than 16 bits of subpel mv (only at huge resolution) - || (dxx|dxy|dyx|dyy)&15 ) - { - //FIXME could still use mmx for some of the rows - ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height); - return; - } - - src += ix + iy*stride; - if( (unsigned)ix >= width-w || - (unsigned)iy >= height-h ) - { - ff_emulated_edge_mc(edge_buf, src, 
stride, w+1, h+1, ix, iy, width, height); - src = edge_buf; - } - - asm volatile( - "movd %0, %%mm6 \n\t" - "pxor %%mm7, %%mm7 \n\t" - "punpcklwd %%mm6, %%mm6 \n\t" - "punpcklwd %%mm6, %%mm6 \n\t" - :: "r"(1<<shift) - ); - - for(x=0; x<w; x+=4){ - uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0), - oxs - dxys + dxxs*(x+1), - oxs - dxys + dxxs*(x+2), - oxs - dxys + dxxs*(x+3) }; - uint16_t dy4[4] = { oys - dyys + dyxs*(x+0), - oys - dyys + dyxs*(x+1), - oys - dyys + dyxs*(x+2), - oys - dyys + dyxs*(x+3) }; - - for(y=0; y<h; y++){ - asm volatile( - "movq %0, %%mm4 \n\t" - "movq %1, %%mm5 \n\t" - "paddw %2, %%mm4 \n\t" - "paddw %3, %%mm5 \n\t" - "movq %%mm4, %0 \n\t" - "movq %%mm5, %1 \n\t" - "psrlw $12, %%mm4 \n\t" - "psrlw $12, %%mm5 \n\t" - : "+m"(*dx4), "+m"(*dy4) - : "m"(*dxy4), "m"(*dyy4) - ); - - asm volatile( - "movq %%mm6, %%mm2 \n\t" - "movq %%mm6, %%mm1 \n\t" - "psubw %%mm4, %%mm2 \n\t" - "psubw %%mm5, %%mm1 \n\t" - "movq %%mm2, %%mm0 \n\t" - "movq %%mm4, %%mm3 \n\t" - "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy) - "pmullw %%mm5, %%mm3 \n\t" // dx*dy - "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy - "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy) - - "movd %3, %%mm5 \n\t" - "movd %2, %%mm4 \n\t" - "punpcklbw %%mm7, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy - "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy - - "movd %1, %%mm5 \n\t" - "movd %0, %%mm4 \n\t" - "punpcklbw %%mm7, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy) - "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy) - "paddw %4, %%mm1 \n\t" - "paddw %%mm3, %%mm2 \n\t" - "paddw %%mm1, %%mm0 \n\t" - "paddw %%mm2, %%mm0 \n\t" - - "psrlw %5, %%mm0 \n\t" - "packuswb %%mm0, %%mm0 \n\t" - - : - : "m"(src[0]), "m"(src[1]), - "m"(src[stride]), "m"(src[stride+1]), - "m"(*r4), "m"(shift2) - ); - - asm volatile( - "movd %%mm0, %0 \n\t" - - : "=m"(dst[x+y*stride]) - : - ); - src += stride; - } - src += 4-h*stride; - } -} - -#ifdef CONFIG_ENCODERS -static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){ - long i=0; - - assert(FFABS(scale) < 256); - scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT; - - asm volatile( - "pcmpeqw %%mm6, %%mm6 \n\t" // -1w - "psrlw $15, %%mm6 \n\t" // 1w - "pxor %%mm7, %%mm7 \n\t" - "movd %4, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq 8(%1, %0), %%mm1 \n\t" - "pmulhw %%mm5, %%mm0 \n\t" - "pmulhw %%mm5, %%mm1 \n\t" - "paddw %%mm6, %%mm0 \n\t" - "paddw %%mm6, %%mm1 \n\t" - "psraw $1, %%mm0 \n\t" - "psraw $1, %%mm1 \n\t" - "paddw (%2, %0), %%mm0 \n\t" - "paddw 8(%2, %0), %%mm1 \n\t" - "psraw $6, %%mm0 \n\t" - "psraw $6, %%mm1 \n\t" - "pmullw (%3, %0), %%mm0 \n\t" - "pmullw 8(%3, %0), %%mm1 \n\t" - "pmaddwd %%mm0, %%mm0 \n\t" - "pmaddwd %%mm1, %%mm1 \n\t" - "paddd %%mm1, %%mm0 \n\t" - "psrld $4, %%mm0 \n\t" - "paddd %%mm0, %%mm7 \n\t" - "add $16, %0 \n\t" - "cmp $128, %0 \n\t" //FIXME optimize & bench - " jb 1b \n\t" - "movq %%mm7, %%mm6 \n\t" - "psrlq $32, %%mm7 \n\t" - "paddd %%mm6, %%mm7 \n\t" - "psrld $2, %%mm7 \n\t" - "movd %%mm7, %0 \n\t" - - : "+r" (i) - : "r"(basis), "r"(rem), "r"(weight), "g"(scale) - ); - return i; -} - -static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){ - long i=0; - - if(FFABS(scale) < 256){ - scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT; - asm volatile( - "pcmpeqw %%mm6, %%mm6 \n\t" // -1w - "psrlw $15, %%mm6 \n\t" // 1w - "movd %3, %%mm5 \n\t" - "punpcklwd 
%%mm5, %%mm5 \n\t" - "punpcklwd %%mm5, %%mm5 \n\t" - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq 8(%1, %0), %%mm1 \n\t" - "pmulhw %%mm5, %%mm0 \n\t" - "pmulhw %%mm5, %%mm1 \n\t" - "paddw %%mm6, %%mm0 \n\t" - "paddw %%mm6, %%mm1 \n\t" - "psraw $1, %%mm0 \n\t" - "psraw $1, %%mm1 \n\t" - "paddw (%2, %0), %%mm0 \n\t" - "paddw 8(%2, %0), %%mm1 \n\t" - "movq %%mm0, (%2, %0) \n\t" - "movq %%mm1, 8(%2, %0) \n\t" - "add $16, %0 \n\t" - "cmp $128, %0 \n\t" //FIXME optimize & bench - " jb 1b \n\t" - - : "+r" (i) - : "r"(basis), "r"(rem), "g"(scale) - ); - }else{ - for(i=0; i<8*8; i++){ - rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT); - } - } -} -#endif /* CONFIG_ENCODERS */ - -#define PREFETCH(name, op) \ -static void name(void *mem, int stride, int h){\ - const uint8_t *p= mem;\ - do{\ - asm volatile(#op" %0" :: "m"(*p));\ - p+= stride;\ - }while(--h);\ -} -PREFETCH(prefetch_mmx2, prefetcht0) -PREFETCH(prefetch_3dnow, prefetch) -#undef PREFETCH - -#include "h264dsp_mmx.c" - -/* AVS specific */ -void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx); - -void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { - put_pixels8_mmx(dst, src, stride, 8); -} -void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { - avg_pixels8_mmx(dst, src, stride, 8); -} -void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { - put_pixels16_mmx(dst, src, stride, 16); -} -void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) { - avg_pixels16_mmx(dst, src, stride, 16); -} - -/* external functions, from idct_mmx.c */ -void ff_mmx_idct(DCTELEM *block); -void ff_mmxext_idct(DCTELEM *block); - -void ff_vp3_idct_sse2(int16_t *input_data); -void ff_vp3_idct_mmx(int16_t *data); -void ff_vp3_dsp_init_mmx(void); - -/* XXX: those functions should be suppressed ASAP when all IDCTs are - converted */ -#ifdef CONFIG_GPL -static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_mmx_idct (block); - put_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_mmx_idct (block); - add_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_mmxext_idct (block); - put_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_mmxext_idct (block); - add_pixels_clamped_mmx(block, dest, line_size); -} -#endif -static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_vp3_idct_sse2(block); - put_signed_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_vp3_idct_sse2(block); - add_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_vp3_idct_mmx(block); - put_signed_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_vp3_idct_mmx(block); - add_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_idct_xvid_mmx (block); - put_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_idct_xvid_mmx (block); - 
add_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_idct_xvid_mmx2 (block); - put_pixels_clamped_mmx(block, dest, line_size); -} -static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block) -{ - ff_idct_xvid_mmx2 (block); - add_pixels_clamped_mmx(block, dest, line_size); -} - -static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize) -{ - int i; - asm volatile("pxor %%mm7, %%mm7":); - for(i=0; i<blocksize; i+=2) { - asm volatile( - "movq %0, %%mm0 \n\t" - "movq %1, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "movq %%mm1, %%mm3 \n\t" - "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0 - "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0 - "pslld $31, %%mm2 \n\t" // keep only the sign bit - "pxor %%mm2, %%mm1 \n\t" - "movq %%mm3, %%mm4 \n\t" - "pand %%mm1, %%mm3 \n\t" - "pandn %%mm1, %%mm4 \n\t" - "pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m))) - "pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m))) - "movq %%mm3, %1 \n\t" - "movq %%mm0, %0 \n\t" - :"+m"(mag[i]), "+m"(ang[i]) - ::"memory" - ); - } - asm volatile("femms"); -} -static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize) -{ - int i; - - asm volatile( - "movaps %0, %%xmm5 \n\t" - ::"m"(ff_pdw_80000000[0]) - ); - for(i=0; i<blocksize; i+=4) { - asm volatile( - "movaps %0, %%xmm0 \n\t" - "movaps %1, %%xmm1 \n\t" - "xorps %%xmm2, %%xmm2 \n\t" - "xorps %%xmm3, %%xmm3 \n\t" - "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0 - "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0 - "andps %%xmm5, %%xmm2 \n\t" // keep only the sign bit - "xorps %%xmm2, %%xmm1 \n\t" - "movaps %%xmm3, %%xmm4 \n\t" - "andps %%xmm1, %%xmm3 \n\t" - "andnps %%xmm1, %%xmm4 \n\t" - "addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m))) - "subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m))) - "movaps %%xmm3, %1 \n\t" - "movaps %%xmm0, %0 \n\t" - :"+m"(mag[i]), "+m"(ang[i]) - ::"memory" - ); - } -} - -static void vector_fmul_3dnow(float *dst, const float *src, int len){ - long i = (len-4)*4; - asm volatile( - "1: \n\t" - "movq (%1,%0), %%mm0 \n\t" - "movq 8(%1,%0), %%mm1 \n\t" - "pfmul (%2,%0), %%mm0 \n\t" - "pfmul 8(%2,%0), %%mm1 \n\t" - "movq %%mm0, (%1,%0) \n\t" - "movq %%mm1, 8(%1,%0) \n\t" - "sub $16, %0 \n\t" - "jge 1b \n\t" - "femms \n\t" - :"+r"(i) - :"r"(dst), "r"(src) - :"memory" - ); -} -static void vector_fmul_sse(float *dst, const float *src, int len){ - long i = (len-8)*4; - asm volatile( - "1: \n\t" - "movaps (%1,%0), %%xmm0 \n\t" - "movaps 16(%1,%0), %%xmm1 \n\t" - "mulps (%2,%0), %%xmm0 \n\t" - "mulps 16(%2,%0), %%xmm1 \n\t" - "movaps %%xmm0, (%1,%0) \n\t" - "movaps %%xmm1, 16(%1,%0) \n\t" - "sub $32, %0 \n\t" - "jge 1b \n\t" - :"+r"(i) - :"r"(dst), "r"(src) - :"memory" - ); -} - -static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){ - long i = len*4-16; - asm volatile( - "1: \n\t" - "pswapd 8(%1), %%mm0 \n\t" - "pswapd (%1), %%mm1 \n\t" - "pfmul (%3,%0), %%mm0 \n\t" - "pfmul 8(%3,%0), %%mm1 \n\t" - "movq %%mm0, (%2,%0) \n\t" - "movq %%mm1, 8(%2,%0) \n\t" - "add $16, %1 \n\t" - "sub $16, %0 \n\t" - "jge 1b \n\t" - :"+r"(i), "+r"(src1) - :"r"(dst), "r"(src0) - ); - asm volatile("femms"); -} -static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){ - long i = len*4-32; - asm volatile( - "1: \n\t" - "movaps 16(%1), %%xmm0 \n\t" - "movaps (%1), %%xmm1 \n\t" - "shufps $0x1b, %%xmm0, %%xmm0 \n\t" - "shufps $0x1b, %%xmm1, 
%%xmm1 \n\t" - "mulps (%3,%0), %%xmm0 \n\t" - "mulps 16(%3,%0), %%xmm1 \n\t" - "movaps %%xmm0, (%2,%0) \n\t" - "movaps %%xmm1, 16(%2,%0) \n\t" - "add $32, %1 \n\t" - "sub $32, %0 \n\t" - "jge 1b \n\t" - :"+r"(i), "+r"(src1) - :"r"(dst), "r"(src0) - ); -} - -static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1, - const float *src2, int src3, int len, int step){ - long i = (len-4)*4; - if(step == 2 && src3 == 0){ - dst += (len-4)*2; - asm volatile( - "1: \n\t" - "movq (%2,%0), %%mm0 \n\t" - "movq 8(%2,%0), %%mm1 \n\t" - "pfmul (%3,%0), %%mm0 \n\t" - "pfmul 8(%3,%0), %%mm1 \n\t" - "pfadd (%4,%0), %%mm0 \n\t" - "pfadd 8(%4,%0), %%mm1 \n\t" - "movd %%mm0, (%1) \n\t" - "movd %%mm1, 16(%1) \n\t" - "psrlq $32, %%mm0 \n\t" - "psrlq $32, %%mm1 \n\t" - "movd %%mm0, 8(%1) \n\t" - "movd %%mm1, 24(%1) \n\t" - "sub $32, %1 \n\t" - "sub $16, %0 \n\t" - "jge 1b \n\t" - :"+r"(i), "+r"(dst) - :"r"(src0), "r"(src1), "r"(src2) - :"memory" - ); - } - else if(step == 1 && src3 == 0){ - asm volatile( - "1: \n\t" - "movq (%2,%0), %%mm0 \n\t" - "movq 8(%2,%0), %%mm1 \n\t" - "pfmul (%3,%0), %%mm0 \n\t" - "pfmul 8(%3,%0), %%mm1 \n\t" - "pfadd (%4,%0), %%mm0 \n\t" - "pfadd 8(%4,%0), %%mm1 \n\t" - "movq %%mm0, (%1,%0) \n\t" - "movq %%mm1, 8(%1,%0) \n\t" - "sub $16, %0 \n\t" - "jge 1b \n\t" - :"+r"(i) - :"r"(dst), "r"(src0), "r"(src1), "r"(src2) - :"memory" - ); - } - else - ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step); - asm volatile("femms"); -} -static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1, - const float *src2, int src3, int len, int step){ - long i = (len-8)*4; - if(step == 2 && src3 == 0){ - dst += (len-8)*2; - asm volatile( - "1: \n\t" - "movaps (%2,%0), %%xmm0 \n\t" - "movaps 16(%2,%0), %%xmm1 \n\t" - "mulps (%3,%0), %%xmm0 \n\t" - "mulps 16(%3,%0), %%xmm1 \n\t" - "addps (%4,%0), %%xmm0 \n\t" - "addps 16(%4,%0), %%xmm1 \n\t" - "movss %%xmm0, (%1) \n\t" - "movss %%xmm1, 32(%1) \n\t" - "movhlps %%xmm0, %%xmm2 \n\t" - "movhlps %%xmm1, %%xmm3 \n\t" - "movss %%xmm2, 16(%1) \n\t" - "movss %%xmm3, 48(%1) \n\t" - "shufps $0xb1, %%xmm0, %%xmm0 \n\t" - "shufps $0xb1, %%xmm1, %%xmm1 \n\t" - "movss %%xmm0, 8(%1) \n\t" - "movss %%xmm1, 40(%1) \n\t" - "movhlps %%xmm0, %%xmm2 \n\t" - "movhlps %%xmm1, %%xmm3 \n\t" - "movss %%xmm2, 24(%1) \n\t" - "movss %%xmm3, 56(%1) \n\t" - "sub $64, %1 \n\t" - "sub $32, %0 \n\t" - "jge 1b \n\t" - :"+r"(i), "+r"(dst) - :"r"(src0), "r"(src1), "r"(src2) - :"memory" - ); - } - else if(step == 1 && src3 == 0){ - asm volatile( - "1: \n\t" - "movaps (%2,%0), %%xmm0 \n\t" - "movaps 16(%2,%0), %%xmm1 \n\t" - "mulps (%3,%0), %%xmm0 \n\t" - "mulps 16(%3,%0), %%xmm1 \n\t" - "addps (%4,%0), %%xmm0 \n\t" - "addps 16(%4,%0), %%xmm1 \n\t" - "movaps %%xmm0, (%1,%0) \n\t" - "movaps %%xmm1, 16(%1,%0) \n\t" - "sub $32, %0 \n\t" - "jge 1b \n\t" - :"+r"(i) - :"r"(dst), "r"(src0), "r"(src1), "r"(src2) - :"memory" - ); - } - else - ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step); -} - -static void float_to_int16_3dnow(int16_t *dst, const float *src, int len){ - // not bit-exact: pf2id uses different rounding than C and SSE - int i; - for(i=0; i<len; i+=4) { - asm volatile( - "pf2id %1, %%mm0 \n\t" - "pf2id %2, %%mm1 \n\t" - "packssdw %%mm1, %%mm0 \n\t" - "movq %%mm0, %0 \n\t" - :"=m"(dst[i]) - :"m"(src[i]), "m"(src[i+2]) - ); - } - asm volatile("femms"); -} -static void float_to_int16_sse(int16_t *dst, const float *src, int len){ - int i; - for(i=0; i<len; i+=4) { - asm volatile( - "cvtps2pi %1, %%mm0 \n\t" 
- "cvtps2pi %2, %%mm1 \n\t" - "packssdw %%mm1, %%mm0 \n\t" - "movq %%mm0, %0 \n\t" - :"=m"(dst[i]) - :"m"(src[i]), "m"(src[i+2]) - ); - } - asm volatile("emms"); -} - -#ifdef CONFIG_SNOW_ENCODER -extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width); -extern void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width); -extern void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width); -extern void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width); -extern void ff_snow_inner_add_yblock_sse2(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, - int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); -extern void ff_snow_inner_add_yblock_mmx(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, - int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8); -#endif - -void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx) -{ - mm_flags = mm_support(); - - if (avctx->dsp_mask) { - if (avctx->dsp_mask & FF_MM_FORCE) - mm_flags |= (avctx->dsp_mask & 0xffff); - else - mm_flags &= ~(avctx->dsp_mask & 0xffff); - } - -#if 0 - av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:"); - if (mm_flags & MM_MMX) - av_log(avctx, AV_LOG_INFO, " mmx"); - if (mm_flags & MM_MMXEXT) - av_log(avctx, AV_LOG_INFO, " mmxext"); - if (mm_flags & MM_3DNOW) - av_log(avctx, AV_LOG_INFO, " 3dnow"); - if (mm_flags & MM_SSE) - av_log(avctx, AV_LOG_INFO, " sse"); - if (mm_flags & MM_SSE2) - av_log(avctx, AV_LOG_INFO, " sse2"); - av_log(avctx, AV_LOG_INFO, "\n"); -#endif - - if (mm_flags & MM_MMX) { - const int idct_algo= avctx->idct_algo; - -#ifdef CONFIG_ENCODERS - const int dct_algo = avctx->dct_algo; - if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ - if(mm_flags & MM_SSE2){ - c->fdct = ff_fdct_sse2; - }else if(mm_flags & MM_MMXEXT){ - c->fdct = ff_fdct_mmx2; - }else{ - c->fdct = ff_fdct_mmx; - } - } -#endif //CONFIG_ENCODERS - if(avctx->lowres==0){ - if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){ - c->idct_put= ff_simple_idct_put_mmx; - c->idct_add= ff_simple_idct_add_mmx; - c->idct = ff_simple_idct_mmx; - c->idct_permutation_type= FF_SIMPLE_IDCT_PERM; -#ifdef CONFIG_GPL - }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){ - if(mm_flags & MM_MMXEXT){ - c->idct_put= ff_libmpeg2mmx2_idct_put; - c->idct_add= ff_libmpeg2mmx2_idct_add; - c->idct = ff_mmxext_idct; - }else{ - c->idct_put= ff_libmpeg2mmx_idct_put; - c->idct_add= ff_libmpeg2mmx_idct_add; - c->idct = ff_mmx_idct; - } - c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM; -#endif - }else if(idct_algo==FF_IDCT_VP3 && - avctx->codec->id!=CODEC_ID_THEORA && - !(avctx->flags & CODEC_FLAG_BITEXACT)){ - if(mm_flags & MM_SSE2){ - c->idct_put= ff_vp3_idct_put_sse2; - c->idct_add= ff_vp3_idct_add_sse2; - c->idct = ff_vp3_idct_sse2; - c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; - }else{ - ff_vp3_dsp_init_mmx(); - c->idct_put= ff_vp3_idct_put_mmx; - c->idct_add= ff_vp3_idct_add_mmx; - c->idct = ff_vp3_idct_mmx; - c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM; - } - }else if(idct_algo==FF_IDCT_CAVS){ - c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM; - }else if(idct_algo==FF_IDCT_XVIDMMX){ - if(mm_flags & MM_MMXEXT){ - c->idct_put= ff_idct_xvid_mmx2_put; - c->idct_add= ff_idct_xvid_mmx2_add; - c->idct = ff_idct_xvid_mmx2; - }else{ - c->idct_put= ff_idct_xvid_mmx_put; - c->idct_add= ff_idct_xvid_mmx_add; - c->idct = 
ff_idct_xvid_mmx; - } - } - } - -#ifdef CONFIG_ENCODERS - c->get_pixels = get_pixels_mmx; - c->diff_pixels = diff_pixels_mmx; -#endif //CONFIG_ENCODERS - c->put_pixels_clamped = put_pixels_clamped_mmx; - c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx; - c->add_pixels_clamped = add_pixels_clamped_mmx; - c->clear_blocks = clear_blocks_mmx; -#ifdef CONFIG_ENCODERS - c->pix_sum = pix_sum16_mmx; -#endif //CONFIG_ENCODERS - - c->put_pixels_tab[0][0] = put_pixels16_mmx; - c->put_pixels_tab[0][1] = put_pixels16_x2_mmx; - c->put_pixels_tab[0][2] = put_pixels16_y2_mmx; - c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx; - - c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx; - c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx; - c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx; - c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx; - - c->avg_pixels_tab[0][0] = avg_pixels16_mmx; - c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx; - c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx; - c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx; - - c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx; - c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx; - c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx; - c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx; - - c->put_pixels_tab[1][0] = put_pixels8_mmx; - c->put_pixels_tab[1][1] = put_pixels8_x2_mmx; - c->put_pixels_tab[1][2] = put_pixels8_y2_mmx; - c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx; - - c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx; - c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx; - c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx; - c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx; - - c->avg_pixels_tab[1][0] = avg_pixels8_mmx; - c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx; - c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx; - c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx; - - c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx; - c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx; - c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx; - c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx; - - c->gmc= gmc_mmx; - - c->add_bytes= add_bytes_mmx; -#ifdef CONFIG_ENCODERS - c->diff_bytes= diff_bytes_mmx; - - c->hadamard8_diff[0]= hadamard8_diff16_mmx; - c->hadamard8_diff[1]= hadamard8_diff_mmx; - - c->pix_norm1 = pix_norm1_mmx; - c->sse[0] = (mm_flags & MM_SSE2) ? 
sse16_sse2 : sse16_mmx; - c->sse[1] = sse8_mmx; - c->vsad[4]= vsad_intra16_mmx; - - c->nsse[0] = nsse16_mmx; - c->nsse[1] = nsse8_mmx; - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->vsad[0] = vsad16_mmx; - } - - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->try_8x8basis= try_8x8basis_mmx; - } - c->add_8x8basis= add_8x8basis_mmx; - -#endif //CONFIG_ENCODERS - - c->h263_v_loop_filter= h263_v_loop_filter_mmx; - c->h263_h_loop_filter= h263_h_loop_filter_mmx; - c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx; - c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx; - - c->h264_idct_dc_add= - c->h264_idct_add= ff_h264_idct_add_mmx; - c->h264_idct8_dc_add= - c->h264_idct8_add= ff_h264_idct8_add_mmx; - - if (mm_flags & MM_MMXEXT) { - c->prefetch = prefetch_mmx2; - - c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2; - c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2; - - c->avg_pixels_tab[0][0] = avg_pixels16_mmx2; - c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2; - c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2; - - c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2; - c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2; - - c->avg_pixels_tab[1][0] = avg_pixels8_mmx2; - c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2; - c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2; - -#ifdef CONFIG_ENCODERS - c->hadamard8_diff[0]= hadamard8_diff16_mmx2; - c->hadamard8_diff[1]= hadamard8_diff_mmx2; - c->vsad[4]= vsad_intra16_mmx2; -#endif //CONFIG_ENCODERS - - c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2; - c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2; - - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2; - c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2; - c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2; - c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2; - c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2; - c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2; -#ifdef CONFIG_ENCODERS - c->vsad[0] = vsad16_mmx2; -#endif //CONFIG_ENCODERS - } - -#if 1 - SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2) - 
SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2) - SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2) -#endif - -//FIXME 3dnow too -#define dspfunc(PFX, IDX, NUM) \ - c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \ - c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \ - c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \ - c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \ - c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \ - c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \ - c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \ - c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2 - - dspfunc(put_h264_qpel, 0, 16); - dspfunc(put_h264_qpel, 1, 8); - dspfunc(put_h264_qpel, 2, 4); - dspfunc(avg_h264_qpel, 0, 16); - dspfunc(avg_h264_qpel, 1, 8); - dspfunc(avg_h264_qpel, 2, 4); - - dspfunc(put_2tap_qpel, 0, 16); - dspfunc(put_2tap_qpel, 1, 8); - dspfunc(avg_2tap_qpel, 0, 16); - dspfunc(avg_2tap_qpel, 1, 8); -#undef dspfunc - - c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2; - c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2; - c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2; - c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2; - c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2; - c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2; - c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2; - c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2; - c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2; - c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2; - c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2; - - c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2; - c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2; - c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2; - c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2; - c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2; - c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2; - c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2; - c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2; - - c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2; - c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2; - c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2; - c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2; - c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2; - c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2; - c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2; - c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2; - -#ifdef CONFIG_CAVS_DECODER - ff_cavsdsp_init_mmx2(c, avctx); -#endif - -#ifdef CONFIG_ENCODERS - c->sub_hfyu_median_prediction= 
sub_hfyu_median_prediction_mmx2; -#endif //CONFIG_ENCODERS - } else if (mm_flags & MM_3DNOW) { - c->prefetch = prefetch_3dnow; - - c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow; - c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow; - - c->avg_pixels_tab[0][0] = avg_pixels16_3dnow; - c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow; - c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow; - - c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow; - c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow; - - c->avg_pixels_tab[1][0] = avg_pixels8_3dnow; - c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow; - c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow; - - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow; - c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow; - c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow; - c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow; - c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow; - c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow; - } - - SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow) - SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow) - -#define dspfunc(PFX, IDX, NUM) \ - c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \ - c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \ - c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \ - c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \ - c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \ - c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \ - c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \ - c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \ - c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \ - c->PFX ## 
_pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \ - c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \ - c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \ - c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \ - c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \ - c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \ - c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow - - dspfunc(put_h264_qpel, 0, 16); - dspfunc(put_h264_qpel, 1, 8); - dspfunc(put_h264_qpel, 2, 4); - dspfunc(avg_h264_qpel, 0, 16); - dspfunc(avg_h264_qpel, 1, 8); - dspfunc(avg_h264_qpel, 2, 4); - - dspfunc(put_2tap_qpel, 0, 16); - dspfunc(put_2tap_qpel, 1, 8); - dspfunc(avg_2tap_qpel, 0, 16); - dspfunc(avg_2tap_qpel, 1, 8); - - c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow; - c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow; - } - -#ifdef CONFIG_SNOW_ENCODER - if(mm_flags & MM_SSE2){ - c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2; - c->vertical_compose97i = ff_snow_vertical_compose97i_sse2; - c->inner_add_yblock = ff_snow_inner_add_yblock_sse2; - } - else{ - c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx; - c->vertical_compose97i = ff_snow_vertical_compose97i_mmx; - c->inner_add_yblock = ff_snow_inner_add_yblock_mmx; - } -#endif - - if(mm_flags & MM_3DNOW){ - c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow; - c->vector_fmul = vector_fmul_3dnow; - if(!(avctx->flags & CODEC_FLAG_BITEXACT)) - c->float_to_int16 = float_to_int16_3dnow; - } - if(mm_flags & MM_3DNOWEXT) - c->vector_fmul_reverse = vector_fmul_reverse_3dnow2; - if(mm_flags & MM_SSE){ - c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse; - c->vector_fmul = vector_fmul_sse; - c->float_to_int16 = float_to_int16_sse; - c->vector_fmul_reverse = vector_fmul_reverse_sse; - c->vector_fmul_add_add = vector_fmul_add_add_sse; - } - if(mm_flags & MM_3DNOW) - c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse - } - -#ifdef CONFIG_ENCODERS - dsputil_init_pix_mmx(c, avctx); -#endif //CONFIG_ENCODERS -#if 0 - // for speed testing - get_pixels = just_return; - put_pixels_clamped = just_return; - add_pixels_clamped = just_return; - - pix_abs16x16 = just_return; - pix_abs16x16_x2 = just_return; - pix_abs16x16_y2 = just_return; - pix_abs16x16_xy2 = just_return; - - put_pixels_tab[0] = just_return; - put_pixels_tab[1] = just_return; - put_pixels_tab[2] = just_return; - put_pixels_tab[3] = just_return; - - put_no_rnd_pixels_tab[0] = just_return; - put_no_rnd_pixels_tab[1] = just_return; - put_no_rnd_pixels_tab[2] = just_return; - put_no_rnd_pixels_tab[3] = just_return; - - avg_pixels_tab[0] = just_return; - avg_pixels_tab[1] = just_return; - avg_pixels_tab[2] = just_return; - avg_pixels_tab[3] = just_return; - - avg_no_rnd_pixels_tab[0] = just_return; - avg_no_rnd_pixels_tab[1] = just_return; - avg_no_rnd_pixels_tab[2] = just_return; - avg_no_rnd_pixels_tab[3] = just_return; - - //av_fdct = just_return; - //ff_idct = just_return; -#endif -} diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h deleted file mode 100644 index b365cea57..000000000 --- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h +++ /dev/null @@ -1,870 +0,0 @@ -/* - * DSP utils : average functions are compiled twice for 3dnow/mmx2 - * Copyright (c) 2000, 2001 Fabrice Bellard. - * Copyright (c) 2002-2004 Michael Niedermayer - * - * This file is part of FFmpeg. 
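
The dsputil_mmx.c initializer deleted above is a capability ladder: every entry point starts out as the baseline MMX routine, and each higher CPU level (MMX2, 3DNow!, SSE/SSE2) only overwrites the table slots it actually improves, with rounding-sensitive variants additionally gated on CODEC_FLAG_BITEXACT. The pixel tables are indexed [block size][half-pel offset]: 0/1 selects 16x16 vs 8x8 blocks, and 0..3 selects full-pel, x-half (_x2), y-half (_y2) and both (_xy2). A stripped-down sketch of the same pattern; the struct and function names here are illustrative stand-ins, not the real dsputil API:

    #include <stdint.h>

    typedef void (*op_pixels_func)(uint8_t *dst, const uint8_t *src,
                                   int line_size, int h);

    /* miniature of DSPContext: [0]=16x16, [1]=8x8; second index = half-pel dxy */
    struct mini_dsp {
        op_pixels_func put_pixels_tab[2][4];
    };

    #define MM_MMX    0x0001
    #define MM_MMXEXT 0x0002

    static void put_pixels16_c(uint8_t *dst, const uint8_t *src, int ls, int h)
    {
        for (; h > 0; h--, dst += ls, src += ls)
            for (int i = 0; i < 16; i++)
                dst[i] = src[i];
    }

    static void put_pixels16_x2_c(uint8_t *dst, const uint8_t *src, int ls, int h)
    {
        /* x-half-pel: average each byte with its right neighbour, rounding up */
        for (; h > 0; h--, dst += ls, src += ls)
            for (int i = 0; i < 16; i++)
                dst[i] = (uint8_t)((src[i] + src[i + 1] + 1) >> 1);
    }

    void mini_dsp_init(struct mini_dsp *c, int mm_flags)
    {
        if (mm_flags & MM_MMX) {
            c->put_pixels_tab[0][0] = put_pixels16_c;    /* would be the _mmx asm */
            c->put_pixels_tab[0][1] = put_pixels16_x2_c;
        }
        if (mm_flags & MM_MMXEXT) {
            /* a later level overwrites only the slots it speeds up
               (PAVGB makes the half-pel cases much cheaper) */
            c->put_pixels_tab[0][1] = put_pixels16_x2_c; /* would be the _mmx2 asm */
        }
    }
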
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - * MMX optimization by Nick Kurshev <nickols_k@mail.ru> - * mostly rewritten by Michael Niedermayer <michaelni@gmx.at> - * and improved by Zdenek Kabelac <kabi@users.sf.net> - */ - -/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm - clobber bug - now it will work with 2.95.2 and also with -fPIC - */ -static void DEF(put_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm __volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $4, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "movd (%2), %%mm2 \n\t" - "movd 4(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "movd 8(%2), %%mm2 \n\t" - "movd 12(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $16, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -} - - -static void DEF(put_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm __volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - 
PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... -/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_no_rnd_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm __volatile( - "pcmpeqb %%mm6, %%mm6 \n\t" - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq 16(%2), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(avg_pixels4_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm __volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movd (%1), %%mm0 \n\t" - "movd (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $4, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 4(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movd (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movd (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 8(%2), %%mm0 \n\t" - PAVGB" 12(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movd %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movd %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $16, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -} - - -static void DEF(avg_pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm __volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - "movq %%mm0, (%3) \n\t" - "add %5, %3 \n\t" - PAVGB" (%3), %%mm1 \n\t" - "movq %%mm1, (%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%1, %3), %%mm3 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 9(%1), %%mm2 \n\t" - PAVGB" 9(%1, %3), %%mm3 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm2, 8(%2) \n\t" - "movq %%mm3, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%1, %3), %%mm3 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 9(%1), %%mm2 \n\t" - PAVGB" 9(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq %%mm2, 8(%2) \n\t" - "movq %%mm3, 8(%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm __volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(avg_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm __volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" 8(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - PAVGB" 16(%2), %%mm0 \n\t" - PAVGB" 24(%2), %%mm1 \n\t" - PAVGB" (%3), %%mm0 \n\t" - PAVGB" 8(%3), %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... -/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -static void DEF(put_no_rnd_pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - __asm __volatile( - "pcmpeqb %%mm6, %%mm6 \n\t" - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%2), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 8(%1), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq 16(%2), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "pxor %%mm6, %%mm2 \n\t" - "pxor %%mm6, %%mm3 \n\t" - PAVGB" %%mm2, %%mm0 \n\t" - PAVGB" %%mm3, %%mm1 \n\t" - "pxor %%mm6, %%mm0 \n\t" - "pxor %%mm6, %%mm1 \n\t" - "movq %%mm0, (%3) \n\t" - "movq %%mm1, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -//the following should be used, though better not with gcc ... 
-/* :"+g"(h), "+r"(src1), "+r"(src2), "+r"(dst) - :"r"(src1Stride), "r"(dstStride) - :"memory");*/ -} - -/* GL: this function does incorrect rounding if overflow */ -static void DEF(put_no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm0 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm0 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(put_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D" (block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -/* GL: this function does incorrect rounding if overflow */ -static void DEF(put_no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "psubusb %%mm6, %%mm1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D" (block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%2), %%mm0 \n\t" - "movq (%2, %3), %%mm1 \n\t" - PAVGB" (%1), %%mm0 \n\t" - PAVGB" (%1, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%2), %%mm0 \n\t" - "movq (%2, %3), %%mm1 \n\t" - PAVGB" (%1), %%mm0 \n\t" - PAVGB" (%1, %3), %%mm1 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add 
%%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm2 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%1, %3), %%mm2 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm2 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm2 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm2, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -static void DEF(avg_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - "sub %3, %2 \n\t" - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - "movq (%2, %3), %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - PAVGB" %%mm3, %%mm0 \n\t" - PAVGB" %%mm4, %%mm1 \n\t" - "movq %%mm0, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - "movq (%2, %3), %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - PAVGB" %%mm3, %%mm2 \n\t" - PAVGB" %%mm4, %%mm1 \n\t" - "movq %%mm2, (%2, %3) \n\t" - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -// Note this is not correctly rounded, but this function is only used for b frames so it doesnt matter -static void DEF(avg_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BONE(mm6); - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - PAVGB" 1(%1), %%mm0 \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "psubusb %%mm6, %%mm2 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 1(%1, %%"REG_a"), %%mm2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm0 \n\t" - PAVGB" %%mm2, %%mm1 \n\t" - PAVGB" (%2), %%mm0 \n\t" - PAVGB" (%2, %3), %%mm1 \n\t" - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGB" 1(%1, %3), %%mm1 \n\t" - PAVGB" 1(%1, %%"REG_a"), %%mm0 \n\t" - "add %%"REG_a", %2 \n\t" - "add %%"REG_a", %1 \n\t" - PAVGB" %%mm1, %%mm2 \n\t" - PAVGB" %%mm0, %%mm1 \n\t" - PAVGB" (%2), %%mm2 \n\t" - PAVGB" (%2, %3), %%mm1 \n\t" - "movq %%mm2, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r" ((long)line_size) - :"%"REG_a, "memory"); -} - -//FIXME the following could be optimized too ... 
-static void DEF(put_no_rnd_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h); - DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h); -} -static void DEF(put_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_pixels8_y2)(block , pixels , line_size, h); - DEF(put_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(put_no_rnd_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put_no_rnd_pixels8_y2)(block , pixels , line_size, h); - DEF(put_no_rnd_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8)(block , pixels , line_size, h); - DEF(avg_pixels8)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_x2)(block , pixels , line_size, h); - DEF(avg_pixels8_x2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_y2)(block , pixels , line_size, h); - DEF(avg_pixels8_y2)(block+8, pixels+8, line_size, h); -} -static void DEF(avg_pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg_pixels8_xy2)(block , pixels , line_size, h); - DEF(avg_pixels8_xy2)(block+8, pixels+8, line_size, h); -} - -#define QPEL_2TAP_L3(OPNAME) \ -static void DEF(OPNAME ## 2tap_qpel16_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ - asm volatile(\ - "1: \n\t"\ - "movq (%1,%2), %%mm0 \n\t"\ - "movq 8(%1,%2), %%mm1 \n\t"\ - PAVGB" (%1,%3), %%mm0 \n\t"\ - PAVGB" 8(%1,%3), %%mm1 \n\t"\ - PAVGB" (%1), %%mm0 \n\t"\ - PAVGB" 8(%1), %%mm1 \n\t"\ - STORE_OP( (%1,%4),%%mm0)\ - STORE_OP(8(%1,%4),%%mm1)\ - "movq %%mm0, (%1,%4) \n\t"\ - "movq %%mm1, 8(%1,%4) \n\t"\ - "add %5, %1 \n\t"\ - "decl %0 \n\t"\ - "jnz 1b \n\t"\ - :"+g"(h), "+r"(src)\ - :"r"((long)off1), "r"((long)off2),\ - "r"((long)(dst-src)), "r"((long)stride)\ - :"memory"\ - );\ -}\ -static void DEF(OPNAME ## 2tap_qpel8_l3)(uint8_t *dst, uint8_t *src, int stride, int h, int off1, int off2){\ - asm volatile(\ - "1: \n\t"\ - "movq (%1,%2), %%mm0 \n\t"\ - PAVGB" (%1,%3), %%mm0 \n\t"\ - PAVGB" (%1), %%mm0 \n\t"\ - STORE_OP((%1,%4),%%mm0)\ - "movq %%mm0, (%1,%4) \n\t"\ - "add %5, %1 \n\t"\ - "decl %0 \n\t"\ - "jnz 1b \n\t"\ - :"+g"(h), "+r"(src)\ - :"r"((long)off1), "r"((long)off2),\ - "r"((long)(dst-src)), "r"((long)stride)\ - :"memory"\ - );\ -} - -#define STORE_OP(a,b) PAVGB" "#a","#b" \n\t" -QPEL_2TAP_L3(avg_) -#undef STORE_OP -#define STORE_OP(a,b) -QPEL_2TAP_L3(put_) -#undef STORE_OP -#undef QPEL_2TAP_L3 diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h deleted file mode 100644 index f53b34662..000000000 --- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h +++ /dev/null @@ -1,592 +0,0 @@ -/* - * DSP utils mmx functions are compiled twice for rnd/no_rnd - * Copyright (c) 2000, 2001 Fabrice Bellard. - * Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at> - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
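
The QPEL_2TAP_L3/STORE_OP dance above is worth a second look: a single asm template is expanded twice, and redefining STORE_OP between the expansions splices an extra read-modify-write PAVGB into the avg_ flavour while compiling to nothing for put_. The same generate-by-redefinition idiom in plain C, with hypothetical names chosen just for this sketch:

    #include <stdio.h>

    /* template: NAME##_byte stores src into dst, with an optional
       pre-store step injected through STORE_OP */
    #define MAKE_OP(NAME)                                              \
        static void NAME##_byte(unsigned char *dst, unsigned char src) \
        {                                                              \
            STORE_OP(dst, &src)                                        \
            *dst = src;                                                \
        }

    #define STORE_OP(d, s) *(s) = (unsigned char)((*(d) + *(s) + 1) >> 1);
    MAKE_OP(avg)   /* avg_byte: rounds the new value against what is there */
    #undef STORE_OP

    #define STORE_OP(d, s) /* nothing: plain store */
    MAKE_OP(put)
    #undef STORE_OP
    #undef MAKE_OP

    int main(void)
    {
        unsigned char x = 10;
        put_byte(&x, 20);   /* x == 20 */
        avg_byte(&x, 10);   /* x == (20 + 10 + 1) >> 1 == 15 */
        printf("%d\n", x);
        return 0;
    }

This works because STORE_OP is expanded at the point where MAKE_OP is used, so whichever definition is in force at that moment ends up in the generated function.
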
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - * MMX optimization by Nick Kurshev <nickols_k@mail.ru> - * mostly rewritten by Michael Niedermayer <michaelni@gmx.at> - * and improved by Zdenek Kabelac <kabi@users.sf.net> - */ - -// put_pixels -static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((long)line_size) - :REG_a, "memory"); -} - -static void attribute_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - __asm __volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "add $8, %2 \n\t" - PAVGB(%%mm0, %%mm1, %%mm4, %%mm6) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm5, (%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 16(%2), %%mm1 \n\t" - "add %4, %1 \n\t" - "movq (%1), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - "add $32, %2 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "add %5, %3 \n\t" - "movq %%mm5, (%3) \n\t" - "add %5, %3 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -} - -static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "movq 8(%1), %%mm0 \n\t" - "movq 9(%1), %%mm1 \n\t" - "movq 8(%1, %3), %%mm2 \n\t" - "movq 9(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, 8(%2) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add 
%%"REG_a", %2 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm1 \n\t" - "movq (%1, %3), %%mm2 \n\t" - "movq 1(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "movq 8(%1), %%mm0 \n\t" - "movq 9(%1), %%mm1 \n\t" - "movq 8(%1, %3), %%mm2 \n\t" - "movq 9(%1, %3), %%mm3 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, 8(%2) \n\t" - "movq %%mm5, 8(%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((long)line_size) - :REG_a, "memory"); -} - -static void attribute_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - __asm __volatile( - "testl $1, %0 \n\t" - " jz 1f \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - "add $16, %2 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "decl %0 \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1), %%mm0 \n\t" - "movq (%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 8(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "movq (%1), %%mm0 \n\t" - "movq 16(%2), %%mm1 \n\t" - "movq 8(%1), %%mm2 \n\t" - "movq 24(%2), %%mm3 \n\t" - "add %4, %1 \n\t" - PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5) - "movq %%mm4, (%3) \n\t" - "movq %%mm5, 8(%3) \n\t" - "add %5, %3 \n\t" - "add $32, %2 \n\t" - "subl $2, %0 \n\t" - "jnz 1b \n\t" -#ifdef PIC //Note "+bm" and "+mb" are buggy too (with gcc 3.2.2 at least) and cant be used - :"+m"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#else - :"+b"(h), "+a"(src1), "+c"(src2), "+d"(dst) -#endif - :"S"((long)src1Stride), "D"((long)dstStride) - :"memory"); -} - -static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"),%%mm2 \n\t" - PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"),%%mm0 \n\t" - PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) - "movq %%mm4, (%2) \n\t" - "movq %%mm5, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((long)line_size) - :REG_a, "memory"); -} - -static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_ZERO(mm7); - SET_RND(mm6); // =2 for rnd and =1 for no_rnd version - __asm __volatile( - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm4 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" - "add %3, %1 \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm2 
\n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "paddusw %%mm2, %%mm0 \n\t" - "paddusw %%mm3, %%mm1 \n\t" - "paddusw %%mm6, %%mm4 \n\t" - "paddusw %%mm6, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "psrlw $2, %%mm4 \n\t" - "psrlw $2, %%mm5 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "movq %%mm4, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "movq (%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 - "movq 1(%1, %%"REG_a"), %%mm4 \n\t" - "movq %%mm2, %%mm3 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm2, %%mm4 \n\t" - "paddusw %%mm3, %%mm5 \n\t" - "paddusw %%mm6, %%mm0 \n\t" - "paddusw %%mm6, %%mm1 \n\t" - "paddusw %%mm4, %%mm0 \n\t" - "paddusw %%mm5, %%mm1 \n\t" - "psrlw $2, %%mm0 \n\t" - "psrlw $2, %%mm1 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "movq %%mm0, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "subl $2, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels) - :"D"(block), "r"((long)line_size) - :REG_a, "memory"); -} - -// avg_pixels -static void attribute_unused DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm __volatile( - "movd %0, %%mm0 \n\t" - "movd %1, %%mm1 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - "movd %%mm2, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -// in case more speed is needed - unroling would certainly help -static void DEF(avg, pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm __volatile( - "movq %0, %%mm0 \n\t" - "movq %1, %%mm1 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -static void DEF(avg, pixels16)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm __volatile( - "movq %0, %%mm0 \n\t" - "movq %1, %%mm1 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, %0 \n\t" - "movq 8%0, %%mm0 \n\t" - "movq 8%1, %%mm1 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - "movq %%mm2, 8%0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } - while (--h); -} - -static void DEF(avg, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm __volatile( - "movq %1, %%mm0 \n\t" - "movq 1%1, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - PAVGB(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } while (--h); -} - -static __attribute__((unused)) void DEF(avg, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm __volatile( - "movq %1, %%mm0 \n\t" - "movq %2, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - PAVGB(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - :"+m"(*dst) - :"m"(*src1), "m"(*src2) - :"memory"); - dst += dstStride; - src1 += src1Stride; - src2 += 8; - } while (--h); -} - -static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm 
__volatile( - "movq %1, %%mm0 \n\t" - "movq 1%1, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - PAVGB(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - "movq 8%1, %%mm0 \n\t" - "movq 9%1, %%mm1 \n\t" - "movq 8%0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - PAVGB(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, 8%0 \n\t" - :"+m"(*block) - :"m"(*pixels) - :"memory"); - pixels += line_size; - block += line_size; - } while (--h); -} - -static __attribute__((unused)) void DEF(avg, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h) -{ - MOVQ_BFE(mm6); - JUMPALIGN(); - do { - __asm __volatile( - "movq %1, %%mm0 \n\t" - "movq %2, %%mm1 \n\t" - "movq %0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - PAVGB(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, %0 \n\t" - "movq 8%1, %%mm0 \n\t" - "movq 8%2, %%mm1 \n\t" - "movq 8%0, %%mm3 \n\t" - PAVGB(%%mm0, %%mm1, %%mm2, %%mm6) - PAVGB(%%mm3, %%mm2, %%mm0, %%mm6) - "movq %%mm0, 8%0 \n\t" - :"+m"(*dst) - :"m"(*src1), "m"(*src2) - :"memory"); - dst += dstStride; - src1 += src1Stride; - src2 += 16; - } while (--h); -} - -static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_BFE(mm6); - __asm __volatile( - "lea (%3, %3), %%"REG_a" \n\t" - "movq (%1), %%mm0 \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5) - "movq (%2), %%mm3 \n\t" - PAVGB(%%mm3, %%mm4, %%mm0, %%mm6) - "movq (%2, %3), %%mm3 \n\t" - PAVGB(%%mm3, %%mm5, %%mm1, %%mm6) - "movq %%mm0, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - - "movq (%1, %3), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5) - "movq (%2), %%mm3 \n\t" - PAVGB(%%mm3, %%mm4, %%mm2, %%mm6) - "movq (%2, %3), %%mm3 \n\t" - PAVGB(%%mm3, %%mm5, %%mm1, %%mm6) - "movq %%mm2, (%2) \n\t" - "movq %%mm1, (%2, %3) \n\t" - "add %%"REG_a", %1 \n\t" - "add %%"REG_a", %2 \n\t" - - "subl $4, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels), "+D"(block) - :"r"((long)line_size) - :REG_a, "memory"); -} - -// this routine is 'slightly' suboptimal but mostly unused -static void DEF(avg, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h) -{ - MOVQ_ZERO(mm7); - SET_RND(mm6); // =2 for rnd and =1 for no_rnd version - __asm __volatile( - "movq (%1), %%mm0 \n\t" - "movq 1(%1), %%mm4 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "xor %%"REG_a", %%"REG_a" \n\t" - "add %3, %1 \n\t" - ASMALIGN(3) - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm2, %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "paddusw %%mm2, %%mm0 \n\t" - "paddusw %%mm3, %%mm1 \n\t" - "paddusw %%mm6, %%mm4 \n\t" - "paddusw %%mm6, %%mm5 \n\t" - "paddusw %%mm0, %%mm4 \n\t" - "paddusw %%mm1, %%mm5 \n\t" - "psrlw $2, %%mm4 \n\t" - "psrlw $2, %%mm5 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "packuswb %%mm5, %%mm4 \n\t" - "pcmpeqd %%mm2, %%mm2 \n\t" - "paddb %%mm2, %%mm2 \n\t" - PAVGB(%%mm3, %%mm4, %%mm5, %%mm2) - "movq %%mm5, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "movq 
(%1, %%"REG_a"), %%mm2 \n\t" // 0 <-> 2 1 <-> 3 - "movq 1(%1, %%"REG_a"), %%mm4 \n\t" - "movq %%mm2, %%mm3 \n\t" - "movq %%mm4, %%mm5 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpcklbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "punpckhbw %%mm7, %%mm5 \n\t" - "paddusw %%mm2, %%mm4 \n\t" - "paddusw %%mm3, %%mm5 \n\t" - "paddusw %%mm6, %%mm0 \n\t" - "paddusw %%mm6, %%mm1 \n\t" - "paddusw %%mm4, %%mm0 \n\t" - "paddusw %%mm5, %%mm1 \n\t" - "psrlw $2, %%mm0 \n\t" - "psrlw $2, %%mm1 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "packuswb %%mm1, %%mm0 \n\t" - "pcmpeqd %%mm2, %%mm2 \n\t" - "paddb %%mm2, %%mm2 \n\t" - PAVGB(%%mm3, %%mm0, %%mm1, %%mm2) - "movq %%mm1, (%2, %%"REG_a") \n\t" - "add %3, %%"REG_a" \n\t" - - "subl $2, %0 \n\t" - "jnz 1b \n\t" - :"+g"(h), "+S"(pixels) - :"D"(block), "r"((long)line_size) - :REG_a, "memory"); -} - -//FIXME optimize -static void DEF(put, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put, pixels8_y2)(block , pixels , line_size, h); - DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h); -} - -static void DEF(put, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(put, pixels8_xy2)(block , pixels , line_size, h); - DEF(put, pixels8_xy2)(block+8, pixels+8, line_size, h); -} - -static void DEF(avg, pixels16_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg, pixels8_y2)(block , pixels , line_size, h); - DEF(avg, pixels8_y2)(block+8, pixels+8, line_size, h); -} - -static void DEF(avg, pixels16_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){ - DEF(avg, pixels8_xy2)(block , pixels , line_size, h); - DEF(avg, pixels8_xy2)(block+8, pixels+8, line_size, h); -} - - diff --git a/src/libffmpeg/libavcodec/i386/fdct_mmx.c b/src/libffmpeg/libavcodec/i386/fdct_mmx.c deleted file mode 100644 index 7e2682a4a..000000000 --- a/src/libffmpeg/libavcodec/i386/fdct_mmx.c +++ /dev/null @@ -1,566 +0,0 @@ -/* - * MMX optimized forward DCT - * The gcc porting is Copyright (c) 2001 Fabrice Bellard. - * cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at> - * SSE2 optimization is Copyright (c) 2004 Denes Balatoni. - * - * from fdctam32.c - AP922 MMX(3D-Now) forward-DCT - * - * Intel Application Note AP-922 - fast, precise implementation of DCT - * http://developer.intel.com/vtune/cbts/appnotes.htm - * - * Also of inspiration: - * a page about fdct at http://www.geocities.com/ssavekar/dct.htm - * Skal's fdct at http://skal.planet-d.net/coding/dct.html - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "common.h" -#include "../dsputil.h" -#include "mmx.h" - -#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align))) - -////////////////////////////////////////////////////////////////////// -// -// constants for the forward DCT -// ----------------------------- -// -// Be sure to check that your compiler is aligning all constants to QWORD -// (8-byte) memory boundaries! Otherwise the unaligned memory access will -// severely stall MMX execution. -// -////////////////////////////////////////////////////////////////////// - -#define BITS_FRW_ACC 3 //; 2 or 3 for accuracy -#define SHIFT_FRW_COL BITS_FRW_ACC -#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17 - 3) -#define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1)) -//#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1)) - -//concatenated table, for forward DCT transformation -static const int16_t fdct_tg_all_16[] ATTR_ALIGN(8) = { - 13036, 13036, 13036, 13036, // tg * (2<<16) + 0.5 - 27146, 27146, 27146, 27146, // tg * (2<<16) + 0.5 - -21746, -21746, -21746, -21746, // tg * (2<<16) + 0.5 -}; - -static const int16_t ocos_4_16[4] ATTR_ALIGN(8) = { - 23170, 23170, 23170, 23170, //cos * (2<<15) + 0.5 -}; - -static const int64_t fdct_one_corr ATTR_ALIGN(8) = 0x0001000100010001LL; - -static const int32_t fdct_r_row[2] ATTR_ALIGN(8) = {RND_FRW_ROW, RND_FRW_ROW }; - -static struct -{ - const int32_t fdct_r_row_sse2[4] ATTR_ALIGN(16); -} fdct_r_row_sse2 ATTR_ALIGN(16)= -{{ - RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW -}}; -//static const long fdct_r_row_sse2[4] ATTR_ALIGN(16) = {RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW}; - -static const int16_t tab_frw_01234567[] ATTR_ALIGN(8) = { // forward_dct coeff table - 16384, 16384, 22725, 19266, - 16384, 16384, 12873, 4520, - 21407, 8867, 19266, -4520, - -8867, -21407, -22725, -12873, - 16384, -16384, 12873, -22725, - -16384, 16384, 4520, 19266, - 8867, -21407, 4520, -12873, - 21407, -8867, 19266, -22725, - - 22725, 22725, 31521, 26722, - 22725, 22725, 17855, 6270, - 29692, 12299, 26722, -6270, - -12299, -29692, -31521, -17855, - 22725, -22725, 17855, -31521, - -22725, 22725, 6270, 26722, - 12299, -29692, 6270, -17855, - 29692, -12299, 26722, -31521, - - 21407, 21407, 29692, 25172, - 21407, 21407, 16819, 5906, - 27969, 11585, 25172, -5906, - -11585, -27969, -29692, -16819, - 21407, -21407, 16819, -29692, - -21407, 21407, 5906, 25172, - 11585, -27969, 5906, -16819, - 27969, -11585, 25172, -29692, - - 19266, 19266, 26722, 22654, - 19266, 19266, 15137, 5315, - 25172, 10426, 22654, -5315, - -10426, -25172, -26722, -15137, - 19266, -19266, 15137, -26722, - -19266, 19266, 5315, 22654, - 10426, -25172, 5315, -15137, - 25172, -10426, 22654, -26722, - - 16384, 16384, 22725, 19266, - 16384, 16384, 12873, 4520, - 21407, 8867, 19266, -4520, - -8867, -21407, -22725, -12873, - 16384, -16384, 12873, -22725, - -16384, 16384, 4520, 19266, - 8867, -21407, 4520, -12873, - 21407, -8867, 19266, -22725, - - 19266, 19266, 26722, 22654, - 19266, 19266, 15137, 5315, - 25172, 10426, 22654, -5315, - -10426, -25172, -26722, -15137, - 19266, -19266, 15137, -26722, - -19266, 19266, 5315, 22654, - 10426, -25172, 5315, -15137, - 25172, -10426, 22654, -26722, - - 21407, 21407, 29692, 25172, - 21407, 21407, 16819, 5906, - 27969, 11585, 25172, -5906, - -11585, -27969, -29692, -16819, - 21407, -21407, 16819, -29692, 
- -21407, 21407, 5906, 25172, - 11585, -27969, 5906, -16819, - 27969, -11585, 25172, -29692, - - 22725, 22725, 31521, 26722, - 22725, 22725, 17855, 6270, - 29692, 12299, 26722, -6270, - -12299, -29692, -31521, -17855, - 22725, -22725, 17855, -31521, - -22725, 22725, 6270, 26722, - 12299, -29692, 6270, -17855, - 29692, -12299, 26722, -31521, -}; - -static struct -{ - const int16_t tab_frw_01234567_sse2[256] ATTR_ALIGN(16); -} tab_frw_01234567_sse2 ATTR_ALIGN(16) = -{{ -//static const int16_t tab_frw_01234567_sse2[] ATTR_ALIGN(16) = { // forward_dct coeff table -#define TABLE_SSE2 C4, C4, C1, C3, -C6, -C2, -C1, -C5, \ - C4, C4, C5, C7, C2, C6, C3, -C7, \ - -C4, C4, C7, C3, C6, -C2, C7, -C5, \ - C4, -C4, C5, -C1, C2, -C6, C3, -C1, -// c1..c7 * cos(pi/4) * 2^15 -#define C1 22725 -#define C2 21407 -#define C3 19266 -#define C4 16384 -#define C5 12873 -#define C6 8867 -#define C7 4520 -TABLE_SSE2 - -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 -#define C1 31521 -#define C2 29692 -#define C3 26722 -#define C4 22725 -#define C5 17855 -#define C6 12299 -#define C7 6270 -TABLE_SSE2 - -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 -#define C1 29692 -#define C2 27969 -#define C3 25172 -#define C4 21407 -#define C5 16819 -#define C6 11585 -#define C7 5906 -TABLE_SSE2 - -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 -#define C1 26722 -#define C2 25172 -#define C3 22654 -#define C4 19266 -#define C5 15137 -#define C6 10426 -#define C7 5315 -TABLE_SSE2 - -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 -#define C1 22725 -#define C2 21407 -#define C3 19266 -#define C4 16384 -#define C5 12873 -#define C6 8867 -#define C7 4520 -TABLE_SSE2 - -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 -#define C1 26722 -#define C2 25172 -#define C3 22654 -#define C4 19266 -#define C5 15137 -#define C6 10426 -#define C7 5315 -TABLE_SSE2 - -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 -#define C1 29692 -#define C2 27969 -#define C3 25172 -#define C4 21407 -#define C5 16819 -#define C6 11585 -#define C7 5906 -TABLE_SSE2 - -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 -#define C1 31521 -#define C2 29692 -#define C3 26722 -#define C4 22725 -#define C5 17855 -#define C6 12299 -#define C7 6270 -TABLE_SSE2 -}}; - - -static av_always_inline void fdct_col(const int16_t *in, int16_t *out, int offset) -{ - movq_m2r(*(in + offset + 1 * 8), mm0); - movq_m2r(*(in + offset + 6 * 8), mm1); - movq_r2r(mm0, mm2); - movq_m2r(*(in + offset + 2 * 8), mm3); - paddsw_r2r(mm1, mm0); - movq_m2r(*(in + offset + 5 * 8), mm4); - psllw_i2r(SHIFT_FRW_COL, mm0); - movq_m2r(*(in + offset + 0 * 8), mm5); - paddsw_r2r(mm3, mm4); - paddsw_m2r(*(in + offset + 7 * 8), mm5); - psllw_i2r(SHIFT_FRW_COL, mm4); - movq_r2r(mm0, mm6); - psubsw_r2r(mm1, mm2); - movq_m2r(*(fdct_tg_all_16 + 4), mm1); - psubsw_r2r(mm4, mm0); - movq_m2r(*(in + offset + 3 * 8), mm7); - pmulhw_r2r(mm0, mm1); - paddsw_m2r(*(in + offset + 4 * 8), mm7); - psllw_i2r(SHIFT_FRW_COL, mm5); - paddsw_r2r(mm4, mm6); - psllw_i2r(SHIFT_FRW_COL, mm7); - movq_r2r(mm5, mm4); - psubsw_r2r(mm7, mm5); - paddsw_r2r(mm5, mm1); - paddsw_r2r(mm7, mm4); - por_m2r(fdct_one_corr, mm1); - psllw_i2r(SHIFT_FRW_COL + 1, mm2); - pmulhw_m2r(*(fdct_tg_all_16 + 4), mm5); - movq_r2r(mm4, mm7); - psubsw_m2r(*(in + offset + 5 * 8), mm3); - psubsw_r2r(mm6, mm4); - movq_r2m(mm1, *(out + offset + 2 * 8)); - 
paddsw_r2r(mm6, mm7); - movq_m2r(*(in + offset + 3 * 8), mm1); - psllw_i2r(SHIFT_FRW_COL + 1, mm3); - psubsw_m2r(*(in + offset + 4 * 8), mm1); - movq_r2r(mm2, mm6); - movq_r2m(mm4, *(out + offset + 4 * 8)); - paddsw_r2r(mm3, mm2); - pmulhw_m2r(*ocos_4_16, mm2); - psubsw_r2r(mm3, mm6); - pmulhw_m2r(*ocos_4_16, mm6); - psubsw_r2r(mm0, mm5); - por_m2r(fdct_one_corr, mm5); - psllw_i2r(SHIFT_FRW_COL, mm1); - por_m2r(fdct_one_corr, mm2); - movq_r2r(mm1, mm4); - movq_m2r(*(in + offset + 0 * 8), mm3); - paddsw_r2r(mm6, mm1); - psubsw_m2r(*(in + offset + 7 * 8), mm3); - psubsw_r2r(mm6, mm4); - movq_m2r(*(fdct_tg_all_16 + 0), mm0); - psllw_i2r(SHIFT_FRW_COL, mm3); - movq_m2r(*(fdct_tg_all_16 + 8), mm6); - pmulhw_r2r(mm1, mm0); - movq_r2m(mm7, *(out + offset + 0 * 8)); - pmulhw_r2r(mm4, mm6); - movq_r2m(mm5, *(out + offset + 6 * 8)); - movq_r2r(mm3, mm7); - movq_m2r(*(fdct_tg_all_16 + 8), mm5); - psubsw_r2r(mm2, mm7); - paddsw_r2r(mm2, mm3); - pmulhw_r2r(mm7, mm5); - paddsw_r2r(mm3, mm0); - paddsw_r2r(mm4, mm6); - pmulhw_m2r(*(fdct_tg_all_16 + 0), mm3); - por_m2r(fdct_one_corr, mm0); - paddsw_r2r(mm7, mm5); - psubsw_r2r(mm6, mm7); - movq_r2m(mm0, *(out + offset + 1 * 8)); - paddsw_r2r(mm4, mm5); - movq_r2m(mm7, *(out + offset + 3 * 8)); - psubsw_r2r(mm1, mm3); - movq_r2m(mm5, *(out + offset + 5 * 8)); - movq_r2m(mm3, *(out + offset + 7 * 8)); -} - - -static av_always_inline void fdct_row_sse2(const int16_t *in, int16_t *out) -{ - asm volatile( -#define FDCT_ROW_SSE2_H1(i,t) \ - "movq " #i "(%0), %%xmm2 \n\t" \ - "movq " #i "+8(%0), %%xmm0 \n\t" \ - "movdqa " #t "+32(%1), %%xmm3 \n\t" \ - "movdqa " #t "+48(%1), %%xmm7 \n\t" \ - "movdqa " #t "(%1), %%xmm4 \n\t" \ - "movdqa " #t "+16(%1), %%xmm5 \n\t" - -#define FDCT_ROW_SSE2_H2(i,t) \ - "movq " #i "(%0), %%xmm2 \n\t" \ - "movq " #i "+8(%0), %%xmm0 \n\t" \ - "movdqa " #t "+32(%1), %%xmm3 \n\t" \ - "movdqa " #t "+48(%1), %%xmm7 \n\t" - -#define FDCT_ROW_SSE2(i) \ - "movq %%xmm2, %%xmm1 \n\t" \ - "pshuflw $27, %%xmm0, %%xmm0 \n\t" \ - "paddsw %%xmm0, %%xmm1 \n\t" \ - "psubsw %%xmm0, %%xmm2 \n\t" \ - "punpckldq %%xmm2, %%xmm1 \n\t" \ - "pshufd $78, %%xmm1, %%xmm2 \n\t" \ - "pmaddwd %%xmm2, %%xmm3 \n\t" \ - "pmaddwd %%xmm1, %%xmm7 \n\t" \ - "pmaddwd %%xmm5, %%xmm2 \n\t" \ - "pmaddwd %%xmm4, %%xmm1 \n\t" \ - "paddd %%xmm7, %%xmm3 \n\t" \ - "paddd %%xmm2, %%xmm1 \n\t" \ - "paddd %%xmm6, %%xmm3 \n\t" \ - "paddd %%xmm6, %%xmm1 \n\t" \ - "psrad %3, %%xmm3 \n\t" \ - "psrad %3, %%xmm1 \n\t" \ - "packssdw %%xmm3, %%xmm1 \n\t" \ - "movdqa %%xmm1, " #i "(%4) \n\t" - - "movdqa (%2), %%xmm6 \n\t" - FDCT_ROW_SSE2_H1(0,0) - FDCT_ROW_SSE2(0) - FDCT_ROW_SSE2_H2(64,0) - FDCT_ROW_SSE2(64) - - FDCT_ROW_SSE2_H1(16,64) - FDCT_ROW_SSE2(16) - FDCT_ROW_SSE2_H2(112,64) - FDCT_ROW_SSE2(112) - - FDCT_ROW_SSE2_H1(32,128) - FDCT_ROW_SSE2(32) - FDCT_ROW_SSE2_H2(96,128) - FDCT_ROW_SSE2(96) - - FDCT_ROW_SSE2_H1(48,192) - FDCT_ROW_SSE2(48) - FDCT_ROW_SSE2_H2(80,192) - FDCT_ROW_SSE2(80) - : - : "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2), "r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out) - ); -} - -static av_always_inline void fdct_row_mmx2(const int16_t *in, int16_t *out, const int16_t *table) -{ - pshufw_m2r(*(in + 4), mm5, 0x1B); - movq_m2r(*(in + 0), mm0); - movq_r2r(mm0, mm1); - paddsw_r2r(mm5, mm0); - psubsw_r2r(mm5, mm1); - movq_r2r(mm0, mm2); - punpckldq_r2r(mm1, mm0); - punpckhdq_r2r(mm1, mm2); - movq_m2r(*(table + 0), mm1); - movq_m2r(*(table + 4), mm3); - movq_m2r(*(table + 8), mm4); - movq_m2r(*(table + 12), mm5); - movq_m2r(*(table + 16), 
mm6); - movq_m2r(*(table + 20), mm7); - pmaddwd_r2r(mm0, mm1); - pmaddwd_r2r(mm2, mm3); - pmaddwd_r2r(mm0, mm4); - pmaddwd_r2r(mm2, mm5); - pmaddwd_r2r(mm0, mm6); - pmaddwd_r2r(mm2, mm7); - pmaddwd_m2r(*(table + 24), mm0); - pmaddwd_m2r(*(table + 28), mm2); - paddd_r2r(mm1, mm3); - paddd_r2r(mm4, mm5); - paddd_r2r(mm6, mm7); - paddd_r2r(mm0, mm2); - movq_m2r(*fdct_r_row, mm0); - paddd_r2r(mm0, mm3); - paddd_r2r(mm0, mm5); - paddd_r2r(mm0, mm7); - paddd_r2r(mm0, mm2); - psrad_i2r(SHIFT_FRW_ROW, mm3); - psrad_i2r(SHIFT_FRW_ROW, mm5); - psrad_i2r(SHIFT_FRW_ROW, mm7); - psrad_i2r(SHIFT_FRW_ROW, mm2); - packssdw_r2r(mm5, mm3); - packssdw_r2r(mm2, mm7); - movq_r2m(mm3, *(out + 0)); - movq_r2m(mm7, *(out + 4)); -} - -static av_always_inline void fdct_row_mmx(const int16_t *in, int16_t *out, const int16_t *table) -{ -//FIXME reorder (i dont have a old mmx only cpu here to benchmark ...) - movd_m2r(*(in + 6), mm1); - punpcklwd_m2r(*(in + 4), mm1); - movq_r2r(mm1, mm2); - psrlq_i2r(0x20, mm1); - movq_m2r(*(in + 0), mm0); - punpcklwd_r2r(mm2, mm1); - movq_r2r(mm0, mm5); - paddsw_r2r(mm1, mm0); - psubsw_r2r(mm1, mm5); - movq_r2r(mm0, mm2); - punpckldq_r2r(mm5, mm0); - punpckhdq_r2r(mm5, mm2); - movq_m2r(*(table + 0), mm1); - movq_m2r(*(table + 4), mm3); - movq_m2r(*(table + 8), mm4); - movq_m2r(*(table + 12), mm5); - movq_m2r(*(table + 16), mm6); - movq_m2r(*(table + 20), mm7); - pmaddwd_r2r(mm0, mm1); - pmaddwd_r2r(mm2, mm3); - pmaddwd_r2r(mm0, mm4); - pmaddwd_r2r(mm2, mm5); - pmaddwd_r2r(mm0, mm6); - pmaddwd_r2r(mm2, mm7); - pmaddwd_m2r(*(table + 24), mm0); - pmaddwd_m2r(*(table + 28), mm2); - paddd_r2r(mm1, mm3); - paddd_r2r(mm4, mm5); - paddd_r2r(mm6, mm7); - paddd_r2r(mm0, mm2); - movq_m2r(*fdct_r_row, mm0); - paddd_r2r(mm0, mm3); - paddd_r2r(mm0, mm5); - paddd_r2r(mm0, mm7); - paddd_r2r(mm0, mm2); - psrad_i2r(SHIFT_FRW_ROW, mm3); - psrad_i2r(SHIFT_FRW_ROW, mm5); - psrad_i2r(SHIFT_FRW_ROW, mm7); - psrad_i2r(SHIFT_FRW_ROW, mm2); - packssdw_r2r(mm5, mm3); - packssdw_r2r(mm2, mm7); - movq_r2m(mm3, *(out + 0)); - movq_r2m(mm7, *(out + 4)); -} - -void ff_fdct_mmx(int16_t *block) -{ - int64_t align_tmp[16] ATTR_ALIGN(8); - int16_t * block1= (int16_t*)align_tmp; - const int16_t *table= tab_frw_01234567; - int i; - - fdct_col(block, block1, 0); - fdct_col(block, block1, 4); - - for(i=8;i>0;i--) { - fdct_row_mmx(block1, block, table); - block1 += 8; - table += 32; - block += 8; - } -} - -void ff_fdct_mmx2(int16_t *block) -{ - int64_t align_tmp[16] ATTR_ALIGN(8); - int16_t *block1= (int16_t*)align_tmp; - const int16_t *table= tab_frw_01234567; - int i; - - fdct_col(block, block1, 0); - fdct_col(block, block1, 4); - - for(i=8;i>0;i--) { - fdct_row_mmx2(block1, block, table); - block1 += 8; - table += 32; - block += 8; - } -} - -void ff_fdct_sse2(int16_t *block) -{ - int64_t align_tmp[16] ATTR_ALIGN(16); - int16_t * const block1= (int16_t*)align_tmp; - - fdct_col(block, block1, 0); - fdct_col(block, block1, 4); - - fdct_row_sse2(block1, block); -} - diff --git a/src/libffmpeg/libavcodec/i386/fft_3dn.c b/src/libffmpeg/libavcodec/i386/fft_3dn.c deleted file mode 100644 index 8087f1932..000000000 --- a/src/libffmpeg/libavcodec/i386/fft_3dn.c +++ /dev/null @@ -1,125 +0,0 @@ -/* - * FFT/MDCT transform with 3DNow! optimizations - * Copyright (c) 2006 Zuxy MENG Jie, Loren Merritt - * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard. - * - * This file is part of FFmpeg. 
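/*
 * Aside (sketch, not part of the original file): the row passes above lean
 * entirely on pmaddwd, which multiplies four int16 pairs and sums adjacent
 * products into two int32 lanes.  Scalar model (illustrative helper):
 */
static void pmaddwd_ref(const short a[4], const short b[4], int out[2])
{
    out[0] = (int)a[0] * b[0] + (int)a[1] * b[1];
    out[1] = (int)a[2] * b[2] + (int)a[3] * b[3];
}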
- * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "../dsputil.h" - -static const int p1m1[2] __attribute__((aligned(8))) = - { 0, 1 << 31 }; - -static const int m1p1[2] __attribute__((aligned(8))) = - { 1 << 31, 0 }; - -void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z) -{ - int ln = s->nbits; - long i, j; - long nblocks, nloops; - FFTComplex *p, *cptr; - - asm volatile( - /* FEMMS is not a must here but recommended by AMD */ - "femms \n\t" - "movq %0, %%mm7 \n\t" - ::"m"(*(s->inverse ? m1p1 : p1m1)) - ); - - i = 8 << ln; - asm volatile( - "1: \n\t" - "sub $32, %0 \n\t" - "movq (%0,%1), %%mm0 \n\t" - "movq 16(%0,%1), %%mm1 \n\t" - "movq 8(%0,%1), %%mm2 \n\t" - "movq 24(%0,%1), %%mm3 \n\t" - "movq %%mm0, %%mm4 \n\t" - "movq %%mm1, %%mm5 \n\t" - "pfadd %%mm2, %%mm0 \n\t" - "pfadd %%mm3, %%mm1 \n\t" - "pfsub %%mm2, %%mm4 \n\t" - "pfsub %%mm3, %%mm5 \n\t" - "movq %%mm0, %%mm2 \n\t" - "punpckldq %%mm5, %%mm6 \n\t" - "punpckhdq %%mm6, %%mm5 \n\t" - "movq %%mm4, %%mm3 \n\t" - "pxor %%mm7, %%mm5 \n\t" - "pfadd %%mm1, %%mm0 \n\t" - "pfadd %%mm5, %%mm4 \n\t" - "pfsub %%mm1, %%mm2 \n\t" - "pfsub %%mm5, %%mm3 \n\t" - "movq %%mm0, (%0,%1) \n\t" - "movq %%mm4, 8(%0,%1) \n\t" - "movq %%mm2, 16(%0,%1) \n\t" - "movq %%mm3, 24(%0,%1) \n\t" - "jg 1b \n\t" - :"+r"(i) - :"r"(z) - ); - /* pass 2 .. ln-1 */ - - nblocks = 1 << (ln-3); - nloops = 1 << 2; - cptr = s->exptab1; - do { - p = z; - j = nblocks; - do { - i = nloops*8; - asm volatile( - "1: \n\t" - "sub $16, %0 \n\t" - "movq (%1,%0), %%mm0 \n\t" - "movq 8(%1,%0), %%mm1 \n\t" - "movq (%2,%0), %%mm2 \n\t" - "movq 8(%2,%0), %%mm3 \n\t" - "movq %%mm2, %%mm4 \n\t" - "movq %%mm3, %%mm5 \n\t" - "punpckldq %%mm2, %%mm2 \n\t" - "punpckldq %%mm3, %%mm3 \n\t" - "punpckhdq %%mm4, %%mm4 \n\t" - "punpckhdq %%mm5, %%mm5 \n\t" - "pfmul (%3,%0,2), %%mm2 \n\t" // cre*re cim*re - "pfmul 8(%3,%0,2), %%mm3 \n\t" - "pfmul 16(%3,%0,2), %%mm4 \n\t" // -cim*im cre*im - "pfmul 24(%3,%0,2), %%mm5 \n\t" - "pfadd %%mm2, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im - "pfadd %%mm3, %%mm5 \n\t" - "movq %%mm0, %%mm2 \n\t" - "movq %%mm1, %%mm3 \n\t" - "pfadd %%mm4, %%mm0 \n\t" - "pfadd %%mm5, %%mm1 \n\t" - "pfsub %%mm4, %%mm2 \n\t" - "pfsub %%mm5, %%mm3 \n\t" - "movq %%mm0, (%1,%0) \n\t" - "movq %%mm1, 8(%1,%0) \n\t" - "movq %%mm2, (%2,%0) \n\t" - "movq %%mm3, 8(%2,%0) \n\t" - "jg 1b \n\t" - :"+r"(i) - :"r"(p), "r"(p + nloops), "r"(cptr) - ); - p += nloops*2; - } while (--j); - cptr += nloops*2; - nblocks >>= 1; - nloops <<= 1; - } while (nblocks != 0); - asm volatile("femms"); -} diff --git a/src/libffmpeg/libavcodec/i386/fft_3dn2.c b/src/libffmpeg/libavcodec/i386/fft_3dn2.c deleted file mode 100644 index a4fe5f0b6..000000000 --- a/src/libffmpeg/libavcodec/i386/fft_3dn2.c +++ /dev/null @@ -1,210 +0,0 @@ -/* - * FFT/MDCT transform with Extended 3DNow! 
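/*
 * Aside (sketch, not part of the original file): the p1m1/m1p1 constants
 * above negate one float of each pair by XOR-ing the IEEE-754 sign bit;
 * which mask gets loaded into mm7 selects the forward or inverse transform.
 * Scalar model (illustrative helper):
 */
#include <stdint.h>
#include <string.h>

static float flip_sign_ref(float x, uint32_t mask)   /* mask: 0 or 1u << 31 */
{
    uint32_t bits;
    memcpy(&bits, &x, sizeof bits);
    bits ^= mask;
    memcpy(&x, &bits, sizeof x);
    return x;
}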
optimizations - * Copyright (c) 2006 Zuxy MENG Jie, Loren Merritt - * Based on fft_sse.c copyright (c) 2002 Fabrice Bellard. - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "../dsputil.h" - -static const int p1m1[2] __attribute__((aligned(8))) = - { 0, 1 << 31 }; - -static const int m1p1[2] __attribute__((aligned(8))) = - { 1 << 31, 0 }; - -void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z) -{ - int ln = s->nbits; - long i, j; - long nblocks, nloops; - FFTComplex *p, *cptr; - - asm volatile( - /* FEMMS is not a must here but recommended by AMD */ - "femms \n\t" - "movq %0, %%mm7 \n\t" - ::"m"(*(s->inverse ? m1p1 : p1m1)) - ); - - i = 8 << ln; - asm volatile( - "1: \n\t" - "sub $32, %0 \n\t" - "movq (%0,%1), %%mm0 \n\t" - "movq 16(%0,%1), %%mm1 \n\t" - "movq 8(%0,%1), %%mm2 \n\t" - "movq 24(%0,%1), %%mm3 \n\t" - "movq %%mm0, %%mm4 \n\t" - "movq %%mm1, %%mm5 \n\t" - "pfadd %%mm2, %%mm0 \n\t" - "pfadd %%mm3, %%mm1 \n\t" - "pfsub %%mm2, %%mm4 \n\t" - "pfsub %%mm3, %%mm5 \n\t" - "movq %%mm0, %%mm2 \n\t" - "pswapd %%mm5, %%mm5 \n\t" - "movq %%mm4, %%mm3 \n\t" - "pxor %%mm7, %%mm5 \n\t" - "pfadd %%mm1, %%mm0 \n\t" - "pfadd %%mm5, %%mm4 \n\t" - "pfsub %%mm1, %%mm2 \n\t" - "pfsub %%mm5, %%mm3 \n\t" - "movq %%mm0, (%0,%1) \n\t" - "movq %%mm4, 8(%0,%1) \n\t" - "movq %%mm2, 16(%0,%1) \n\t" - "movq %%mm3, 24(%0,%1) \n\t" - "jg 1b \n\t" - :"+r"(i) - :"r"(z) - ); - /* pass 2 .. 
ln-1 */ - - nblocks = 1 << (ln-3); - nloops = 1 << 2; - cptr = s->exptab1; - do { - p = z; - j = nblocks; - do { - i = nloops*8; - asm volatile( - "1: \n\t" - "sub $16, %0 \n\t" - "movq (%1,%0), %%mm0 \n\t" - "movq 8(%1,%0), %%mm1 \n\t" - "movq (%2,%0), %%mm2 \n\t" - "movq 8(%2,%0), %%mm3 \n\t" - "movq (%3,%0,2), %%mm4 \n\t" - "movq 8(%3,%0,2), %%mm5 \n\t" - "pswapd %%mm4, %%mm6 \n\t" // no need for cptr[2] & cptr[3] - "pswapd %%mm5, %%mm7 \n\t" - "pfmul %%mm2, %%mm4 \n\t" // cre*re cim*im - "pfmul %%mm3, %%mm5 \n\t" - "pfmul %%mm2, %%mm6 \n\t" // cim*re cre*im - "pfmul %%mm3, %%mm7 \n\t" - "pfpnacc %%mm6, %%mm4 \n\t" // cre*re-cim*im cim*re+cre*im - "pfpnacc %%mm7, %%mm5 \n\t" - "movq %%mm0, %%mm2 \n\t" - "movq %%mm1, %%mm3 \n\t" - "pfadd %%mm4, %%mm0 \n\t" - "pfadd %%mm5, %%mm1 \n\t" - "pfsub %%mm4, %%mm2 \n\t" - "pfsub %%mm5, %%mm3 \n\t" - "movq %%mm0, (%1,%0) \n\t" - "movq %%mm1, 8(%1,%0) \n\t" - "movq %%mm2, (%2,%0) \n\t" - "movq %%mm3, 8(%2,%0) \n\t" - "jg 1b \n\t" - :"+r"(i) - :"r"(p), "r"(p + nloops), "r"(cptr) - ); - p += nloops*2; - } while (--j); - cptr += nloops*2; - nblocks >>= 1; - nloops <<= 1; - } while (nblocks != 0); - asm volatile("femms"); -} - -void ff_imdct_calc_3dn2(MDCTContext *s, FFTSample *output, - const FFTSample *input, FFTSample *tmp) -{ - long k, n8, n4, n2, n; - const uint16_t *revtab = s->fft.revtab; - const FFTSample *tcos = s->tcos; - const FFTSample *tsin = s->tsin; - const FFTSample *in1, *in2; - FFTComplex *z = (FFTComplex *)tmp; - - n = 1 << s->nbits; - n2 = n >> 1; - n4 = n >> 2; - n8 = n >> 3; - - /* pre rotation */ - in1 = input; - in2 = input + n2 - 1; - for(k = 0; k < n4; k++) { - // FIXME a single block is faster, but gcc 2.95 and 3.4.x on 32bit can't compile it - asm volatile( - "movd %0, %%mm0 \n\t" - "movd %2, %%mm1 \n\t" - "punpckldq %1, %%mm0 \n\t" - "punpckldq %3, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "pfmul %%mm1, %%mm0 \n\t" - "pswapd %%mm1, %%mm1 \n\t" - "pfmul %%mm1, %%mm2 \n\t" - "pfpnacc %%mm2, %%mm0 \n\t" - ::"m"(in2[-2*k]), "m"(in1[2*k]), - "m"(tcos[k]), "m"(tsin[k]) - ); - asm volatile( - "movq %%mm0, %0 \n\t" - :"=m"(z[revtab[k]]) - ); - } - - ff_fft_calc(&s->fft, z); - - /* post rotation + reordering */ - for(k = 0; k < n4; k++) { - asm volatile( - "movq %0, %%mm0 \n\t" - "movd %1, %%mm1 \n\t" - "punpckldq %2, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "pfmul %%mm1, %%mm0 \n\t" - "pswapd %%mm1, %%mm1 \n\t" - "pfmul %%mm1, %%mm2 \n\t" - "pfpnacc %%mm2, %%mm0 \n\t" - "movq %%mm0, %0 \n\t" - :"+m"(z[k]) - :"m"(tcos[k]), "m"(tsin[k]) - ); - } - - k = n-8; - asm volatile("movd %0, %%mm7" ::"r"(1<<31)); - asm volatile( - "1: \n\t" - "movq (%4,%0), %%mm0 \n\t" // z[n8+k] - "neg %0 \n\t" - "pswapd -8(%4,%0), %%mm1 \n\t" // z[n8-1-k] - "movq %%mm0, %%mm2 \n\t" - "pxor %%mm7, %%mm2 \n\t" - "punpckldq %%mm1, %%mm2 \n\t" - "pswapd %%mm2, %%mm3 \n\t" - "punpckhdq %%mm1, %%mm0 \n\t" - "pswapd %%mm0, %%mm4 \n\t" - "pxor %%mm7, %%mm0 \n\t" - "pxor %%mm7, %%mm4 \n\t" - "movq %%mm3, -8(%3,%0) \n\t" // output[n-2-2*k] = { z[n8-1-k].im, -z[n8+k].re } - "movq %%mm4, -8(%2,%0) \n\t" // output[n2-2-2*k]= { -z[n8-1-k].re, z[n8+k].im } - "neg %0 \n\t" - "movq %%mm0, (%1,%0) \n\t" // output[2*k] = { -z[n8+k].im, z[n8-1-k].re } - "movq %%mm2, (%2,%0) \n\t" // output[n2+2*k] = { -z[n8+k].re, z[n8-1-k].im } - "sub $8, %0 \n\t" - "jge 1b \n\t" - :"+r"(k) - :"r"(output), "r"(output+n2), "r"(output+n), "r"(z+n8) - :"memory" - ); - asm volatile("femms"); -} - diff --git a/src/libffmpeg/libavcodec/i386/fft_sse.c b/src/libffmpeg/libavcodec/i386/fft_sse.c deleted 
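/*
 * Aside (sketch, not part of the original file): each pswapd/pfmul/pfpnacc
 * sequence above is one complex multiply, and the IMDCT pre-rotation twiddles
 * a pair of samples taken from opposite ends of the input before scattering
 * the product through revtab.  Scalar model of the pre-rotation loop, derived
 * from the operand comments above (cplx_ref and the function are illustrative
 * names only):
 */
typedef struct { float re, im; } cplx_ref;

static void imdct_prerotate_ref(const float *input, cplx_ref *z,
                                const unsigned short *revtab,
                                const float *tcos, const float *tsin, long n)
{
    long k, n2 = n >> 1, n4 = n >> 2;
    for (k = 0; k < n4; k++) {
        float a = input[n2 - 1 - 2 * k];              /* in2[-2*k] */
        float b = input[2 * k];                       /* in1[ 2*k] */
        z[revtab[k]].re = a * tcos[k] - b * tsin[k];  /* pfpnacc low lane  */
        z[revtab[k]].im = a * tsin[k] + b * tcos[k];  /* pfpnacc high lane */
    }
}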
file mode 100644 index 0dc0c61c1..000000000 --- a/src/libffmpeg/libavcodec/i386/fft_sse.c +++ /dev/null @@ -1,247 +0,0 @@ -/* - * FFT/MDCT transform with SSE optimizations - * Copyright (c) 2002 Fabrice Bellard. - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "../dsputil.h" - -static const int p1p1p1m1[4] __attribute__((aligned(16))) = - { 0, 0, 0, 1 << 31 }; - -static const int p1p1m1p1[4] __attribute__((aligned(16))) = - { 0, 0, 1 << 31, 0 }; - -static const int p1p1m1m1[4] __attribute__((aligned(16))) = - { 0, 0, 1 << 31, 1 << 31 }; - -static const int p1m1p1m1[4] __attribute__((aligned(16))) = - { 0, 1 << 31, 0, 1 << 31 }; - -static const int m1m1m1m1[4] __attribute__((aligned(16))) = - { 1 << 31, 1 << 31, 1 << 31, 1 << 31 }; - -#if 0 -static void print_v4sf(const char *str, __m128 a) -{ - float *p = (float *)&a; - printf("%s: %f %f %f %f\n", - str, p[0], p[1], p[2], p[3]); -} -#endif - -/* XXX: handle reverse case */ -void ff_fft_calc_sse(FFTContext *s, FFTComplex *z) -{ - int ln = s->nbits; - long i, j; - long nblocks, nloops; - FFTComplex *p, *cptr; - - asm volatile( - "movaps %0, %%xmm4 \n\t" - "movaps %1, %%xmm5 \n\t" - ::"m"(*p1p1m1m1), - "m"(*(s->inverse ? p1p1m1p1 : p1p1p1m1)) - ); - - i = 8 << ln; - asm volatile( - "1: \n\t" - "sub $32, %0 \n\t" - /* do the pass 0 butterfly */ - "movaps (%0,%1), %%xmm0 \n\t" - "movaps %%xmm0, %%xmm1 \n\t" - "shufps $0x4E, %%xmm0, %%xmm0 \n\t" - "xorps %%xmm4, %%xmm1 \n\t" - "addps %%xmm1, %%xmm0 \n\t" - "movaps 16(%0,%1), %%xmm2 \n\t" - "movaps %%xmm2, %%xmm3 \n\t" - "shufps $0x4E, %%xmm2, %%xmm2 \n\t" - "xorps %%xmm4, %%xmm3 \n\t" - "addps %%xmm3, %%xmm2 \n\t" - /* multiply third by -i */ - /* by toggling the sign bit */ - "shufps $0xB4, %%xmm2, %%xmm2 \n\t" - "xorps %%xmm5, %%xmm2 \n\t" - /* do the pass 1 butterfly */ - "movaps %%xmm0, %%xmm1 \n\t" - "addps %%xmm2, %%xmm0 \n\t" - "subps %%xmm2, %%xmm1 \n\t" - "movaps %%xmm0, (%0,%1) \n\t" - "movaps %%xmm1, 16(%0,%1) \n\t" - "jg 1b \n\t" - :"+r"(i) - :"r"(z) - ); - /* pass 2 .. 
ln-1 */ - - nblocks = 1 << (ln-3); - nloops = 1 << 2; - cptr = s->exptab1; - do { - p = z; - j = nblocks; - do { - i = nloops*8; - asm volatile( - "1: \n\t" - "sub $16, %0 \n\t" - "movaps (%2,%0), %%xmm1 \n\t" - "movaps (%1,%0), %%xmm0 \n\t" - "movaps %%xmm1, %%xmm2 \n\t" - "shufps $0xA0, %%xmm1, %%xmm1 \n\t" - "shufps $0xF5, %%xmm2, %%xmm2 \n\t" - "mulps (%3,%0,2), %%xmm1 \n\t" // cre*re cim*re - "mulps 16(%3,%0,2), %%xmm2 \n\t" // -cim*im cre*im - "addps %%xmm2, %%xmm1 \n\t" - "movaps %%xmm0, %%xmm3 \n\t" - "addps %%xmm1, %%xmm0 \n\t" - "subps %%xmm1, %%xmm3 \n\t" - "movaps %%xmm0, (%1,%0) \n\t" - "movaps %%xmm3, (%2,%0) \n\t" - "jg 1b \n\t" - :"+r"(i) - :"r"(p), "r"(p + nloops), "r"(cptr) - ); - p += nloops*2; - } while (--j); - cptr += nloops*2; - nblocks >>= 1; - nloops <<= 1; - } while (nblocks != 0); -} - -void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output, - const FFTSample *input, FFTSample *tmp) -{ - long k, n8, n4, n2, n; - const uint16_t *revtab = s->fft.revtab; - const FFTSample *tcos = s->tcos; - const FFTSample *tsin = s->tsin; - const FFTSample *in1, *in2; - FFTComplex *z = (FFTComplex *)tmp; - - n = 1 << s->nbits; - n2 = n >> 1; - n4 = n >> 2; - n8 = n >> 3; - - asm volatile ("movaps %0, %%xmm7\n\t"::"m"(*p1m1p1m1)); - - /* pre rotation */ - in1 = input; - in2 = input + n2 - 4; - - /* Complex multiplication - Two complex products per iteration, we could have 4 with 8 xmm - registers, 8 with 16 xmm registers. - Maybe we should unroll more. - */ - for (k = 0; k < n4; k += 2) { - asm volatile ( - "movaps %0, %%xmm0 \n\t" // xmm0 = r0 X r1 X : in2 - "movaps %1, %%xmm3 \n\t" // xmm3 = X i1 X i0: in1 - "movlps %2, %%xmm1 \n\t" // xmm1 = X X R1 R0: tcos - "movlps %3, %%xmm2 \n\t" // xmm2 = X X I1 I0: tsin - "shufps $95, %%xmm0, %%xmm0 \n\t" // xmm0 = r1 r1 r0 r0 - "shufps $160,%%xmm3, %%xmm3 \n\t" // xmm3 = i1 i1 i0 i0 - "unpcklps %%xmm2, %%xmm1 \n\t" // xmm1 = I1 R1 I0 R0 - "movaps %%xmm1, %%xmm2 \n\t" // xmm2 = I1 R1 I0 R0 - "xorps %%xmm7, %%xmm2 \n\t" // xmm2 = -I1 R1 -I0 R0 - "mulps %%xmm1, %%xmm0 \n\t" // xmm0 = rI rR rI rR - "shufps $177,%%xmm2, %%xmm2 \n\t" // xmm2 = R1 -I1 R0 -I0 - "mulps %%xmm2, %%xmm3 \n\t" // xmm3 = Ri -Ii Ri -Ii - "addps %%xmm3, %%xmm0 \n\t" // xmm0 = result - ::"m"(in2[-2*k]), "m"(in1[2*k]), - "m"(tcos[k]), "m"(tsin[k]) - ); - /* Should be in the same block, hack for gcc2.95 & gcc3 */ - asm ( - "movlps %%xmm0, %0 \n\t" - "movhps %%xmm0, %1 \n\t" - :"=m"(z[revtab[k]]), "=m"(z[revtab[k + 1]]) - ); - } - - ff_fft_calc_sse(&s->fft, z); - - /* Not currently needed, added for safety */ - asm volatile ("movaps %0, %%xmm7\n\t"::"m"(*p1m1p1m1)); - - /* post rotation + reordering */ - for (k = 0; k < n4; k += 2) { - asm ( - "movaps %0, %%xmm0 \n\t" // xmm0 = i1 r1 i0 r0: z - "movlps %1, %%xmm1 \n\t" // xmm1 = X X R1 R0: tcos - "movaps %%xmm0, %%xmm3 \n\t" // xmm3 = i1 r1 i0 r0 - "movlps %2, %%xmm2 \n\t" // xmm2 = X X I1 I0: tsin - "shufps $160,%%xmm0, %%xmm0 \n\t" // xmm0 = r1 r1 r0 r0 - "shufps $245,%%xmm3, %%xmm3 \n\t" // xmm3 = i1 i1 i0 i0 - "unpcklps %%xmm2, %%xmm1 \n\t" // xmm1 = I1 R1 I0 R0 - "movaps %%xmm1, %%xmm2 \n\t" // xmm2 = I1 R1 I0 R0 - "xorps %%xmm7, %%xmm2 \n\t" // xmm2 = -I1 R1 -I0 R0 - "mulps %%xmm1, %%xmm0 \n\t" // xmm0 = rI rR rI rR - "shufps $177,%%xmm2, %%xmm2 \n\t" // xmm2 = R1 -I1 R0 -I0 - "mulps %%xmm2, %%xmm3 \n\t" // xmm3 = Ri -Ii Ri -Ii - "addps %%xmm3, %%xmm0 \n\t" // xmm0 = result - "movaps %%xmm0, %0 \n\t" - :"+m"(z[k]) - :"m"(tcos[k]), "m"(tsin[k]) - ); - } - - /* - Mnemonics: - 0 = z[k].re - 1 = z[k].im - 2 = z[k + 1].re 
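/*
 * Aside (sketch, not part of the original file): the fused pass-0/1 loop in
 * ff_fft_calc_sse() above holds two complex values per xmm register, and its
 * "multiply third by -i" step is just a re/im swap plus one sign flip (the
 * inverse transform flips the other sign, hence the p1p1m1p1 mask).  Scalar
 * model (illustrative helper):
 */
static void mul_by_minus_i_ref(float *re, float *im)
{
    float t = *re;      /* (re + i*im) * (-i) == im - i*re */
    *re =  *im;
    *im = -t;
}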
- 3 = z[k + 1].im - 4 = z[-k - 2].re - 5 = z[-k - 2].im - 6 = z[-k - 1].re - 7 = z[-k - 1].im - */ - k = 16-n; - asm volatile("movaps %0, %%xmm7 \n\t"::"m"(*m1m1m1m1)); - asm volatile( - "1: \n\t" - "movaps -16(%4,%0), %%xmm1 \n\t" // xmm1 = 4 5 6 7 = z[-2-k] - "neg %0 \n\t" - "movaps (%4,%0), %%xmm0 \n\t" // xmm0 = 0 1 2 3 = z[k] - "xorps %%xmm7, %%xmm0 \n\t" // xmm0 = -0 -1 -2 -3 - "movaps %%xmm0, %%xmm2 \n\t" // xmm2 = -0 -1 -2 -3 - "shufps $141,%%xmm1, %%xmm0 \n\t" // xmm0 = -1 -3 4 6 - "shufps $216,%%xmm1, %%xmm2 \n\t" // xmm2 = -0 -2 5 7 - "shufps $156,%%xmm0, %%xmm0 \n\t" // xmm0 = -1 6 -3 4 ! - "shufps $156,%%xmm2, %%xmm2 \n\t" // xmm2 = -0 7 -2 5 ! - "movaps %%xmm0, (%1,%0) \n\t" // output[2*k] - "movaps %%xmm2, (%2,%0) \n\t" // output[n2+2*k] - "neg %0 \n\t" - "shufps $27, %%xmm0, %%xmm0 \n\t" // xmm0 = 4 -3 6 -1 - "xorps %%xmm7, %%xmm0 \n\t" // xmm0 = -4 3 -6 1 ! - "shufps $27, %%xmm2, %%xmm2 \n\t" // xmm2 = 5 -2 7 -0 ! - "movaps %%xmm0, -16(%2,%0) \n\t" // output[n2-4-2*k] - "movaps %%xmm2, -16(%3,%0) \n\t" // output[n-4-2*k] - "add $16, %0 \n\t" - "jle 1b \n\t" - :"+r"(k) - :"r"(output), "r"(output+n2), "r"(output+n), "r"(z+n8) - :"memory" - ); -} - diff --git a/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c b/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c deleted file mode 100644 index 40baf199b..000000000 --- a/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c +++ /dev/null @@ -1,1513 +0,0 @@ -/* - * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - - -/***********************************/ -/* IDCT */ - -/* in/out: mma=mma+mmb, mmb=mmb-mma */ -#define SUMSUB_BA( a, b ) \ - "paddw "#b", "#a" \n\t"\ - "paddw "#b", "#b" \n\t"\ - "psubw "#a", "#b" \n\t" - -#define SUMSUB_BADC( a, b, c, d ) \ - "paddw "#b", "#a" \n\t"\ - "paddw "#d", "#c" \n\t"\ - "paddw "#b", "#b" \n\t"\ - "paddw "#d", "#d" \n\t"\ - "psubw "#a", "#b" \n\t"\ - "psubw "#c", "#d" \n\t" - -#define SUMSUBD2_AB( a, b, t ) \ - "movq "#b", "#t" \n\t"\ - "psraw $1 , "#b" \n\t"\ - "paddw "#a", "#b" \n\t"\ - "psraw $1 , "#a" \n\t"\ - "psubw "#t", "#a" \n\t" - -#define IDCT4_1D( s02, s13, d02, d13, t ) \ - SUMSUB_BA ( s02, d02 )\ - SUMSUBD2_AB( s13, d13, t )\ - SUMSUB_BADC( d13, s02, s13, d02 ) - -#define TRANSPOSE4(a,b,c,d,t)\ - SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\ - SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\ - SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\ - SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */ - -#define STORE_DIFF_4P( p, t, z ) \ - "psraw $6, "#p" \n\t"\ - "movd (%0), "#t" \n\t"\ - "punpcklbw "#z", "#t" \n\t"\ - "paddsw "#t", "#p" \n\t"\ - "packuswb "#z", "#p" \n\t"\ - "movd "#p", (%0) \n\t" - -static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride) -{ - /* Load dct coeffs */ - asm volatile( - "movq (%0), %%mm0 \n\t" - "movq 8(%0), %%mm1 \n\t" - "movq 16(%0), %%mm2 \n\t" - "movq 24(%0), %%mm3 \n\t" - :: "r"(block) ); - - asm volatile( - /* mm1=s02+s13 mm2=s02-s13 mm4=d02+d13 mm0=d02-d13 */ - IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 ) - - "movq %0, %%mm6 \n\t" - /* in: 1,4,0,2 out: 1,2,3,0 */ - TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 ) - - "paddw %%mm6, %%mm3 \n\t" - - /* mm2=s02+s13 mm3=s02-s13 mm4=d02+d13 mm1=d02-d13 */ - IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 ) - - "pxor %%mm7, %%mm7 \n\t" - :: "m"(ff_pw_32)); - - asm volatile( - STORE_DIFF_4P( %%mm0, %%mm1, %%mm7) - "add %1, %0 \n\t" - STORE_DIFF_4P( %%mm2, %%mm1, %%mm7) - "add %1, %0 \n\t" - STORE_DIFF_4P( %%mm3, %%mm1, %%mm7) - "add %1, %0 \n\t" - STORE_DIFF_4P( %%mm4, %%mm1, %%mm7) - : "+r"(dst) - : "r" ((long)stride) - ); -} - -static inline void h264_idct8_1d(int16_t *block) -{ - asm volatile( - "movq 112(%0), %%mm7 \n\t" - "movq 80(%0), %%mm5 \n\t" - "movq 48(%0), %%mm3 \n\t" - "movq 16(%0), %%mm1 \n\t" - - "movq %%mm7, %%mm4 \n\t" - "movq %%mm3, %%mm6 \n\t" - "movq %%mm5, %%mm0 \n\t" - "movq %%mm7, %%mm2 \n\t" - "psraw $1, %%mm4 \n\t" - "psraw $1, %%mm6 \n\t" - "psubw %%mm7, %%mm0 \n\t" - "psubw %%mm6, %%mm2 \n\t" - "psubw %%mm4, %%mm0 \n\t" - "psubw %%mm3, %%mm2 \n\t" - "psubw %%mm3, %%mm0 \n\t" - "paddw %%mm1, %%mm2 \n\t" - - "movq %%mm5, %%mm4 \n\t" - "movq %%mm1, %%mm6 \n\t" - "psraw $1, %%mm4 \n\t" - "psraw $1, %%mm6 \n\t" - "paddw %%mm5, %%mm4 \n\t" - "paddw %%mm1, %%mm6 \n\t" - "paddw %%mm7, %%mm4 \n\t" - "paddw %%mm5, %%mm6 \n\t" - "psubw %%mm1, %%mm4 \n\t" - "paddw %%mm3, %%mm6 \n\t" - - "movq %%mm0, %%mm1 \n\t" - "movq %%mm4, %%mm3 \n\t" - "movq %%mm2, %%mm5 \n\t" - "movq %%mm6, %%mm7 \n\t" - "psraw $2, %%mm6 \n\t" - "psraw $2, %%mm3 \n\t" - "psraw $2, %%mm5 \n\t" - "psraw $2, %%mm0 \n\t" - "paddw %%mm6, %%mm1 \n\t" - "paddw %%mm2, %%mm3 \n\t" - "psubw %%mm4, %%mm5 \n\t" - "psubw %%mm0, %%mm7 \n\t" - - "movq 32(%0), %%mm2 \n\t" - "movq 96(%0), %%mm6 \n\t" - "movq %%mm2, %%mm4 \n\t" - "movq %%mm6, %%mm0 \n\t" - "psraw $1, %%mm4 \n\t" - "psraw $1, %%mm6 
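/*
 * Aside (sketch, not part of the original file): SUMSUB_BA above is an
 * in-place butterfly that needs no scratch register -- a += b; b += b;
 * b -= a leaves a' = a+b and b' = b-a.  Scalar model with the same wrapping
 * 16-bit arithmetic as paddw/psubw (illustrative helper):
 */
static void sumsub_ba_ref(short *a, short *b)   /* a' = a+b, b' = b-a */
{
    short t = *a;
    *a = (short)(*a + *b);
    *b = (short)(*b - t);
}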
\n\t" - "psubw %%mm0, %%mm4 \n\t" - "paddw %%mm2, %%mm6 \n\t" - - "movq (%0), %%mm2 \n\t" - "movq 64(%0), %%mm0 \n\t" - SUMSUB_BA( %%mm0, %%mm2 ) - SUMSUB_BA( %%mm6, %%mm0 ) - SUMSUB_BA( %%mm4, %%mm2 ) - SUMSUB_BA( %%mm7, %%mm6 ) - SUMSUB_BA( %%mm5, %%mm4 ) - SUMSUB_BA( %%mm3, %%mm2 ) - SUMSUB_BA( %%mm1, %%mm0 ) - :: "r"(block) - ); -} - -static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride) -{ - int i; - int16_t __attribute__ ((aligned(8))) b2[64]; - - block[0] += 32; - - for(i=0; i<2; i++){ - DECLARE_ALIGNED_8(uint64_t, tmp); - - h264_idct8_1d(block+4*i); - - asm volatile( - "movq %%mm7, %0 \n\t" - TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 ) - "movq %%mm0, 8(%1) \n\t" - "movq %%mm6, 24(%1) \n\t" - "movq %%mm7, 40(%1) \n\t" - "movq %%mm4, 56(%1) \n\t" - "movq %0, %%mm7 \n\t" - TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 ) - "movq %%mm7, (%1) \n\t" - "movq %%mm1, 16(%1) \n\t" - "movq %%mm0, 32(%1) \n\t" - "movq %%mm3, 48(%1) \n\t" - : "=m"(tmp) - : "r"(b2+32*i) - : "memory" - ); - } - - for(i=0; i<2; i++){ - h264_idct8_1d(b2+4*i); - - asm volatile( - "psraw $6, %%mm7 \n\t" - "psraw $6, %%mm6 \n\t" - "psraw $6, %%mm5 \n\t" - "psraw $6, %%mm4 \n\t" - "psraw $6, %%mm3 \n\t" - "psraw $6, %%mm2 \n\t" - "psraw $6, %%mm1 \n\t" - "psraw $6, %%mm0 \n\t" - - "movq %%mm7, (%0) \n\t" - "movq %%mm5, 16(%0) \n\t" - "movq %%mm3, 32(%0) \n\t" - "movq %%mm1, 48(%0) \n\t" - "movq %%mm0, 64(%0) \n\t" - "movq %%mm2, 80(%0) \n\t" - "movq %%mm4, 96(%0) \n\t" - "movq %%mm6, 112(%0) \n\t" - :: "r"(b2+4*i) - : "memory" - ); - } - - add_pixels_clamped_mmx(b2, dst, stride); -} - -static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride) -{ - int dc = (block[0] + 32) >> 6; - asm volatile( - "movd %0, %%mm0 \n\t" - "pshufw $0, %%mm0, %%mm0 \n\t" - "pxor %%mm1, %%mm1 \n\t" - "psubw %%mm0, %%mm1 \n\t" - "packuswb %%mm0, %%mm0 \n\t" - "packuswb %%mm1, %%mm1 \n\t" - ::"r"(dc) - ); - asm volatile( - "movd %0, %%mm2 \n\t" - "movd %1, %%mm3 \n\t" - "movd %2, %%mm4 \n\t" - "movd %3, %%mm5 \n\t" - "paddusb %%mm0, %%mm2 \n\t" - "paddusb %%mm0, %%mm3 \n\t" - "paddusb %%mm0, %%mm4 \n\t" - "paddusb %%mm0, %%mm5 \n\t" - "psubusb %%mm1, %%mm2 \n\t" - "psubusb %%mm1, %%mm3 \n\t" - "psubusb %%mm1, %%mm4 \n\t" - "psubusb %%mm1, %%mm5 \n\t" - "movd %%mm2, %0 \n\t" - "movd %%mm3, %1 \n\t" - "movd %%mm4, %2 \n\t" - "movd %%mm5, %3 \n\t" - :"+m"(*(uint32_t*)(dst+0*stride)), - "+m"(*(uint32_t*)(dst+1*stride)), - "+m"(*(uint32_t*)(dst+2*stride)), - "+m"(*(uint32_t*)(dst+3*stride)) - ); -} - -static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride) -{ - int dc = (block[0] + 32) >> 6; - int y; - asm volatile( - "movd %0, %%mm0 \n\t" - "pshufw $0, %%mm0, %%mm0 \n\t" - "pxor %%mm1, %%mm1 \n\t" - "psubw %%mm0, %%mm1 \n\t" - "packuswb %%mm0, %%mm0 \n\t" - "packuswb %%mm1, %%mm1 \n\t" - ::"r"(dc) - ); - for(y=2; y--; dst += 4*stride){ - asm volatile( - "movq %0, %%mm2 \n\t" - "movq %1, %%mm3 \n\t" - "movq %2, %%mm4 \n\t" - "movq %3, %%mm5 \n\t" - "paddusb %%mm0, %%mm2 \n\t" - "paddusb %%mm0, %%mm3 \n\t" - "paddusb %%mm0, %%mm4 \n\t" - "paddusb %%mm0, %%mm5 \n\t" - "psubusb %%mm1, %%mm2 \n\t" - "psubusb %%mm1, %%mm3 \n\t" - "psubusb %%mm1, %%mm4 \n\t" - "psubusb %%mm1, %%mm5 \n\t" - "movq %%mm2, %0 \n\t" - "movq %%mm3, %1 \n\t" - "movq %%mm4, %2 \n\t" - "movq %%mm5, %3 \n\t" - :"+m"(*(uint64_t*)(dst+0*stride)), - "+m"(*(uint64_t*)(dst+1*stride)), - "+m"(*(uint64_t*)(dst+2*stride)), - "+m"(*(uint64_t*)(dst+3*stride)) - ); - } -} - - -/***********************************/ -/* 
deblocking */ - -// out: o = |x-y|>a -// clobbers: t -#define DIFF_GT_MMX(x,y,a,o,t)\ - "movq "#y", "#t" \n\t"\ - "movq "#x", "#o" \n\t"\ - "psubusb "#x", "#t" \n\t"\ - "psubusb "#y", "#o" \n\t"\ - "por "#t", "#o" \n\t"\ - "psubusb "#a", "#o" \n\t" - -// out: o = |x-y|>a -// clobbers: t -#define DIFF_GT2_MMX(x,y,a,o,t)\ - "movq "#y", "#t" \n\t"\ - "movq "#x", "#o" \n\t"\ - "psubusb "#x", "#t" \n\t"\ - "psubusb "#y", "#o" \n\t"\ - "psubusb "#a", "#t" \n\t"\ - "psubusb "#a", "#o" \n\t"\ - "pcmpeqb "#t", "#o" \n\t"\ - -// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 -// out: mm5=beta-1, mm7=mask -// clobbers: mm4,mm6 -#define H264_DEBLOCK_MASK(alpha1, beta1) \ - "pshufw $0, "#alpha1", %%mm4 \n\t"\ - "pshufw $0, "#beta1 ", %%mm5 \n\t"\ - "packuswb %%mm4, %%mm4 \n\t"\ - "packuswb %%mm5, %%mm5 \n\t"\ - DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\ - DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\ - "por %%mm4, %%mm7 \n\t"\ - DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\ - "por %%mm4, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "pcmpeqb %%mm6, %%mm7 \n\t" - -// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) -// out: mm1=p0' mm2=q0' -// clobbers: mm0,3-6 -#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\ - "movq %%mm1 , %%mm5 \n\t"\ - "pxor %%mm2 , %%mm5 \n\t" /* p0^q0*/\ - "pand "#pb_01" , %%mm5 \n\t" /* (p0^q0)&1*/\ - "pcmpeqb %%mm4 , %%mm4 \n\t"\ - "pxor %%mm4 , %%mm3 \n\t"\ - "pavgb %%mm0 , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\ - "pavgb "MANGLE(ff_pb_3)" , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\ - "pxor %%mm1 , %%mm4 \n\t"\ - "pavgb %%mm2 , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\ - "pavgb %%mm5 , %%mm3 \n\t"\ - "paddusb %%mm4 , %%mm3 \n\t" /* d+128+33*/\ - "movq "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\ - "psubusb %%mm3 , %%mm6 \n\t"\ - "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\ - "pminub %%mm7 , %%mm6 \n\t"\ - "pminub %%mm7 , %%mm3 \n\t"\ - "psubusb %%mm6 , %%mm1 \n\t"\ - "psubusb %%mm3 , %%mm2 \n\t"\ - "paddusb %%mm3 , %%mm1 \n\t"\ - "paddusb %%mm6 , %%mm2 \n\t" - -// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone -// out: (q1addr) = clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 ) -// clobbers: q2, tmp, tc0 -#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\ - "movq %%mm1, "#tmp" \n\t"\ - "pavgb %%mm2, "#tmp" \n\t"\ - "pavgb "#tmp", "#q2" \n\t" /* avg(p2,avg(p0,q0)) */\ - "pxor "q2addr", "#tmp" \n\t"\ - "pand %8, "#tmp" \n\t" /* (p2^avg(p0,q0))&1 */\ - "psubusb "#tmp", "#q2" \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\ - "movq "#p1", "#tmp" \n\t"\ - "psubusb "#tc0", "#tmp" \n\t"\ - "paddusb "#p1", "#tc0" \n\t"\ - "pmaxub "#tmp", "#q2" \n\t"\ - "pminub "#tc0", "#q2" \n\t"\ - "movq "#q2", "q1addr" \n\t" - -static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0) -{ - DECLARE_ALIGNED_8(uint64_t, tmp0[2]); - - asm volatile( - "movq (%1,%3), %%mm0 \n\t" //p1 - "movq (%1,%3,2), %%mm1 \n\t" //p0 - "movq (%2), %%mm2 \n\t" //q0 - "movq (%2,%3), %%mm3 \n\t" //q1 - H264_DEBLOCK_MASK(%6, %7) - - "movd %5, %%mm4 \n\t" - "punpcklbw %%mm4, %%mm4 \n\t" - "punpcklwd %%mm4, %%mm4 \n\t" - "pcmpeqb %%mm3, %%mm3 \n\t" - "movq %%mm4, %%mm6 \n\t" - "pcmpgtb %%mm3, %%mm4 \n\t" - "movq %%mm6, 8+%0 \n\t" - "pand %%mm4, %%mm7 \n\t" - "movq %%mm7, %0 \n\t" - - /* filter p1 */ - "movq (%1), %%mm3 \n\t" //p2 - DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1 - "pand %%mm7, %%mm6 \n\t" // mask & |p2-p0|<beta - "pand 8+%0, %%mm7 \n\t" // mask & tc0 - "movq %%mm7, %%mm4 \n\t" - "psubb 
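/*
 * Aside (sketch, not part of the original file): H264_DEBLOCK_MASK above
 * computes the standard filter gate.  psubusb by (alpha-1) leaves a nonzero
 * byte exactly when |p0-q0| >= alpha, so after OR-ing the three tests, the
 * final pcmpeqb-with-zero produces 0xFF only where all thresholds pass.
 * Scalar model (illustrative helper):
 */
#include <stdlib.h>

static int deblock_mask_ref(int p1, int p0, int q0, int q1,
                            int alpha, int beta)
{
    return abs(p0 - q0) < alpha && abs(p1 - p0) < beta && abs(q1 - q0) < beta;
}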
%%mm6, %%mm7 \n\t" - "pand %%mm4, %%mm6 \n\t" // mask & |p2-p0|<beta & tc0 - H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4) - - /* filter q1 */ - "movq (%2,%3,2), %%mm4 \n\t" //q2 - DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1 - "pand %0, %%mm6 \n\t" - "movq 8+%0, %%mm5 \n\t" // can be merged with the and below but is slower then - "pand %%mm6, %%mm5 \n\t" - "psubb %%mm6, %%mm7 \n\t" - "movq (%2,%3), %%mm3 \n\t" - H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6) - - /* filter p0, q0 */ - H264_DEBLOCK_P0_Q0(%8, unused) - "movq %%mm1, (%1,%3,2) \n\t" - "movq %%mm2, (%2) \n\t" - - : "=m"(*tmp0) - : "r"(pix-3*stride), "r"(pix), "r"((long)stride), - "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1), - "m"(mm_bone) - ); -} - -static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) -{ - if((tc0[0] & tc0[1]) >= 0) - h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0); - if((tc0[2] & tc0[3]) >= 0) - h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2); -} -static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) -{ - //FIXME: could cut some load/stores by merging transpose with filter - // also, it only needs to transpose 6x8 - DECLARE_ALIGNED_8(uint8_t, trans[8*8]); - int i; - for(i=0; i<2; i++, pix+=8*stride, tc0+=2) { - if((tc0[0] & tc0[1]) < 0) - continue; - transpose4x4(trans, pix-4, 8, stride); - transpose4x4(trans +4*8, pix, 8, stride); - transpose4x4(trans+4, pix-4+4*stride, 8, stride); - transpose4x4(trans+4+4*8, pix +4*stride, 8, stride); - h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0); - transpose4x4(pix-2, trans +2*8, stride, 8); - transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8); - } -} - -static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0) -{ - asm volatile( - "movq (%0), %%mm0 \n\t" //p1 - "movq (%0,%2), %%mm1 \n\t" //p0 - "movq (%1), %%mm2 \n\t" //q0 - "movq (%1,%2), %%mm3 \n\t" //q1 - H264_DEBLOCK_MASK(%4, %5) - "movd %3, %%mm6 \n\t" - "punpcklbw %%mm6, %%mm6 \n\t" - "pand %%mm6, %%mm7 \n\t" // mm7 = tc&mask - H264_DEBLOCK_P0_Q0(%6, %7) - "movq %%mm1, (%0,%2) \n\t" - "movq %%mm2, (%1) \n\t" - - :: "r"(pix-2*stride), "r"(pix), "r"((long)stride), - "r"(*(uint32_t*)tc0), - "m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F) - ); -} - -static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) -{ - h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0); -} - -static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) -{ - //FIXME: could cut some load/stores by merging transpose with filter - DECLARE_ALIGNED_8(uint8_t, trans[8*4]); - transpose4x4(trans, pix-2, 8, stride); - transpose4x4(trans+4, pix-2+4*stride, 8, stride); - h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0); - transpose4x4(pix-2, trans, stride, 8); - transpose4x4(pix-2+4*stride, trans+4, stride, 8); -} - -// p0 = (p0 + q1 + 2*p1 + 2) >> 2 -#define H264_FILTER_CHROMA4(p0, p1, q1, one) \ - "movq "#p0", %%mm4 \n\t"\ - "pxor "#q1", %%mm4 \n\t"\ - "pand "#one", %%mm4 \n\t" /* mm4 = (p0^q1)&1 */\ - "pavgb "#q1", "#p0" \n\t"\ - "psubusb %%mm4, "#p0" \n\t"\ - "pavgb "#p1", "#p0" \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\ - -static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1) -{ - asm volatile( - 
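/*
 * Aside (sketch, not part of the original file): the _h_ filter variants
 * above transpose 4x4 tiles into a scratch buffer so the same row-oriented
 * kernel serves both edge directions, then transpose the filtered pixels
 * back.  Scalar model of the transpose (illustrative helper):
 */
static void transpose4x4_ref(unsigned char *dst, const unsigned char *src,
                             int dst_stride, int src_stride)
{
    int i, j;
    for (i = 0; i < 4; i++)
        for (j = 0; j < 4; j++)
            dst[i * dst_stride + j] = src[j * src_stride + i];
}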
"movq (%0), %%mm0 \n\t" - "movq (%0,%2), %%mm1 \n\t" - "movq (%1), %%mm2 \n\t" - "movq (%1,%2), %%mm3 \n\t" - H264_DEBLOCK_MASK(%3, %4) - "movq %%mm1, %%mm5 \n\t" - "movq %%mm2, %%mm6 \n\t" - H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0' - H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0' - "psubb %%mm5, %%mm1 \n\t" - "psubb %%mm6, %%mm2 \n\t" - "pand %%mm7, %%mm1 \n\t" - "pand %%mm7, %%mm2 \n\t" - "paddb %%mm5, %%mm1 \n\t" - "paddb %%mm6, %%mm2 \n\t" - "movq %%mm1, (%0,%2) \n\t" - "movq %%mm2, (%1) \n\t" - :: "r"(pix-2*stride), "r"(pix), "r"((long)stride), - "m"(alpha1), "m"(beta1), "m"(mm_bone) - ); -} - -static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta) -{ - h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1); -} - -static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta) -{ - //FIXME: could cut some load/stores by merging transpose with filter - DECLARE_ALIGNED_8(uint8_t, trans[8*4]); - transpose4x4(trans, pix-2, 8, stride); - transpose4x4(trans+4, pix-2+4*stride, 8, stride); - h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1); - transpose4x4(pix-2, trans, stride, 8); - transpose4x4(pix-2+4*stride, trans+4, stride, 8); -} - -static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2], - int bidir, int edges, int step, int mask_mv0, int mask_mv1 ) { - int dir; - asm volatile( - "pxor %%mm7, %%mm7 \n\t" - "movq %0, %%mm6 \n\t" - "movq %1, %%mm5 \n\t" - "movq %2, %%mm4 \n\t" - ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7) - ); - // could do a special case for dir==0 && edges==1, but it only reduces the - // average filter time by 1.2% - for( dir=1; dir>=0; dir-- ) { - const int d_idx = dir ? -8 : -1; - const int mask_mv = dir ? mask_mv1 : mask_mv0; - DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 
0 : 0xffffffffffffffffULL; - int b_idx, edge, l; - for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) { - asm volatile( - "pand %0, %%mm0 \n\t" - ::"m"(mask_dir) - ); - if(!(mask_mv & edge)) { - asm volatile("pxor %%mm0, %%mm0 \n\t":); - for( l = bidir; l >= 0; l-- ) { - asm volatile( - "movd %0, %%mm1 \n\t" - "punpckldq %1, %%mm1 \n\t" - "movq %%mm1, %%mm2 \n\t" - "psrlw $7, %%mm2 \n\t" - "pand %%mm6, %%mm2 \n\t" - "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1 - "punpckldq %%mm1, %%mm2 \n\t" - "pcmpeqb %%mm2, %%mm1 \n\t" - "paddb %%mm6, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn] - "por %%mm1, %%mm0 \n\t" - - "movq %2, %%mm1 \n\t" - "movq %3, %%mm2 \n\t" - "psubw %4, %%mm1 \n\t" - "psubw %5, %%mm2 \n\t" - "packsswb %%mm2, %%mm1 \n\t" - "paddb %%mm5, %%mm1 \n\t" - "pminub %%mm4, %%mm1 \n\t" - "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit - "por %%mm1, %%mm0 \n\t" - ::"m"(ref[l][b_idx]), - "m"(ref[l][b_idx+d_idx]), - "m"(mv[l][b_idx][0]), - "m"(mv[l][b_idx+2][0]), - "m"(mv[l][b_idx+d_idx][0]), - "m"(mv[l][b_idx+d_idx+2][0]) - ); - } - } - asm volatile( - "movd %0, %%mm1 \n\t" - "por %1, %%mm1 \n\t" - "punpcklbw %%mm7, %%mm1 \n\t" - "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn] - ::"m"(nnz[b_idx]), - "m"(nnz[b_idx+d_idx]) - ); - asm volatile( - "pcmpeqw %%mm7, %%mm0 \n\t" - "pcmpeqw %%mm7, %%mm0 \n\t" - "psrlw $15, %%mm0 \n\t" // nonzero -> 1 - "psrlw $14, %%mm1 \n\t" - "movq %%mm0, %%mm2 \n\t" - "por %%mm1, %%mm2 \n\t" - "psrlw $1, %%mm1 \n\t" - "pandn %%mm2, %%mm1 \n\t" - "movq %%mm1, %0 \n\t" - :"=m"(*bS[dir][edge]) - ::"memory" - ); - } - edges = 4; - step = 1; - } - asm volatile( - "movq (%0), %%mm0 \n\t" - "movq 8(%0), %%mm1 \n\t" - "movq 16(%0), %%mm2 \n\t" - "movq 24(%0), %%mm3 \n\t" - TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4) - "movq %%mm0, (%0) \n\t" - "movq %%mm3, 8(%0) \n\t" - "movq %%mm4, 16(%0) \n\t" - "movq %%mm2, 24(%0) \n\t" - ::"r"(bS[0]) - :"memory" - ); -} - -/***********************************/ -/* motion compensation */ - -#define QPEL_H264V(A,B,C,D,E,F,OP)\ - "movd (%0), "#F" \n\t"\ - "movq "#C", %%mm6 \n\t"\ - "paddw "#D", %%mm6 \n\t"\ - "psllw $2, %%mm6 \n\t"\ - "psubw "#B", %%mm6 \n\t"\ - "psubw "#E", %%mm6 \n\t"\ - "pmullw %4, %%mm6 \n\t"\ - "add %2, %0 \n\t"\ - "punpcklbw %%mm7, "#F" \n\t"\ - "paddw %5, "#A" \n\t"\ - "paddw "#F", "#A" \n\t"\ - "paddw "#A", %%mm6 \n\t"\ - "psraw $5, %%mm6 \n\t"\ - "packuswb %%mm6, %%mm6 \n\t"\ - OP(%%mm6, (%1), A, d)\ - "add %3, %1 \n\t" - -#define QPEL_H264HV(A,B,C,D,E,F,OF)\ - "movd (%0), "#F" \n\t"\ - "movq "#C", %%mm6 \n\t"\ - "paddw "#D", %%mm6 \n\t"\ - "psllw $2, %%mm6 \n\t"\ - "psubw "#B", %%mm6 \n\t"\ - "psubw "#E", %%mm6 \n\t"\ - "pmullw %3, %%mm6 \n\t"\ - "add %2, %0 \n\t"\ - "punpcklbw %%mm7, "#F" \n\t"\ - "paddw "#F", "#A" \n\t"\ - "paddw "#A", %%mm6 \n\t"\ - "movq %%mm6, "#OF"(%1) \n\t" - -#define QPEL_H264(OPNAME, OP, MMX)\ -static void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - int h=4;\ -\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "movq %5, %%mm4 \n\t"\ - "movq %6, %%mm5 \n\t"\ - "1: \n\t"\ - "movd -1(%0), %%mm1 \n\t"\ - "movd (%0), %%mm2 \n\t"\ - "movd 1(%0), %%mm3 \n\t"\ - "movd 2(%0), %%mm0 \n\t"\ - "punpcklbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpcklbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "paddw %%mm0, %%mm1 \n\t"\ - "paddw %%mm3, %%mm2 \n\t"\ - "movd -2(%0), %%mm0 \n\t"\ - "movd 3(%0), %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpcklbw 
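/*
 * Aside (sketch, not part of the original file): QPEL_H264V above evaluates
 * the H.264 six-tap half-pel filter over a sliding six-row window; ff_pw_5
 * supplies the *5 multiplier and ff_pw_16 the rounding term.  Scalar model
 * with clamping (illustrative helper):
 */
static unsigned char qpel6tap_ref(int a, int b, int c, int d, int e, int f)
{
    int v = (a - 5 * b + 20 * c + 20 * d - 5 * e + f + 16) >> 5;
    return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
}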
%%mm7, %%mm3 \n\t"\ - "paddw %%mm3, %%mm0 \n\t"\ - "psllw $2, %%mm2 \n\t"\ - "psubw %%mm1, %%mm2 \n\t"\ - "pmullw %%mm4, %%mm2 \n\t"\ - "paddw %%mm5, %%mm0 \n\t"\ - "paddw %%mm2, %%mm0 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "packuswb %%mm0, %%mm0 \n\t"\ - OP(%%mm0, (%1),%%mm6, d)\ - "add %3, %0 \n\t"\ - "add %4, %1 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - : "+a"(src), "+c"(dst), "+m"(h)\ - : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ - : "memory"\ - );\ -}\ -static void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ - int h=4;\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "movq %0, %%mm4 \n\t"\ - "movq %1, %%mm5 \n\t"\ - :: "m"(ff_pw_5), "m"(ff_pw_16)\ - );\ - do{\ - asm volatile(\ - "movd -1(%0), %%mm1 \n\t"\ - "movd (%0), %%mm2 \n\t"\ - "movd 1(%0), %%mm3 \n\t"\ - "movd 2(%0), %%mm0 \n\t"\ - "punpcklbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpcklbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "paddw %%mm0, %%mm1 \n\t"\ - "paddw %%mm3, %%mm2 \n\t"\ - "movd -2(%0), %%mm0 \n\t"\ - "movd 3(%0), %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpcklbw %%mm7, %%mm3 \n\t"\ - "paddw %%mm3, %%mm0 \n\t"\ - "psllw $2, %%mm2 \n\t"\ - "psubw %%mm1, %%mm2 \n\t"\ - "pmullw %%mm4, %%mm2 \n\t"\ - "paddw %%mm5, %%mm0 \n\t"\ - "paddw %%mm2, %%mm0 \n\t"\ - "movd (%2), %%mm3 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "packuswb %%mm0, %%mm0 \n\t"\ - PAVGB" %%mm3, %%mm0 \n\t"\ - OP(%%mm0, (%1),%%mm6, d)\ - "add %4, %0 \n\t"\ - "add %4, %1 \n\t"\ - "add %3, %2 \n\t"\ - : "+a"(src), "+c"(dst), "+d"(src2)\ - : "D"((long)src2Stride), "S"((long)dstStride)\ - : "memory"\ - );\ - }while(--h);\ -}\ -static void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - src -= 2*srcStride;\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "movd (%0), %%mm0 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm1 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm2 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm3 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm4 \n\t"\ - "add %2, %0 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpcklbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpcklbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm4 \n\t"\ - QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ - QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ - QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ - QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ - \ - : "+a"(src), "+c"(dst)\ - : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ - : "memory"\ - );\ -}\ -static void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ - int h=4;\ - int w=3;\ - src -= 2*srcStride+2;\ - while(w--){\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "movd (%0), %%mm0 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm1 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm2 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm3 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm4 \n\t"\ - "add %2, %0 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpcklbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpcklbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm4 \n\t"\ - QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\ - QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\ - QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\ - QPEL_H264HV(%%mm3, %%mm4, 
%%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\ - \ - : "+a"(src)\ - : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\ - : "memory"\ - );\ - tmp += 4;\ - src += 4 - 9*srcStride;\ - }\ - tmp -= 3*4;\ - asm volatile(\ - "movq %4, %%mm6 \n\t"\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t"\ - "paddw 10(%0), %%mm0 \n\t"\ - "movq 2(%0), %%mm1 \n\t"\ - "paddw 8(%0), %%mm1 \n\t"\ - "movq 4(%0), %%mm2 \n\t"\ - "paddw 6(%0), %%mm2 \n\t"\ - "psubw %%mm1, %%mm0 \n\t"/*a-b (abccba)*/\ - "psraw $2, %%mm0 \n\t"/*(a-b)/4 */\ - "psubw %%mm1, %%mm0 \n\t"/*(a-b)/4-b */\ - "paddsw %%mm2, %%mm0 \n\t"\ - "psraw $2, %%mm0 \n\t"/*((a-b)/4-b+c)/4 */\ - "paddw %%mm6, %%mm2 \n\t"\ - "paddw %%mm2, %%mm0 \n\t"/*(a-5*b+20*c)/16 +32 */\ - "psraw $6, %%mm0 \n\t"\ - "packuswb %%mm0, %%mm0 \n\t"\ - OP(%%mm0, (%1),%%mm7, d)\ - "add $24, %0 \n\t"\ - "add %3, %1 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - : "+a"(tmp), "+c"(dst), "+m"(h)\ - : "S"((long)dstStride), "m"(ff_pw_32)\ - : "memory"\ - );\ -}\ -\ -static void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - int h=8;\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "movq %5, %%mm6 \n\t"\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t"\ - "movq 1(%0), %%mm2 \n\t"\ - "movq %%mm0, %%mm1 \n\t"\ - "movq %%mm2, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpckhbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpckhbw %%mm7, %%mm3 \n\t"\ - "paddw %%mm2, %%mm0 \n\t"\ - "paddw %%mm3, %%mm1 \n\t"\ - "psllw $2, %%mm0 \n\t"\ - "psllw $2, %%mm1 \n\t"\ - "movq -1(%0), %%mm2 \n\t"\ - "movq 2(%0), %%mm4 \n\t"\ - "movq %%mm2, %%mm3 \n\t"\ - "movq %%mm4, %%mm5 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpckhbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm4 \n\t"\ - "punpckhbw %%mm7, %%mm5 \n\t"\ - "paddw %%mm4, %%mm2 \n\t"\ - "paddw %%mm3, %%mm5 \n\t"\ - "psubw %%mm2, %%mm0 \n\t"\ - "psubw %%mm5, %%mm1 \n\t"\ - "pmullw %%mm6, %%mm0 \n\t"\ - "pmullw %%mm6, %%mm1 \n\t"\ - "movd -2(%0), %%mm2 \n\t"\ - "movd 7(%0), %%mm5 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpcklbw %%mm7, %%mm5 \n\t"\ - "paddw %%mm3, %%mm2 \n\t"\ - "paddw %%mm5, %%mm4 \n\t"\ - "movq %6, %%mm5 \n\t"\ - "paddw %%mm5, %%mm2 \n\t"\ - "paddw %%mm5, %%mm4 \n\t"\ - "paddw %%mm2, %%mm0 \n\t"\ - "paddw %%mm4, %%mm1 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "psraw $5, %%mm1 \n\t"\ - "packuswb %%mm1, %%mm0 \n\t"\ - OP(%%mm0, (%1),%%mm5, q)\ - "add %3, %0 \n\t"\ - "add %4, %1 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - : "+a"(src), "+c"(dst), "+m"(h)\ - : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ - : "memory"\ - );\ -}\ -\ -static void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ - int h=8;\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "movq %0, %%mm6 \n\t"\ - :: "m"(ff_pw_5)\ - );\ - do{\ - asm volatile(\ - "movq (%0), %%mm0 \n\t"\ - "movq 1(%0), %%mm2 \n\t"\ - "movq %%mm0, %%mm1 \n\t"\ - "movq %%mm2, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpckhbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpckhbw %%mm7, %%mm3 \n\t"\ - "paddw %%mm2, %%mm0 \n\t"\ - "paddw %%mm3, %%mm1 \n\t"\ - "psllw $2, %%mm0 \n\t"\ - "psllw $2, %%mm1 \n\t"\ - "movq -1(%0), %%mm2 \n\t"\ - "movq 2(%0), %%mm4 \n\t"\ - "movq %%mm2, %%mm3 \n\t"\ - "movq %%mm4, %%mm5 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpckhbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm4 \n\t"\ - "punpckhbw %%mm7, %%mm5 \n\t"\ - "paddw %%mm4, %%mm2 \n\t"\ - "paddw %%mm3, %%mm5 \n\t"\ - "psubw %%mm2, %%mm0 \n\t"\ - "psubw 
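/*
 * Aside (sketch, not part of the original file): the second hv pass above
 * factors a - 5b + 20c as (((a-b)>>2 - b + c)>>2) + c so everything fits in
 * 16-bit lanes; the bits each intermediate >>2 discards can never reach the
 * kept result, so the staging reproduces (a - 5b + 20c) >> 4 exactly.  With
 * the six 16-bit first-pass sums t[], the final value is the spec result
 * (illustrative helper):
 */
static unsigned char qpel_hv_ref(const short *t)
{
    int v = (t[0] - 5 * t[1] + 20 * t[2] + 20 * t[3] - 5 * t[4] + t[5]
             + 512) >> 10;
    return (unsigned char)(v < 0 ? 0 : v > 255 ? 255 : v);
}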
%%mm5, %%mm1 \n\t"\ - "pmullw %%mm6, %%mm0 \n\t"\ - "pmullw %%mm6, %%mm1 \n\t"\ - "movd -2(%0), %%mm2 \n\t"\ - "movd 7(%0), %%mm5 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpcklbw %%mm7, %%mm5 \n\t"\ - "paddw %%mm3, %%mm2 \n\t"\ - "paddw %%mm5, %%mm4 \n\t"\ - "movq %5, %%mm5 \n\t"\ - "paddw %%mm5, %%mm2 \n\t"\ - "paddw %%mm5, %%mm4 \n\t"\ - "paddw %%mm2, %%mm0 \n\t"\ - "paddw %%mm4, %%mm1 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "psraw $5, %%mm1 \n\t"\ - "movq (%2), %%mm4 \n\t"\ - "packuswb %%mm1, %%mm0 \n\t"\ - PAVGB" %%mm4, %%mm0 \n\t"\ - OP(%%mm0, (%1),%%mm5, q)\ - "add %4, %0 \n\t"\ - "add %4, %1 \n\t"\ - "add %3, %2 \n\t"\ - : "+a"(src), "+c"(dst), "+d"(src2)\ - : "D"((long)src2Stride), "S"((long)dstStride),\ - "m"(ff_pw_16)\ - : "memory"\ - );\ - }while(--h);\ -}\ -\ -static inline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\ - int w= 2;\ - src -= 2*srcStride;\ - \ - while(w--){\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "movd (%0), %%mm0 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm1 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm2 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm3 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm4 \n\t"\ - "add %2, %0 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpcklbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpcklbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm4 \n\t"\ - QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ - QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ - QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ - QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ - QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ - QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ - QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ - QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ - \ - : "+a"(src), "+c"(dst)\ - : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ - : "memory"\ - );\ - if(h==16){\ - asm volatile(\ - QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ - QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ - QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\ - QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\ - QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\ - QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\ - QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\ - QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\ - \ - : "+a"(src), "+c"(dst)\ - : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\ - : "memory"\ - );\ - }\ - src += 4-(h+5)*srcStride;\ - dst += 4-h*dstStride;\ - }\ -}\ -static inline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\ - int h = size;\ - int w = (size+8)>>2;\ - src -= 2*srcStride+2;\ - while(w--){\ - asm volatile(\ - "pxor %%mm7, %%mm7 \n\t"\ - "movd (%0), %%mm0 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm1 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm2 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm3 \n\t"\ - "add %2, %0 \n\t"\ - "movd (%0), %%mm4 \n\t"\ - "add %2, %0 \n\t"\ - "punpcklbw %%mm7, %%mm0 \n\t"\ - "punpcklbw %%mm7, %%mm1 \n\t"\ - "punpcklbw %%mm7, %%mm2 \n\t"\ - "punpcklbw %%mm7, %%mm3 \n\t"\ - "punpcklbw %%mm7, %%mm4 \n\t"\ - QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\ - QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\ - 
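/*
 * Aside (sketch, not part of the original file): the *_l2 variants above
 * blend the six-tap output against a second prediction via pavgb, which is
 * how the diagonal quarter-pel positions are formed.  Scalar model of pavgb
 * (illustrative helper):
 */
static unsigned char pavgb_ref(unsigned char a, unsigned char b)
{
    return (unsigned char)((a + b + 1) >> 1);
}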
QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\ - QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\ - QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\ - QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\ - QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\ - QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\ - : "+a"(src)\ - : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\ - : "memory"\ - );\ - if(size==16){\ - asm volatile(\ - QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 8*48)\ - QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 9*48)\ - QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\ - QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\ - QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\ - QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\ - QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\ - QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\ - : "+a"(src)\ - : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\ - : "memory"\ - );\ - }\ - tmp += 4;\ - src += 4 - (size+5)*srcStride;\ - }\ - tmp -= size+8;\ - w = size>>4;\ - do{\ - h = size;\ - asm volatile(\ - "movq %4, %%mm6 \n\t"\ - "1: \n\t"\ - "movq (%0), %%mm0 \n\t"\ - "movq 8(%0), %%mm3 \n\t"\ - "movq 2(%0), %%mm1 \n\t"\ - "movq 10(%0), %%mm4 \n\t"\ - "paddw %%mm4, %%mm0 \n\t"\ - "paddw %%mm3, %%mm1 \n\t"\ - "paddw 18(%0), %%mm3 \n\t"\ - "paddw 16(%0), %%mm4 \n\t"\ - "movq 4(%0), %%mm2 \n\t"\ - "movq 12(%0), %%mm5 \n\t"\ - "paddw 6(%0), %%mm2 \n\t"\ - "paddw 14(%0), %%mm5 \n\t"\ - "psubw %%mm1, %%mm0 \n\t"\ - "psubw %%mm4, %%mm3 \n\t"\ - "psraw $2, %%mm0 \n\t"\ - "psraw $2, %%mm3 \n\t"\ - "psubw %%mm1, %%mm0 \n\t"\ - "psubw %%mm4, %%mm3 \n\t"\ - "paddsw %%mm2, %%mm0 \n\t"\ - "paddsw %%mm5, %%mm3 \n\t"\ - "psraw $2, %%mm0 \n\t"\ - "psraw $2, %%mm3 \n\t"\ - "paddw %%mm6, %%mm2 \n\t"\ - "paddw %%mm6, %%mm5 \n\t"\ - "paddw %%mm2, %%mm0 \n\t"\ - "paddw %%mm5, %%mm3 \n\t"\ - "psraw $6, %%mm0 \n\t"\ - "psraw $6, %%mm3 \n\t"\ - "packuswb %%mm3, %%mm0 \n\t"\ - OP(%%mm0, (%1),%%mm7, q)\ - "add $48, %0 \n\t"\ - "add %3, %1 \n\t"\ - "decl %2 \n\t"\ - " jnz 1b \n\t"\ - : "+a"(tmp), "+c"(dst), "+m"(h)\ - : "S"((long)dstStride), "m"(ff_pw_32)\ - : "memory"\ - );\ - tmp += 8 - size*24;\ - dst += 8 - size*dstStride;\ - }while(w--);\ -}\ -\ -static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\ -}\ -static void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\ - OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\ -}\ -\ -static void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\ - OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ - OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ - src += 8*srcStride;\ - dst += 8*dstStride;\ - OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\ - OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\ -}\ -\ -static void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\ - OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ - 
OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ - src += 8*dstStride;\ - dst += 8*dstStride;\ - src2 += 8*src2Stride;\ - OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\ - OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\ -}\ -\ -static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ - OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\ -}\ -\ -static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\ - OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\ -}\ -\ -static void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ -{\ - asm volatile(\ - "movq %5, %%mm6 \n\t"\ - "movq (%1), %%mm0 \n\t"\ - "movq 24(%1), %%mm1 \n\t"\ - "paddw %%mm6, %%mm0 \n\t"\ - "paddw %%mm6, %%mm1 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "psraw $5, %%mm1 \n\t"\ - "packuswb %%mm0, %%mm0 \n\t"\ - "packuswb %%mm1, %%mm1 \n\t"\ - PAVGB" (%0), %%mm0 \n\t"\ - PAVGB" (%0,%3), %%mm1 \n\t"\ - OP(%%mm0, (%2), %%mm4, d)\ - OP(%%mm1, (%2,%4), %%mm5, d)\ - "lea (%0,%3,2), %0 \n\t"\ - "lea (%2,%4,2), %2 \n\t"\ - "movq 48(%1), %%mm0 \n\t"\ - "movq 72(%1), %%mm1 \n\t"\ - "paddw %%mm6, %%mm0 \n\t"\ - "paddw %%mm6, %%mm1 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "psraw $5, %%mm1 \n\t"\ - "packuswb %%mm0, %%mm0 \n\t"\ - "packuswb %%mm1, %%mm1 \n\t"\ - PAVGB" (%0), %%mm0 \n\t"\ - PAVGB" (%0,%3), %%mm1 \n\t"\ - OP(%%mm0, (%2), %%mm4, d)\ - OP(%%mm1, (%2,%4), %%mm5, d)\ - :"+a"(src8), "+c"(src16), "+d"(dst)\ - :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\ - :"memory");\ -}\ -static void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ -{\ - asm volatile(\ - "movq %0, %%mm6 \n\t"\ - ::"m"(ff_pw_16)\ - );\ - while(h--){\ - asm volatile(\ - "movq (%1), %%mm0 \n\t"\ - "movq 8(%1), %%mm1 \n\t"\ - "paddw %%mm6, %%mm0 \n\t"\ - "paddw %%mm6, %%mm1 \n\t"\ - "psraw $5, %%mm0 \n\t"\ - "psraw $5, %%mm1 \n\t"\ - "packuswb %%mm1, %%mm0 \n\t"\ - PAVGB" (%0), %%mm0 \n\t"\ - OP(%%mm0, (%2), %%mm5, q)\ - ::"a"(src8), "c"(src16), "d"(dst)\ - :"memory");\ - src8 += src8Stride;\ - src16 += 24;\ - dst += dstStride;\ - }\ -}\ -static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\ -{\ - OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\ - OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\ -}\ - - -#define H264_MC(OPNAME, SIZE, MMX) \ -static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## h264_qpel 
## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8];\ - uint8_t * const half= (uint8_t*)temp;\ - put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8];\ - uint8_t * const half= (uint8_t*)temp;\ - put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8];\ - uint8_t * const halfV= (uint8_t*)temp;\ - put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ - OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8];\ - uint8_t * const halfV= (uint8_t*)temp;\ - put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ - OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8];\ - uint8_t * const halfV= (uint8_t*)temp;\ - put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\ - OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8];\ - uint8_t * const halfV= (uint8_t*)temp;\ - put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\ - OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE<8?12:24)/4];\ - int16_t * const tmp= (int16_t*)temp;\ - OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ - uint8_t * const halfHV= (uint8_t*)temp;\ - int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\ - put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ - OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ - uint8_t * const halfHV= (uint8_t*)temp;\ - int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\ - put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\ - OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t 
*dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ - int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\ - uint8_t * const halfHV= ((uint8_t*)temp);\ - put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\ -}\ -\ -static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\ - int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\ - uint8_t * const halfHV= ((uint8_t*)temp);\ - put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\ - OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\ -}\ - - -#define AVG_3DNOW_OP(a,b,temp, size) \ -"mov" #size " " #b ", " #temp " \n\t"\ -"pavgusb " #temp ", " #a " \n\t"\ -"mov" #size " " #a ", " #b " \n\t" -#define AVG_MMX2_OP(a,b,temp, size) \ -"mov" #size " " #b ", " #temp " \n\t"\ -"pavgb " #temp ", " #a " \n\t"\ -"mov" #size " " #a ", " #b " \n\t" - -#define PAVGB "pavgusb" -QPEL_H264(put_, PUT_OP, 3dnow) -QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow) -#undef PAVGB -#define PAVGB "pavgb" -QPEL_H264(put_, PUT_OP, mmx2) -QPEL_H264(avg_, AVG_MMX2_OP, mmx2) -#undef PAVGB - -H264_MC(put_, 4, 3dnow) -H264_MC(put_, 8, 3dnow) -H264_MC(put_, 16,3dnow) -H264_MC(avg_, 4, 3dnow) -H264_MC(avg_, 8, 3dnow) -H264_MC(avg_, 16,3dnow) -H264_MC(put_, 4, mmx2) -H264_MC(put_, 8, mmx2) -H264_MC(put_, 16,mmx2) -H264_MC(avg_, 4, mmx2) -H264_MC(avg_, 8, mmx2) -H264_MC(avg_, 16,mmx2) - - -#define H264_CHROMA_OP(S,D) -#define H264_CHROMA_OP4(S,D,T) -#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx -#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx -#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2 -#define H264_CHROMA_MC8_MV0 put_pixels8_mmx -#include "dsputil_h264_template_mmx.c" -#undef H264_CHROMA_OP -#undef H264_CHROMA_OP4 -#undef H264_CHROMA_MC8_TMPL -#undef H264_CHROMA_MC4_TMPL -#undef H264_CHROMA_MC2_TMPL -#undef H264_CHROMA_MC8_MV0 - -#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t" -#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\ - "pavgb " #T ", " #D " \n\t" -#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2 -#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2 -#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2 -#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2 -#include "dsputil_h264_template_mmx.c" -#undef H264_CHROMA_OP -#undef H264_CHROMA_OP4 -#undef H264_CHROMA_MC8_TMPL -#undef H264_CHROMA_MC4_TMPL -#undef H264_CHROMA_MC2_TMPL -#undef H264_CHROMA_MC8_MV0 - -#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t" -#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\ - "pavgusb " #T ", " #D " \n\t" -#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow -#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow -#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow -#include "dsputil_h264_template_mmx.c" -#undef H264_CHROMA_OP -#undef H264_CHROMA_OP4 -#undef H264_CHROMA_MC8_TMPL -#undef H264_CHROMA_MC4_TMPL -#undef H264_CHROMA_MC8_MV0 - -/***********************************/ -/* weighted prediction */ - -static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h) -{ - int x, y; - offset <<= log2_denom; - offset += (1 << log2_denom) >> 1; - asm volatile( - "movd %0, %%mm4 \n\t" - "movd %1, %%mm5 \n\t" - "movd %2, %%mm6 \n\t" - "pshufw $0, %%mm4, %%mm4 \n\t" - 
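The ff_h264_weight_WxH_mmx2 kernel being set up here implements H.264 explicit unidirectional weighted prediction. A scalar sketch of what it computes, assuming the usual 8-bit clip (function name illustrative):

    #include <stdint.h>

    /* Per H.264: dst = clip(((dst*weight + (1 << (log2_denom-1)))
     *                        >> log2_denom) + offset).
     * Folding the rounding bias and the offset into one constant up
     * front, exactly as the first two statements of the MMX version do,
     * leaves the inner loop as just multiply, add, shift, pack. */
    static void weight_c(uint8_t *dst, int stride, int log2_denom,
                         int weight, int offset, int w, int h)
    {
        int x, y;
        offset = (offset << log2_denom) + ((1 << log2_denom) >> 1);
        for (y = 0; y < h; y++, dst += stride)
            for (x = 0; x < w; x++) {
                int v = (dst[x] * weight + offset) >> log2_denom;
                dst[x] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
            }
    }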
"pshufw $0, %%mm5, %%mm5 \n\t" - "pxor %%mm7, %%mm7 \n\t" - :: "g"(weight), "g"(offset), "g"(log2_denom) - ); - for(y=0; y<h; y+=2){ - for(x=0; x<w; x+=4){ - asm volatile( - "movd %0, %%mm0 \n\t" - "movd %1, %%mm1 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm1 \n\t" - "pmullw %%mm4, %%mm0 \n\t" - "pmullw %%mm4, %%mm1 \n\t" - "paddsw %%mm5, %%mm0 \n\t" - "paddsw %%mm5, %%mm1 \n\t" - "psraw %%mm6, %%mm0 \n\t" - "psraw %%mm6, %%mm1 \n\t" - "packuswb %%mm7, %%mm0 \n\t" - "packuswb %%mm7, %%mm1 \n\t" - "movd %%mm0, %0 \n\t" - "movd %%mm1, %1 \n\t" - : "+m"(*(uint32_t*)(dst+x)), - "+m"(*(uint32_t*)(dst+x+stride)) - ); - } - dst += 2*stride; - } -} - -static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h) -{ - int x, y; - offset = ((offset + 1) | 1) << log2_denom; - asm volatile( - "movd %0, %%mm3 \n\t" - "movd %1, %%mm4 \n\t" - "movd %2, %%mm5 \n\t" - "movd %3, %%mm6 \n\t" - "pshufw $0, %%mm3, %%mm3 \n\t" - "pshufw $0, %%mm4, %%mm4 \n\t" - "pshufw $0, %%mm5, %%mm5 \n\t" - "pxor %%mm7, %%mm7 \n\t" - :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1) - ); - for(y=0; y<h; y++){ - for(x=0; x<w; x+=4){ - asm volatile( - "movd %0, %%mm0 \n\t" - "movd %1, %%mm1 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm1 \n\t" - "pmullw %%mm3, %%mm0 \n\t" - "pmullw %%mm4, %%mm1 \n\t" - "paddsw %%mm1, %%mm0 \n\t" - "paddsw %%mm5, %%mm0 \n\t" - "psraw %%mm6, %%mm0 \n\t" - "packuswb %%mm0, %%mm0 \n\t" - "movd %%mm0, %0 \n\t" - : "+m"(*(uint32_t*)(dst+x)) - : "m"(*(uint32_t*)(src+x)) - ); - } - src += stride; - dst += stride; - } -} - -#define H264_WEIGHT(W,H) \ -static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \ - ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \ -} \ -static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \ - ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \ -} - -H264_WEIGHT(16,16) -H264_WEIGHT(16, 8) -H264_WEIGHT( 8,16) -H264_WEIGHT( 8, 8) -H264_WEIGHT( 8, 4) -H264_WEIGHT( 4, 8) -H264_WEIGHT( 4, 4) -H264_WEIGHT( 4, 2) - diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx.c b/src/libffmpeg/libavcodec/i386/idct_mmx.c deleted file mode 100644 index 4c548fdce..000000000 --- a/src/libffmpeg/libavcodec/i386/idct_mmx.c +++ /dev/null @@ -1,597 +0,0 @@ -/* - * idct_mmx.c - * Copyright (C) 1999-2001 Aaron Holtzman <aholtzma@ess.engr.uvic.ca> - * - * This file is part of mpeg2dec, a free MPEG-2 video stream decoder. - * - * mpeg2dec is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * mpeg2dec is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with mpeg2dec; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include "common.h" -#include "../dsputil.h" - -#include "mmx.h" - -#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align))) - -#define ROW_SHIFT 11 -#define COL_SHIFT 6 - -#define round(bias) ((int)(((bias)+0.5) * (1<<ROW_SHIFT))) -#define rounder(bias) {round (bias), round (bias)} - -#if 0 -/* C row IDCT - its just here to document the MMXEXT and MMX versions */ -static inline void idct_row (int16_t * row, int offset, - int16_t * table, int32_t * rounder) -{ - int C1, C2, C3, C4, C5, C6, C7; - int a0, a1, a2, a3, b0, b1, b2, b3; - - row += offset; - - C1 = table[1]; - C2 = table[2]; - C3 = table[3]; - C4 = table[4]; - C5 = table[5]; - C6 = table[6]; - C7 = table[7]; - - a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + *rounder; - a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + *rounder; - a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + *rounder; - a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + *rounder; - - b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7]; - b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7]; - b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7]; - b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7]; - - row[0] = (a0 + b0) >> ROW_SHIFT; - row[1] = (a1 + b1) >> ROW_SHIFT; - row[2] = (a2 + b2) >> ROW_SHIFT; - row[3] = (a3 + b3) >> ROW_SHIFT; - row[4] = (a3 - b3) >> ROW_SHIFT; - row[5] = (a2 - b2) >> ROW_SHIFT; - row[6] = (a1 - b1) >> ROW_SHIFT; - row[7] = (a0 - b0) >> ROW_SHIFT; -} -#endif - - -/* MMXEXT row IDCT */ - -#define mmxext_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, -c4, -c2, \ - c4, c6, c4, c6, \ - c1, c3, -c1, -c5, \ - c5, c7, c3, -c7, \ - c4, -c6, c4, -c6, \ - -c4, c2, c4, -c2, \ - c5, -c1, c3, -c1, \ - c7, c3, c7, -c5 } - -static inline void mmxext_row_head (int16_t * row, int offset, const int16_t * table) -{ - movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0 - - movq_m2r (*(row+offset+4), mm5); // mm5 = x7 x5 x3 x1 - movq_r2r (mm2, mm0); // mm0 = x6 x4 x2 x0 - - movq_m2r (*table, mm3); // mm3 = -C2 -C4 C2 C4 - movq_r2r (mm5, mm6); // mm6 = x7 x5 x3 x1 - - movq_m2r (*(table+4), mm4); // mm4 = C6 C4 C6 C4 - pmaddwd_r2r (mm0, mm3); // mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 - - pshufw_r2r (mm2, mm2, 0x4e); // mm2 = x2 x0 x6 x4 -} - -static inline void mmxext_row (const int16_t * table, const int32_t * rounder) -{ - movq_m2r (*(table+8), mm1); // mm1 = -C5 -C1 C3 C1 - pmaddwd_r2r (mm2, mm4); // mm4 = C4*x0+C6*x2 C4*x4+C6*x6 - - pmaddwd_m2r (*(table+16), mm0); // mm0 = C4*x4-C6*x6 C4*x0-C6*x2 - pshufw_r2r (mm6, mm6, 0x4e); // mm6 = x3 x1 x7 x5 - - movq_m2r (*(table+12), mm7); // mm7 = -C7 C3 C7 C5 - pmaddwd_r2r (mm5, mm1); // mm1 = -C1*x5-C5*x7 C1*x1+C3*x3 - - paddd_m2r (*rounder, mm3); // mm3 += rounder - pmaddwd_r2r (mm6, mm7); // mm7 = C3*x1-C7*x3 C5*x5+C7*x7 - - pmaddwd_m2r (*(table+20), mm2); // mm2 = C4*x0-C2*x2 -C4*x4+C2*x6 - paddd_r2r (mm4, mm3); // mm3 = a1 a0 + rounder - - pmaddwd_m2r (*(table+24), mm5); // mm5 = C3*x5-C1*x7 C5*x1-C1*x3 - movq_r2r (mm3, mm4); // mm4 = a1 a0 + rounder - - pmaddwd_m2r (*(table+28), mm6); // mm6 = C7*x1-C5*x3 C7*x5+C3*x7 - paddd_r2r (mm7, mm1); // mm1 = b1 b0 - - paddd_m2r (*rounder, mm0); // mm0 += rounder - psubd_r2r (mm1, mm3); // mm3 = a1-b1 a0-b0 + rounder - - psrad_i2r (ROW_SHIFT, mm3); // mm3 = y6 y7 - paddd_r2r (mm4, mm1); // mm1 = a1+b1 a0+b0 + rounder - - paddd_r2r (mm2, mm0); 
// mm0 = a3 a2 + rounder - psrad_i2r (ROW_SHIFT, mm1); // mm1 = y1 y0 - - paddd_r2r (mm6, mm5); // mm5 = b3 b2 - movq_r2r (mm0, mm4); // mm4 = a3 a2 + rounder - - paddd_r2r (mm5, mm0); // mm0 = a3+b3 a2+b2 + rounder - psubd_r2r (mm5, mm4); // mm4 = a3-b3 a2-b2 + rounder -} - -static inline void mmxext_row_tail (int16_t * row, int store) -{ - psrad_i2r (ROW_SHIFT, mm0); // mm0 = y3 y2 - - psrad_i2r (ROW_SHIFT, mm4); // mm4 = y4 y5 - - packssdw_r2r (mm0, mm1); // mm1 = y3 y2 y1 y0 - - packssdw_r2r (mm3, mm4); // mm4 = y6 y7 y4 y5 - - movq_r2m (mm1, *(row+store)); // save y3 y2 y1 y0 - pshufw_r2r (mm4, mm4, 0xb1); // mm4 = y7 y6 y5 y4 - - /* slot */ - - movq_r2m (mm4, *(row+store+4)); // save y7 y6 y5 y4 -} - -static inline void mmxext_row_mid (int16_t * row, int store, - int offset, const int16_t * table) -{ - movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0 - psrad_i2r (ROW_SHIFT, mm0); // mm0 = y3 y2 - - movq_m2r (*(row+offset+4), mm5); // mm5 = x7 x5 x3 x1 - psrad_i2r (ROW_SHIFT, mm4); // mm4 = y4 y5 - - packssdw_r2r (mm0, mm1); // mm1 = y3 y2 y1 y0 - movq_r2r (mm5, mm6); // mm6 = x7 x5 x3 x1 - - packssdw_r2r (mm3, mm4); // mm4 = y6 y7 y4 y5 - movq_r2r (mm2, mm0); // mm0 = x6 x4 x2 x0 - - movq_r2m (mm1, *(row+store)); // save y3 y2 y1 y0 - pshufw_r2r (mm4, mm4, 0xb1); // mm4 = y7 y6 y5 y4 - - movq_m2r (*table, mm3); // mm3 = -C2 -C4 C2 C4 - movq_r2m (mm4, *(row+store+4)); // save y7 y6 y5 y4 - - pmaddwd_r2r (mm0, mm3); // mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 - - movq_m2r (*(table+4), mm4); // mm4 = C6 C4 C6 C4 - pshufw_r2r (mm2, mm2, 0x4e); // mm2 = x2 x0 x6 x4 -} - - -/* MMX row IDCT */ - -#define mmx_table(c1,c2,c3,c4,c5,c6,c7) { c4, c2, c4, c6, \ - c4, c6, -c4, -c2, \ - c1, c3, c3, -c7, \ - c5, c7, -c1, -c5, \ - c4, -c6, c4, -c2, \ - -c4, c2, c4, -c6, \ - c5, -c1, c7, -c5, \ - c7, c3, c3, -c1 } - -static inline void mmx_row_head (int16_t * row, int offset, const int16_t * table) -{ - movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0 - - movq_m2r (*(row+offset+4), mm5); // mm5 = x7 x5 x3 x1 - movq_r2r (mm2, mm0); // mm0 = x6 x4 x2 x0 - - movq_m2r (*table, mm3); // mm3 = C6 C4 C2 C4 - movq_r2r (mm5, mm6); // mm6 = x7 x5 x3 x1 - - punpckldq_r2r (mm0, mm0); // mm0 = x2 x0 x2 x0 - - movq_m2r (*(table+4), mm4); // mm4 = -C2 -C4 C6 C4 - pmaddwd_r2r (mm0, mm3); // mm3 = C4*x0+C6*x2 C4*x0+C2*x2 - - movq_m2r (*(table+8), mm1); // mm1 = -C7 C3 C3 C1 - punpckhdq_r2r (mm2, mm2); // mm2 = x6 x4 x6 x4 -} - -static inline void mmx_row (const int16_t * table, const int32_t * rounder) -{ - pmaddwd_r2r (mm2, mm4); // mm4 = -C4*x4-C2*x6 C4*x4+C6*x6 - punpckldq_r2r (mm5, mm5); // mm5 = x3 x1 x3 x1 - - pmaddwd_m2r (*(table+16), mm0); // mm0 = C4*x0-C2*x2 C4*x0-C6*x2 - punpckhdq_r2r (mm6, mm6); // mm6 = x7 x5 x7 x5 - - movq_m2r (*(table+12), mm7); // mm7 = -C5 -C1 C7 C5 - pmaddwd_r2r (mm5, mm1); // mm1 = C3*x1-C7*x3 C1*x1+C3*x3 - - paddd_m2r (*rounder, mm3); // mm3 += rounder - pmaddwd_r2r (mm6, mm7); // mm7 = -C1*x5-C5*x7 C5*x5+C7*x7 - - pmaddwd_m2r (*(table+20), mm2); // mm2 = C4*x4-C6*x6 -C4*x4+C2*x6 - paddd_r2r (mm4, mm3); // mm3 = a1 a0 + rounder - - pmaddwd_m2r (*(table+24), mm5); // mm5 = C7*x1-C5*x3 C5*x1-C1*x3 - movq_r2r (mm3, mm4); // mm4 = a1 a0 + rounder - - pmaddwd_m2r (*(table+28), mm6); // mm6 = C3*x5-C1*x7 C7*x5+C3*x7 - paddd_r2r (mm7, mm1); // mm1 = b1 b0 - - paddd_m2r (*rounder, mm0); // mm0 += rounder - psubd_r2r (mm1, mm3); // mm3 = a1-b1 a0-b0 + rounder - - psrad_i2r (ROW_SHIFT, mm3); // mm3 = y6 y7 - paddd_r2r (mm4, mm1); // mm1 = a1+b1 a0+b0 + rounder - - paddd_r2r (mm2, mm0); // mm0 = 
a3 a2 + rounder - psrad_i2r (ROW_SHIFT, mm1); // mm1 = y1 y0 - - paddd_r2r (mm6, mm5); // mm5 = b3 b2 - movq_r2r (mm0, mm7); // mm7 = a3 a2 + rounder - - paddd_r2r (mm5, mm0); // mm0 = a3+b3 a2+b2 + rounder - psubd_r2r (mm5, mm7); // mm7 = a3-b3 a2-b2 + rounder -} - -static inline void mmx_row_tail (int16_t * row, int store) -{ - psrad_i2r (ROW_SHIFT, mm0); // mm0 = y3 y2 - - psrad_i2r (ROW_SHIFT, mm7); // mm7 = y4 y5 - - packssdw_r2r (mm0, mm1); // mm1 = y3 y2 y1 y0 - - packssdw_r2r (mm3, mm7); // mm7 = y6 y7 y4 y5 - - movq_r2m (mm1, *(row+store)); // save y3 y2 y1 y0 - movq_r2r (mm7, mm4); // mm4 = y6 y7 y4 y5 - - pslld_i2r (16, mm7); // mm7 = y7 0 y5 0 - - psrld_i2r (16, mm4); // mm4 = 0 y6 0 y4 - - por_r2r (mm4, mm7); // mm7 = y7 y6 y5 y4 - - /* slot */ - - movq_r2m (mm7, *(row+store+4)); // save y7 y6 y5 y4 -} - -static inline void mmx_row_mid (int16_t * row, int store, - int offset, const int16_t * table) -{ - movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0 - psrad_i2r (ROW_SHIFT, mm0); // mm0 = y3 y2 - - movq_m2r (*(row+offset+4), mm5); // mm5 = x7 x5 x3 x1 - psrad_i2r (ROW_SHIFT, mm7); // mm7 = y4 y5 - - packssdw_r2r (mm0, mm1); // mm1 = y3 y2 y1 y0 - movq_r2r (mm5, mm6); // mm6 = x7 x5 x3 x1 - - packssdw_r2r (mm3, mm7); // mm7 = y6 y7 y4 y5 - movq_r2r (mm2, mm0); // mm0 = x6 x4 x2 x0 - - movq_r2m (mm1, *(row+store)); // save y3 y2 y1 y0 - movq_r2r (mm7, mm1); // mm1 = y6 y7 y4 y5 - - punpckldq_r2r (mm0, mm0); // mm0 = x2 x0 x2 x0 - psrld_i2r (16, mm7); // mm7 = 0 y6 0 y4 - - movq_m2r (*table, mm3); // mm3 = C6 C4 C2 C4 - pslld_i2r (16, mm1); // mm1 = y7 0 y5 0 - - movq_m2r (*(table+4), mm4); // mm4 = -C2 -C4 C6 C4 - por_r2r (mm1, mm7); // mm7 = y7 y6 y5 y4 - - movq_m2r (*(table+8), mm1); // mm1 = -C7 C3 C3 C1 - punpckhdq_r2r (mm2, mm2); // mm2 = x6 x4 x6 x4 - - movq_r2m (mm7, *(row+store+4)); // save y7 y6 y5 y4 - pmaddwd_r2r (mm0, mm3); // mm3 = C4*x0+C6*x2 C4*x0+C2*x2 -} - - -#if 0 -// C column IDCT - its just here to document the MMXEXT and MMX versions -static inline void idct_col (int16_t * col, int offset) -{ -/* multiplication - as implemented on mmx */ -#define F(c,x) (((c) * (x)) >> 16) - -/* saturation - it helps us handle torture test cases */ -#define S(x) (((x)>32767) ? 32767 : ((x)<-32768) ? 
-32768 : (x)) - - int16_t x0, x1, x2, x3, x4, x5, x6, x7; - int16_t y0, y1, y2, y3, y4, y5, y6, y7; - int16_t a0, a1, a2, a3, b0, b1, b2, b3; - int16_t u04, v04, u26, v26, u17, v17, u35, v35, u12, v12; - - col += offset; - - x0 = col[0*8]; - x1 = col[1*8]; - x2 = col[2*8]; - x3 = col[3*8]; - x4 = col[4*8]; - x5 = col[5*8]; - x6 = col[6*8]; - x7 = col[7*8]; - - u04 = S (x0 + x4); - v04 = S (x0 - x4); - u26 = S (F (T2, x6) + x2); - v26 = S (F (T2, x2) - x6); - - a0 = S (u04 + u26); - a1 = S (v04 + v26); - a2 = S (v04 - v26); - a3 = S (u04 - u26); - - u17 = S (F (T1, x7) + x1); - v17 = S (F (T1, x1) - x7); - u35 = S (F (T3, x5) + x3); - v35 = S (F (T3, x3) - x5); - - b0 = S (u17 + u35); - b3 = S (v17 - v35); - u12 = S (u17 - u35); - v12 = S (v17 + v35); - u12 = S (2 * F (C4, u12)); - v12 = S (2 * F (C4, v12)); - b1 = S (u12 + v12); - b2 = S (u12 - v12); - - y0 = S (a0 + b0) >> COL_SHIFT; - y1 = S (a1 + b1) >> COL_SHIFT; - y2 = S (a2 + b2) >> COL_SHIFT; - y3 = S (a3 + b3) >> COL_SHIFT; - - y4 = S (a3 - b3) >> COL_SHIFT; - y5 = S (a2 - b2) >> COL_SHIFT; - y6 = S (a1 - b1) >> COL_SHIFT; - y7 = S (a0 - b0) >> COL_SHIFT; - - col[0*8] = y0; - col[1*8] = y1; - col[2*8] = y2; - col[3*8] = y3; - col[4*8] = y4; - col[5*8] = y5; - col[6*8] = y6; - col[7*8] = y7; -} -#endif - - -// MMX column IDCT -static inline void idct_col (int16_t * col, int offset) -{ -#define T1 13036 -#define T2 27146 -#define T3 43790 -#define C4 23170 - - static const short _T1[] ATTR_ALIGN(8) = {T1,T1,T1,T1}; - static const short _T2[] ATTR_ALIGN(8) = {T2,T2,T2,T2}; - static const short _T3[] ATTR_ALIGN(8) = {T3,T3,T3,T3}; - static const short _C4[] ATTR_ALIGN(8) = {C4,C4,C4,C4}; - - /* column code adapted from peter gubanov */ - /* http://www.elecard.com/peter/idct.shtml */ - - movq_m2r (*_T1, mm0); // mm0 = T1 - - movq_m2r (*(col+offset+1*8), mm1); // mm1 = x1 - movq_r2r (mm0, mm2); // mm2 = T1 - - movq_m2r (*(col+offset+7*8), mm4); // mm4 = x7 - pmulhw_r2r (mm1, mm0); // mm0 = T1*x1 - - movq_m2r (*_T3, mm5); // mm5 = T3 - pmulhw_r2r (mm4, mm2); // mm2 = T1*x7 - - movq_m2r (*(col+offset+5*8), mm6); // mm6 = x5 - movq_r2r (mm5, mm7); // mm7 = T3-1 - - movq_m2r (*(col+offset+3*8), mm3); // mm3 = x3 - psubsw_r2r (mm4, mm0); // mm0 = v17 - - movq_m2r (*_T2, mm4); // mm4 = T2 - pmulhw_r2r (mm3, mm5); // mm5 = (T3-1)*x3 - - paddsw_r2r (mm2, mm1); // mm1 = u17 - pmulhw_r2r (mm6, mm7); // mm7 = (T3-1)*x5 - - /* slot */ - - movq_r2r (mm4, mm2); // mm2 = T2 - paddsw_r2r (mm3, mm5); // mm5 = T3*x3 - - pmulhw_m2r (*(col+offset+2*8), mm4);// mm4 = T2*x2 - paddsw_r2r (mm6, mm7); // mm7 = T3*x5 - - psubsw_r2r (mm6, mm5); // mm5 = v35 - paddsw_r2r (mm3, mm7); // mm7 = u35 - - movq_m2r (*(col+offset+6*8), mm3); // mm3 = x6 - movq_r2r (mm0, mm6); // mm6 = v17 - - pmulhw_r2r (mm3, mm2); // mm2 = T2*x6 - psubsw_r2r (mm5, mm0); // mm0 = b3 - - psubsw_r2r (mm3, mm4); // mm4 = v26 - paddsw_r2r (mm6, mm5); // mm5 = v12 - - movq_r2m (mm0, *(col+offset+3*8)); // save b3 in scratch0 - movq_r2r (mm1, mm6); // mm6 = u17 - - paddsw_m2r (*(col+offset+2*8), mm2);// mm2 = u26 - paddsw_r2r (mm7, mm6); // mm6 = b0 - - psubsw_r2r (mm7, mm1); // mm1 = u12 - movq_r2r (mm1, mm7); // mm7 = u12 - - movq_m2r (*(col+offset+0*8), mm3); // mm3 = x0 - paddsw_r2r (mm5, mm1); // mm1 = u12+v12 - - movq_m2r (*_C4, mm0); // mm0 = C4/2 - psubsw_r2r (mm5, mm7); // mm7 = u12-v12 - - movq_r2m (mm6, *(col+offset+5*8)); // save b0 in scratch1 - pmulhw_r2r (mm0, mm1); // mm1 = b1/2 - - movq_r2r (mm4, mm6); // mm6 = v26 - pmulhw_r2r (mm0, mm7); // mm7 = b2/2 - - movq_m2r 
(*(col+offset+4*8), mm5); // mm5 = x4 - movq_r2r (mm3, mm0); // mm0 = x0 - - psubsw_r2r (mm5, mm3); // mm3 = v04 - paddsw_r2r (mm5, mm0); // mm0 = u04 - - paddsw_r2r (mm3, mm4); // mm4 = a1 - movq_r2r (mm0, mm5); // mm5 = u04 - - psubsw_r2r (mm6, mm3); // mm3 = a2 - paddsw_r2r (mm2, mm5); // mm5 = a0 - - paddsw_r2r (mm1, mm1); // mm1 = b1 - psubsw_r2r (mm2, mm0); // mm0 = a3 - - paddsw_r2r (mm7, mm7); // mm7 = b2 - movq_r2r (mm3, mm2); // mm2 = a2 - - movq_r2r (mm4, mm6); // mm6 = a1 - paddsw_r2r (mm7, mm3); // mm3 = a2+b2 - - psraw_i2r (COL_SHIFT, mm3); // mm3 = y2 - paddsw_r2r (mm1, mm4); // mm4 = a1+b1 - - psraw_i2r (COL_SHIFT, mm4); // mm4 = y1 - psubsw_r2r (mm1, mm6); // mm6 = a1-b1 - - movq_m2r (*(col+offset+5*8), mm1); // mm1 = b0 - psubsw_r2r (mm7, mm2); // mm2 = a2-b2 - - psraw_i2r (COL_SHIFT, mm6); // mm6 = y6 - movq_r2r (mm5, mm7); // mm7 = a0 - - movq_r2m (mm4, *(col+offset+1*8)); // save y1 - psraw_i2r (COL_SHIFT, mm2); // mm2 = y5 - - movq_r2m (mm3, *(col+offset+2*8)); // save y2 - paddsw_r2r (mm1, mm5); // mm5 = a0+b0 - - movq_m2r (*(col+offset+3*8), mm4); // mm4 = b3 - psubsw_r2r (mm1, mm7); // mm7 = a0-b0 - - psraw_i2r (COL_SHIFT, mm5); // mm5 = y0 - movq_r2r (mm0, mm3); // mm3 = a3 - - movq_r2m (mm2, *(col+offset+5*8)); // save y5 - psubsw_r2r (mm4, mm3); // mm3 = a3-b3 - - psraw_i2r (COL_SHIFT, mm7); // mm7 = y7 - paddsw_r2r (mm0, mm4); // mm4 = a3+b3 - - movq_r2m (mm5, *(col+offset+0*8)); // save y0 - psraw_i2r (COL_SHIFT, mm3); // mm3 = y4 - - movq_r2m (mm6, *(col+offset+6*8)); // save y6 - psraw_i2r (COL_SHIFT, mm4); // mm4 = y3 - - movq_r2m (mm7, *(col+offset+7*8)); // save y7 - - movq_r2m (mm3, *(col+offset+4*8)); // save y4 - - movq_r2m (mm4, *(col+offset+3*8)); // save y3 - -#undef T1 -#undef T2 -#undef T3 -#undef C4 -} - -static const int32_t rounder0[] ATTR_ALIGN(8) = - rounder ((1 << (COL_SHIFT - 1)) - 0.5); -static const int32_t rounder4[] ATTR_ALIGN(8) = rounder (0); -static const int32_t rounder1[] ATTR_ALIGN(8) = - rounder (1.25683487303); /* C1*(C1/C4+C1+C7)/2 */ -static const int32_t rounder7[] ATTR_ALIGN(8) = - rounder (-0.25); /* C1*(C7/C4+C7-C1)/2 */ -static const int32_t rounder2[] ATTR_ALIGN(8) = - rounder (0.60355339059); /* C2 * (C6+C2)/2 */ -static const int32_t rounder6[] ATTR_ALIGN(8) = - rounder (-0.25); /* C2 * (C6-C2)/2 */ -static const int32_t rounder3[] ATTR_ALIGN(8) = - rounder (0.087788325588); /* C3*(-C3/C4+C3+C5)/2 */ -static const int32_t rounder5[] ATTR_ALIGN(8) = - rounder (-0.441341716183); /* C3*(-C5/C4+C5-C3)/2 */ - -#undef COL_SHIFT -#undef ROW_SHIFT - -#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \ -void idct (int16_t * block) \ -{ \ - static const int16_t table04[] ATTR_ALIGN(16) = \ - table (22725, 21407, 19266, 16384, 12873, 8867, 4520); \ - static const int16_t table17[] ATTR_ALIGN(16) = \ - table (31521, 29692, 26722, 22725, 17855, 12299, 6270); \ - static const int16_t table26[] ATTR_ALIGN(16) = \ - table (29692, 27969, 25172, 21407, 16819, 11585, 5906); \ - static const int16_t table35[] ATTR_ALIGN(16) = \ - table (26722, 25172, 22654, 19266, 15137, 10426, 5315); \ - \ - idct_row_head (block, 0*8, table04); \ - idct_row (table04, rounder0); \ - idct_row_mid (block, 0*8, 4*8, table04); \ - idct_row (table04, rounder4); \ - idct_row_mid (block, 4*8, 1*8, table17); \ - idct_row (table17, rounder1); \ - idct_row_mid (block, 1*8, 7*8, table17); \ - idct_row (table17, rounder7); \ - idct_row_mid (block, 7*8, 2*8, table26); \ - idct_row (table26, rounder2); \ - idct_row_mid (block, 2*8, 6*8, 
table26); \ - idct_row (table26, rounder6); \ - idct_row_mid (block, 6*8, 3*8, table35); \ - idct_row (table35, rounder3); \ - idct_row_mid (block, 3*8, 5*8, table35); \ - idct_row (table35, rounder5); \ - idct_row_tail (block, 5*8); \ - \ - idct_col (block, 0); \ - idct_col (block, 4); \ -} - -void ff_mmx_idct(DCTELEM *block); -void ff_mmxext_idct(DCTELEM *block); - -declare_idct (ff_mmxext_idct, mmxext_table, - mmxext_row_head, mmxext_row, mmxext_row_tail, mmxext_row_mid) - -declare_idct (ff_mmx_idct, mmx_table, - mmx_row_head, mmx_row, mmx_row_tail, mmx_row_mid) - diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c b/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c deleted file mode 100644 index 59b255943..000000000 --- a/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c +++ /dev/null @@ -1,535 +0,0 @@ -///**************************************************************************** -// * -// * XVID MPEG-4 VIDEO CODEC -// * - MMX and XMM forward discrete cosine transform - -// * -// * Copyright(C) 2001 Peter Ross <pross@xvid.org> -// * -// * This file is part of FFmpeg. -// * -// * FFmpeg is free software; you can redistribute it and/or -// * modify it under the terms of the GNU Lesser General Public -// * License as published by the Free Software Foundation; either -// * version 2.1 of the License, or (at your option) any later version. -// * -// * FFmpeg is distributed in the hope that it will be useful, -// * but WITHOUT ANY WARRANTY; without even the implied warranty of -// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -// * Lesser General Public License for more details. -// * -// * You should have received a copy of the GNU Lesser General Public License -// * along with FFmpeg; if not, write to the Free Software Foundation, -// * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -// * -// * $Id: idct_mmx_xvid.c,v 1.4 2006/12/04 22:25:26 miguelfreitas Exp $ -// * -// ***************************************************************************/ - -// **************************************************************************** -// -// Originally provided by Intel at AP-922 -// http://developer.intel.com/vtune/cbts/strmsimd/922down.htm -// (See more app notes at http://developer.intel.com/vtune/cbts/strmsimd/appnotes.htm) -// but in a limited edition. -// New macro implements a column part for precise iDCT -// The routine precision now satisfies IEEE standard 1180-1990. 
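With BITS_INV_ACC = 5, as defined a few lines below, the shift and rounding constants reduce to plain half-quantum biases:

    SHIFT_INV_ROW = 16 - 5 = 11,  RND_INV_ROW = 1024*(6-5) = 1024 = 1 << (11 - 1)
    SHIFT_INV_COL = 1 + 5  =  6,  RND_INV_COL =   16*(5-3) =   32 = 1 << (6 - 1)

so each stage adds half of the quantum it is about to shift away, i.e. ordinary round-to-nearest, consistent with the IEEE 1180-1990 accuracy claim above.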
-// -// Copyright(C) 2000-2001 Peter Gubanov <peter@elecard.net.ru> -// Rounding trick Copyright(C) 2000 Michel Lespinasse <walken@zoy.org> -// -// http://www.elecard.com/peter/idct.html -// http://www.linuxvideo.org/mpeg2dec/ -// -// ***************************************************************************/ -// -// These examples contain code fragments for first stage iDCT 8x8 -// (for rows) and first stage DCT 8x8 (for columns) -// - -// conversion to gcc syntax by michael niedermayer - - -#include <inttypes.h> -#include "../avcodec.h" - -//============================================================================= -// Macros and other preprocessor constants -//============================================================================= - -#define BITS_INV_ACC 5 // 4 or 5 for IEEE -#define SHIFT_INV_ROW (16 - BITS_INV_ACC) //11 -#define SHIFT_INV_COL (1 + BITS_INV_ACC) //6 -#define RND_INV_ROW (1024 * (6 - BITS_INV_ACC)) -#define RND_INV_COL (16 * (BITS_INV_ACC - 3)) -#define RND_INV_CORR (RND_INV_COL - 1) - -#define BITS_FRW_ACC 3 // 2 or 3 for accuracy -#define SHIFT_FRW_COL BITS_FRW_ACC -#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17) -#define RND_FRW_ROW (262144*(BITS_FRW_ACC - 1)) - - -//----------------------------------------------------------------------------- -// Various memory constants (trigonometric values or rounding values) -//----------------------------------------------------------------------------- - - -static const int16_t tg_1_16[4*4] attribute_used __attribute__ ((aligned(8))) = { - 13036,13036,13036,13036, // tg * (2<<16) + 0.5 - 27146,27146,27146,27146, // tg * (2<<16) + 0.5 - -21746,-21746,-21746,-21746, // tg * (2<<16) + 0.5 - 23170,23170,23170,23170}; // cos * (2<<15) + 0.5 - -static const int32_t rounder_0[2*8] attribute_used __attribute__ ((aligned(8))) = { - 65536,65536, - 3597,3597, - 2260,2260, - 1203,1203, - 0,0, - 120,120, - 512,512, - 512,512}; - -//----------------------------------------------------------------------------- -// -// The first stage iDCT 8x8 - inverse DCTs of rows -// -//----------------------------------------------------------------------------- -// The 8-point inverse DCT direct algorithm -//----------------------------------------------------------------------------- -// -// static const short w[32] = { -// FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16), -// FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16), -// FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16), -// FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16), -// FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16), -// FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16), -// FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16), -// FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) }; -// -// #define DCT_8_INV_ROW(x, y) -// { -// int a0, a1, a2, a3, b0, b1, b2, b3; -// -// a0 =x[0]*w[0]+x[2]*w[1]+x[4]*w[2]+x[6]*w[3]; -// a1 =x[0]*w[4]+x[2]*w[5]+x[4]*w[6]+x[6]*w[7]; -// a2 = x[0] * w[ 8] + x[2] * w[ 9] + x[4] * w[10] + x[6] * w[11]; -// a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15]; -// b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19]; -// b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23]; -// b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27]; -// b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31]; -// -// y[0] = SHIFT_ROUND ( a0 + b0 ); -// y[1] = SHIFT_ROUND ( a1 + b1 ); -// y[2] = SHIFT_ROUND ( a2 + b2 ); -// y[3] = SHIFT_ROUND 
( a3 + b3 ); -// y[4] = SHIFT_ROUND ( a3 - b3 ); -// y[5] = SHIFT_ROUND ( a2 - b2 ); -// y[6] = SHIFT_ROUND ( a1 - b1 ); -// y[7] = SHIFT_ROUND ( a0 - b0 ); -// } -// -//----------------------------------------------------------------------------- -// -// In this implementation the outputs of the iDCT-1D are multiplied -// for rows 0,4 - by cos_4_16, -// for rows 1,7 - by cos_1_16, -// for rows 2,6 - by cos_2_16, -// for rows 3,5 - by cos_3_16 -// and are shifted to the left for better accuracy -// -// For the constants used, -// FIX(float_const) = (short) (float_const * (1<<15) + 0.5) -// -//----------------------------------------------------------------------------- - -//----------------------------------------------------------------------------- -// Tables for mmx processors -//----------------------------------------------------------------------------- - -// Table for rows 0,4 - constants are multiplied by cos_4_16 -static const int16_t tab_i_04_mmx[32*4] attribute_used __attribute__ ((aligned(8))) = { - 16384,16384,16384,-16384, // movq-> w06 w04 w02 w00 - 21407,8867,8867,-21407, // w07 w05 w03 w01 - 16384,-16384,16384,16384, // w14 w12 w10 w08 - -8867,21407,-21407,-8867, // w15 w13 w11 w09 - 22725,12873,19266,-22725, // w22 w20 w18 w16 - 19266,4520,-4520,-12873, // w23 w21 w19 w17 - 12873,4520,4520,19266, // w30 w28 w26 w24 - -22725,19266,-12873,-22725, // w31 w29 w27 w25 -// Table for rows 1,7 - constants are multiplied by cos_1_16 - 22725,22725,22725,-22725, // movq-> w06 w04 w02 w00 - 29692,12299,12299,-29692, // w07 w05 w03 w01 - 22725,-22725,22725,22725, // w14 w12 w10 w08 - -12299,29692,-29692,-12299, // w15 w13 w11 w09 - 31521,17855,26722,-31521, // w22 w20 w18 w16 - 26722,6270,-6270,-17855, // w23 w21 w19 w17 - 17855,6270,6270,26722, // w30 w28 w26 w24 - -31521,26722,-17855,-31521, // w31 w29 w27 w25 -// Table for rows 2,6 - constants are multiplied by cos_2_16 - 21407,21407,21407,-21407, // movq-> w06 w04 w02 w00 - 27969,11585,11585,-27969, // w07 w05 w03 w01 - 21407,-21407,21407,21407, // w14 w12 w10 w08 - -11585,27969,-27969,-11585, // w15 w13 w11 w09 - 29692,16819,25172,-29692, // w22 w20 w18 w16 - 25172,5906,-5906,-16819, // w23 w21 w19 w17 - 16819,5906,5906,25172, // w30 w28 w26 w24 - -29692,25172,-16819,-29692, // w31 w29 w27 w25 -// Table for rows 3,5 - constants are multiplied by cos_3_16 - 19266,19266,19266,-19266, // movq-> w06 w04 w02 w00 - 25172,10426,10426,-25172, // w07 w05 w03 w01 - 19266,-19266,19266,19266, // w14 w12 w10 w08 - -10426,25172,-25172,-10426, // w15 w13 w11 w09 - 26722,15137,22654,-26722, // w22 w20 w18 w16 - 22654,5315,-5315,-15137, // w23 w21 w19 w17 - 15137,5315,5315,22654, // w30 w28 w26 w24 - -26722,22654,-15137,-26722, // w31 w29 w27 w25 -}; -//----------------------------------------------------------------------------- -// Tables for xmm processors -//----------------------------------------------------------------------------- - -// %3 for rows 0,4 - constants are multiplied by cos_4_16 -static const int16_t tab_i_04_xmm[32*4] attribute_used __attribute__ ((aligned(8))) = { - 16384,21407,16384,8867, // movq-> w05 w04 w01 w00 - 16384,8867,-16384,-21407, // w07 w06 w03 w02 - 16384,-8867,16384,-21407, // w13 w12 w09 w08 - -16384,21407,16384,-8867, // w15 w14 w11 w10 - 22725,19266,19266,-4520, // w21 w20 w17 w16 - 12873,4520,-22725,-12873, // w23 w22 w19 w18 - 12873,-22725,4520,-12873, // w29 w28 w25 w24 - 4520,19266,19266,-22725, // w31 w30 w27 w26 -// %3 for rows 1,7 - constants are multiplied by cos_1_16 - 22725,29692,22725,12299, // 
movq-> w05 w04 w01 w00 - 22725,12299,-22725,-29692, // w07 w06 w03 w02 - 22725,-12299,22725,-29692, // w13 w12 w09 w08 - -22725,29692,22725,-12299, // w15 w14 w11 w10 - 31521,26722,26722,-6270, // w21 w20 w17 w16 - 17855,6270,-31521,-17855, // w23 w22 w19 w18 - 17855,-31521,6270,-17855, // w29 w28 w25 w24 - 6270,26722,26722,-31521, // w31 w30 w27 w26 -// %3 for rows 2,6 - constants are multiplied by cos_2_16 - 21407,27969,21407,11585, // movq-> w05 w04 w01 w00 - 21407,11585,-21407,-27969, // w07 w06 w03 w02 - 21407,-11585,21407,-27969, // w13 w12 w09 w08 - -21407,27969,21407,-11585, // w15 w14 w11 w10 - 29692,25172,25172,-5906, // w21 w20 w17 w16 - 16819,5906,-29692,-16819, // w23 w22 w19 w18 - 16819,-29692,5906,-16819, // w29 w28 w25 w24 - 5906,25172,25172,-29692, // w31 w30 w27 w26 -// %3 for rows 3,5 - constants are multiplied by cos_3_16 - 19266,25172,19266,10426, // movq-> w05 w04 w01 w00 - 19266,10426,-19266,-25172, // w07 w06 w03 w02 - 19266,-10426,19266,-25172, // w13 w12 w09 w08 - -19266,25172,19266,-10426, // w15 w14 w11 w10 - 26722,22654,22654,-5315, // w21 w20 w17 w16 - 15137,5315,-26722,-15137, // w23 w22 w19 w18 - 15137,-26722,5315,-15137, // w29 w28 w25 w24 - 5315,22654,22654,-26722, // w31 w30 w27 w26 -}; -//============================================================================= -// Helper macros for the code -//============================================================================= - -//----------------------------------------------------------------------------- -// DCT_8_INV_ROW_MMX( INP, OUT, TABLE, ROUNDER -//----------------------------------------------------------------------------- - -#define DCT_8_INV_ROW_MMX(A1,A2,A3,A4)\ - "movq " #A1 ",%%mm0 \n\t"/* 0 ; x3 x2 x1 x0*/\ - "movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\ - "movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\ - "movq " #A3 ",%%mm3 \n\t"/* 3 ; w06 w04 w02 w00*/\ - "punpcklwd %%mm1,%%mm0 \n\t"/* x5 x1 x4 x0*/\ - "movq %%mm0,%%mm5 \n\t"/* 5 ; x5 x1 x4 x0*/\ - "punpckldq %%mm0,%%mm0 \n\t"/* x4 x0 x4 x0*/\ - "movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w05 w03 w01*/\ - "punpckhwd %%mm1,%%mm2 \n\t"/* 1 ; x7 x3 x6 x2*/\ - "pmaddwd %%mm0,%%mm3 \n\t"/* x4*w06+x0*w04 x4*w02+x0*w00*/\ - "movq %%mm2,%%mm6 \n\t"/* 6 ; x7 x3 x6 x2*/\ - "movq 32+" #A3 ",%%mm1 \n\t"/* 1 ; w22 w20 w18 w16*/\ - "punpckldq %%mm2,%%mm2 \n\t"/* x6 x2 x6 x2*/\ - "pmaddwd %%mm2,%%mm4 \n\t"/* x6*w07+x2*w05 x6*w03+x2*w01*/\ - "punpckhdq %%mm5,%%mm5 \n\t"/* x5 x1 x5 x1*/\ - "pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x4*w14+x0*w12 x4*w10+x0*w08*/\ - "punpckhdq %%mm6,%%mm6 \n\t"/* x7 x3 x7 x3*/\ - "movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w21 w19 w17*/\ - "pmaddwd %%mm5,%%mm1 \n\t"/* x5*w22+x1*w20 x5*w18+x1*w16*/\ - "paddd " #A4 ",%%mm3 \n\t"/* +%4*/\ - "pmaddwd %%mm6,%%mm7 \n\t"/* x7*w23+x3*w21 x7*w19+x3*w17*/\ - "pmaddwd 24+" #A3 ",%%mm2 \n\t"/* x6*w15+x2*w13 x6*w11+x2*w09*/\ - "paddd %%mm4,%%mm3 \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\ - "pmaddwd 48+" #A3 ",%%mm5 \n\t"/* x5*w30+x1*w28 x5*w26+x1*w24*/\ - "movq %%mm3,%%mm4 \n\t"/* 4 ; a1 a0*/\ - "pmaddwd 56+" #A3 ",%%mm6 \n\t"/* x7*w31+x3*w29 x7*w27+x3*w25*/\ - "paddd %%mm7,%%mm1 \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\ - "paddd " #A4 ",%%mm0 \n\t"/* +%4*/\ - "psubd %%mm1,%%mm3 \n\t"/* a1-b1 a0-b0*/\ - "psrad $11,%%mm3 \n\t"/* y6=a1-b1 y7=a0-b0*/\ - "paddd %%mm4,%%mm1 \n\t"/* 4 ; a1+b1 a0+b0*/\ - "paddd %%mm2,%%mm0 \n\t"/* 2 ; a3=sum(even3) a2=sum(even2)*/\ - "psrad $11,%%mm1 \n\t"/* y1=a1+b1 y0=a0+b0*/\ - "paddd %%mm6,%%mm5 \n\t"/* 6 ; b3=sum(odd3) b2=sum(odd2)*/\ - "movq %%mm0,%%mm4 \n\t"/* 4 ; a3 
a2*/\ - "paddd %%mm5,%%mm0 \n\t"/* a3+b3 a2+b2*/\ - "psubd %%mm5,%%mm4 \n\t"/* 5 ; a3-b3 a2-b2*/\ - "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\ - "psrad $11,%%mm4 \n\t"/* y4=a3-b3 y5=a2-b2*/\ - "packssdw %%mm0,%%mm1 \n\t"/* 0 ; y3 y2 y1 y0*/\ - "packssdw %%mm3,%%mm4 \n\t"/* 3 ; y6 y7 y4 y5*/\ - "movq %%mm4,%%mm7 \n\t"/* 7 ; y6 y7 y4 y5*/\ - "psrld $16,%%mm4 \n\t"/* 0 y6 0 y4*/\ - "pslld $16,%%mm7 \n\t"/* y7 0 y5 0*/\ - "movq %%mm1," #A2 " \n\t"/* 1 ; save y3 y2 y1 y0*/\ - "por %%mm4,%%mm7 \n\t"/* 4 ; y7 y6 y5 y4*/\ - "movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\ - - -//----------------------------------------------------------------------------- -// DCT_8_INV_ROW_XMM( INP, OUT, TABLE, ROUNDER -//----------------------------------------------------------------------------- - -#define DCT_8_INV_ROW_XMM(A1,A2,A3,A4)\ - "movq " #A1 ",%%mm0 \n\t"/* 0 ; x3 x2 x1 x0*/\ - "movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\ - "movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\ - "movq " #A3 ",%%mm3 \n\t"/* 3 ; w05 w04 w01 w00*/\ - "pshufw $0x88,%%mm0,%%mm0 \n\t"/* x2 x0 x2 x0*/\ - "movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w06 w03 w02*/\ - "movq %%mm1,%%mm5 \n\t"/* 5 ; x7 x6 x5 x4*/\ - "pmaddwd %%mm0,%%mm3 \n\t"/* x2*w05+x0*w04 x2*w01+x0*w00*/\ - "movq 32+" #A3 ",%%mm6 \n\t"/* 6 ; w21 w20 w17 w16*/\ - "pshufw $0x88,%%mm1,%%mm1 \n\t"/* x6 x4 x6 x4*/\ - "pmaddwd %%mm1,%%mm4 \n\t"/* x6*w07+x4*w06 x6*w03+x4*w02*/\ - "movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w22 w19 w18*/\ - "pshufw $0xdd,%%mm2,%%mm2 \n\t"/* x3 x1 x3 x1*/\ - "pmaddwd %%mm2,%%mm6 \n\t"/* x3*w21+x1*w20 x3*w17+x1*w16*/\ - "pshufw $0xdd,%%mm5,%%mm5 \n\t"/* x7 x5 x7 x5*/\ - "pmaddwd %%mm5,%%mm7 \n\t"/* x7*w23+x5*w22 x7*w19+x5*w18*/\ - "paddd " #A4 ",%%mm3 \n\t"/* +%4*/\ - "pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x2*w13+x0*w12 x2*w09+x0*w08*/\ - "paddd %%mm4,%%mm3 \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\ - "pmaddwd 24+" #A3 ",%%mm1 \n\t"/* x6*w15+x4*w14 x6*w11+x4*w10*/\ - "movq %%mm3,%%mm4 \n\t"/* 4 ; a1 a0*/\ - "pmaddwd 48+" #A3 ",%%mm2 \n\t"/* x3*w29+x1*w28 x3*w25+x1*w24*/\ - "paddd %%mm7,%%mm6 \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\ - "pmaddwd 56+" #A3 ",%%mm5 \n\t"/* x7*w31+x5*w30 x7*w27+x5*w26*/\ - "paddd %%mm6,%%mm3 \n\t"/* a1+b1 a0+b0*/\ - "paddd " #A4 ",%%mm0 \n\t"/* +%4*/\ - "psrad $11,%%mm3 \n\t"/* y1=a1+b1 y0=a0+b0*/\ - "paddd %%mm1,%%mm0 \n\t"/* 1 ; a3=sum(even3) a2=sum(even2)*/\ - "psubd %%mm6,%%mm4 \n\t"/* 6 ; a1-b1 a0-b0*/\ - "movq %%mm0,%%mm7 \n\t"/* 7 ; a3 a2*/\ - "paddd %%mm5,%%mm2 \n\t"/* 5 ; b3=sum(odd3) b2=sum(odd2)*/\ - "paddd %%mm2,%%mm0 \n\t"/* a3+b3 a2+b2*/\ - "psrad $11,%%mm4 \n\t"/* y6=a1-b1 y7=a0-b0*/\ - "psubd %%mm2,%%mm7 \n\t"/* 2 ; a3-b3 a2-b2*/\ - "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\ - "psrad $11,%%mm7 \n\t"/* y4=a3-b3 y5=a2-b2*/\ - "packssdw %%mm0,%%mm3 \n\t"/* 0 ; y3 y2 y1 y0*/\ - "packssdw %%mm4,%%mm7 \n\t"/* 4 ; y6 y7 y4 y5*/\ - "movq %%mm3, " #A2 " \n\t"/* 3 ; save y3 y2 y1 y0*/\ - "pshufw $0xb1,%%mm7,%%mm7 \n\t"/* y7 y6 y5 y4*/\ - "movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\ - - -//----------------------------------------------------------------------------- -// -// The first stage DCT 8x8 - forward DCTs of columns -// -// The %2puts are multiplied -// for rows 0,4 - on cos_4_16, -// for rows 1,7 - on cos_1_16, -// for rows 2,6 - on cos_2_16, -// for rows 3,5 - on cos_3_16 -// and are shifted to the left for rise of accuracy -// -//----------------------------------------------------------------------------- -// -// The 8-point scaled forward DCT algorithm (26a8m) -// 
-//----------------------------------------------------------------------------- -// -// #define DCT_8_FRW_COL(x, y) -//{ -// short t0, t1, t2, t3, t4, t5, t6, t7; -// short tp03, tm03, tp12, tm12, tp65, tm65; -// short tp465, tm465, tp765, tm765; -// -// t0 = LEFT_SHIFT ( x[0] + x[7] ); -// t1 = LEFT_SHIFT ( x[1] + x[6] ); -// t2 = LEFT_SHIFT ( x[2] + x[5] ); -// t3 = LEFT_SHIFT ( x[3] + x[4] ); -// t4 = LEFT_SHIFT ( x[3] - x[4] ); -// t5 = LEFT_SHIFT ( x[2] - x[5] ); -// t6 = LEFT_SHIFT ( x[1] - x[6] ); -// t7 = LEFT_SHIFT ( x[0] - x[7] ); -// -// tp03 = t0 + t3; -// tm03 = t0 - t3; -// tp12 = t1 + t2; -// tm12 = t1 - t2; -// -// y[0] = tp03 + tp12; -// y[4] = tp03 - tp12; -// -// y[2] = tm03 + tm12 * tg_2_16; -// y[6] = tm03 * tg_2_16 - tm12; -// -// tp65 =(t6 +t5 )*cos_4_16; -// tm65 =(t6 -t5 )*cos_4_16; -// -// tp765 = t7 + tp65; -// tm765 = t7 - tp65; -// tp465 = t4 + tm65; -// tm465 = t4 - tm65; -// -// y[1] = tp765 + tp465 * tg_1_16; -// y[7] = tp765 * tg_1_16 - tp465; -// y[5] = tm765 * tg_3_16 + tm465; -// y[3] = tm765 - tm465 * tg_3_16; -//} -// -//----------------------------------------------------------------------------- - -//----------------------------------------------------------------------------- -// DCT_8_INV_COL_4 INP,OUT -//----------------------------------------------------------------------------- - -#define DCT_8_INV_COL(A1,A2)\ - "movq 2*8(%3),%%mm0\n\t"\ - "movq 16*3+" #A1 ",%%mm3\n\t"\ - "movq %%mm0,%%mm1 \n\t"/* tg_3_16*/\ - "movq 16*5+" #A1 ",%%mm5\n\t"\ - "pmulhw %%mm3,%%mm0 \n\t"/* x3*(tg_3_16-1)*/\ - "movq (%3),%%mm4\n\t"\ - "pmulhw %%mm5,%%mm1 \n\t"/* x5*(tg_3_16-1)*/\ - "movq 16*7+" #A1 ",%%mm7\n\t"\ - "movq %%mm4,%%mm2 \n\t"/* tg_1_16*/\ - "movq 16*1+" #A1 ",%%mm6\n\t"\ - "pmulhw %%mm7,%%mm4 \n\t"/* x7*tg_1_16*/\ - "paddsw %%mm3,%%mm0 \n\t"/* x3*tg_3_16*/\ - "pmulhw %%mm6,%%mm2 \n\t"/* x1*tg_1_16*/\ - "paddsw %%mm3,%%mm1 \n\t"/* x3+x5*(tg_3_16-1)*/\ - "psubsw %%mm5,%%mm0 \n\t"/* x3*tg_3_16-x5 = tm35*/\ - "movq 3*8(%3),%%mm3\n\t"\ - "paddsw %%mm5,%%mm1 \n\t"/* x3+x5*tg_3_16 = tp35*/\ - "paddsw %%mm6,%%mm4 \n\t"/* x1+tg_1_16*x7 = tp17*/\ - "psubsw %%mm7,%%mm2 \n\t"/* x1*tg_1_16-x7 = tm17*/\ - "movq %%mm4,%%mm5 \n\t"/* tp17*/\ - "movq %%mm2,%%mm6 \n\t"/* tm17*/\ - "paddsw %%mm1,%%mm5 \n\t"/* tp17+tp35 = b0*/\ - "psubsw %%mm0,%%mm6 \n\t"/* tm17-tm35 = b3*/\ - "psubsw %%mm1,%%mm4 \n\t"/* tp17-tp35 = t1*/\ - "paddsw %%mm0,%%mm2 \n\t"/* tm17+tm35 = t2*/\ - "movq 1*8(%3),%%mm7\n\t"\ - "movq %%mm4,%%mm1 \n\t"/* t1*/\ - "movq %%mm5,3*16 +" #A2 "\n\t"/* save b0*/\ - "paddsw %%mm2,%%mm1 \n\t"/* t1+t2*/\ - "movq %%mm6,5*16 +" #A2 "\n\t"/* save b3*/\ - "psubsw %%mm2,%%mm4 \n\t"/* t1-t2*/\ - "movq 2*16+" #A1 ",%%mm5\n\t"\ - "movq %%mm7,%%mm0 \n\t"/* tg_2_16*/\ - "movq 6*16+" #A1 ",%%mm6\n\t"\ - "pmulhw %%mm5,%%mm0 \n\t"/* x2*tg_2_16*/\ - "pmulhw %%mm6,%%mm7 \n\t"/* x6*tg_2_16*/\ - "pmulhw %%mm3,%%mm1 \n\t"/* ocos_4_16*(t1+t2) = b1/2*/\ - "movq 0*16+" #A1 ",%%mm2\n\t"\ - "pmulhw %%mm3,%%mm4 \n\t"/* ocos_4_16*(t1-t2) = b2/2*/\ - "psubsw %%mm6,%%mm0 \n\t"/* t2*tg_2_16-x6 = tm26*/\ - "movq %%mm2,%%mm3 \n\t"/* x0*/\ - "movq 4*16+" #A1 ",%%mm6\n\t"\ - "paddsw %%mm5,%%mm7 \n\t"/* x2+x6*tg_2_16 = tp26*/\ - "paddsw %%mm6,%%mm2 \n\t"/* x0+x4 = tp04*/\ - "psubsw %%mm6,%%mm3 \n\t"/* x0-x4 = tm04*/\ - "movq %%mm2,%%mm5 \n\t"/* tp04*/\ - "movq %%mm3,%%mm6 \n\t"/* tm04*/\ - "psubsw %%mm7,%%mm2 \n\t"/* tp04-tp26 = a3*/\ - "paddsw %%mm0,%%mm3 \n\t"/* tm04+tm26 = a1*/\ - "paddsw %%mm1,%%mm1 \n\t"/* b1*/\ - "paddsw %%mm4,%%mm4 \n\t"/* b2*/\ - "paddsw %%mm7,%%mm5 \n\t"/* tp04+tp26 = 
a0*/\ - "psubsw %%mm0,%%mm6 \n\t"/* tm04-tm26 = a2*/\ - "movq %%mm3,%%mm7 \n\t"/* a1*/\ - "movq %%mm6,%%mm0 \n\t"/* a2*/\ - "paddsw %%mm1,%%mm3 \n\t"/* a1+b1*/\ - "paddsw %%mm4,%%mm6 \n\t"/* a2+b2*/\ - "psraw $6,%%mm3 \n\t"/* dst1*/\ - "psubsw %%mm1,%%mm7 \n\t"/* a1-b1*/\ - "psraw $6,%%mm6 \n\t"/* dst2*/\ - "psubsw %%mm4,%%mm0 \n\t"/* a2-b2*/\ - "movq 3*16+" #A2 ",%%mm1 \n\t"/* load b0*/\ - "psraw $6,%%mm7 \n\t"/* dst6*/\ - "movq %%mm5,%%mm4 \n\t"/* a0*/\ - "psraw $6,%%mm0 \n\t"/* dst5*/\ - "movq %%mm3,1*16+" #A2 "\n\t"\ - "paddsw %%mm1,%%mm5 \n\t"/* a0+b0*/\ - "movq %%mm6,2*16+" #A2 "\n\t"\ - "psubsw %%mm1,%%mm4 \n\t"/* a0-b0*/\ - "movq 5*16+" #A2 ",%%mm3 \n\t"/* load b3*/\ - "psraw $6,%%mm5 \n\t"/* dst0*/\ - "movq %%mm2,%%mm6 \n\t"/* a3*/\ - "psraw $6,%%mm4 \n\t"/* dst7*/\ - "movq %%mm0,5*16+" #A2 "\n\t"\ - "paddsw %%mm3,%%mm2 \n\t"/* a3+b3*/\ - "movq %%mm7,6*16+" #A2 "\n\t"\ - "psubsw %%mm3,%%mm6 \n\t"/* a3-b3*/\ - "movq %%mm5,0*16+" #A2 "\n\t"\ - "psraw $6,%%mm2 \n\t"/* dst3*/\ - "movq %%mm4,7*16+" #A2 "\n\t"\ - "psraw $6,%%mm6 \n\t"/* dst4*/\ - "movq %%mm2,3*16+" #A2 "\n\t"\ - "movq %%mm6,4*16+" #A2 "\n\t" - -//============================================================================= -// Code -//============================================================================= - -//----------------------------------------------------------------------------- -// void idct_mmx(uint16_t block[64]); -//----------------------------------------------------------------------------- - - -void ff_idct_xvid_mmx(short *block){ -asm volatile( - //# Process each row - DCT_8_INV_ROW_MMX(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1)) - DCT_8_INV_ROW_MMX(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1)) - DCT_8_INV_ROW_MMX(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1)) - DCT_8_INV_ROW_MMX(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1)) - DCT_8_INV_ROW_MMX(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1)) - DCT_8_INV_ROW_MMX(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1)) - DCT_8_INV_ROW_MMX(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1)) - DCT_8_INV_ROW_MMX(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1)) - - //# Process the columns (4 at a time) - DCT_8_INV_COL(0(%0), 0(%0)) - DCT_8_INV_COL(8(%0), 8(%0)) - :: "r"(block), "r"(rounder_0), "r"(tab_i_04_mmx), "r"(tg_1_16)); -} - -//----------------------------------------------------------------------------- -// void idct_xmm(uint16_t block[64]); -//----------------------------------------------------------------------------- - - -void ff_idct_xvid_mmx2(short *block){ -asm volatile( - //# Process each row - DCT_8_INV_ROW_XMM(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1)) - DCT_8_INV_ROW_XMM(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1)) - DCT_8_INV_ROW_XMM(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1)) - DCT_8_INV_ROW_XMM(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1)) - DCT_8_INV_ROW_XMM(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1)) - DCT_8_INV_ROW_XMM(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1)) - DCT_8_INV_ROW_XMM(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1)) - DCT_8_INV_ROW_XMM(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1)) - - //# Process the columns (4 at a time) - DCT_8_INV_COL(0(%0), 0(%0)) - DCT_8_INV_COL(8(%0), 8(%0)) - :: "r"(block), "r"(rounder_0), "r"(tab_i_04_xmm), "r"(tg_1_16)); -} - diff --git a/src/libffmpeg/libavcodec/i386/libavcodec_mmx_dummy.c b/src/libffmpeg/libavcodec/i386/libavcodec_mmx_dummy.c deleted file mode 100644 index 04a6fc298..000000000 --- a/src/libffmpeg/libavcodec/i386/libavcodec_mmx_dummy.c +++ /dev/null @@ -1,2 +0,0 @@ - -char libavcodec_mmx_dummy; diff --git a/src/libffmpeg/libavcodec/i386/mathops.h b/src/libffmpeg/libavcodec/i386/mathops.h deleted 
file mode 100644 index 3553a4025..000000000 --- a/src/libffmpeg/libavcodec/i386/mathops.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * simple math operations - * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifdef FRAC_BITS -# define MULL(ra, rb) \ - ({ int rt, dummy; asm (\ - "imull %3 \n\t"\ - "shrdl %4, %%edx, %%eax \n\t"\ - : "=a"(rt), "=d"(dummy)\ - : "a" (ra), "rm" (rb), "i"(FRAC_BITS));\ - rt; }) -#endif - -#define MULH(ra, rb) \ - ({ int rt, dummy;\ - asm ("imull %3\n\t" : "=d"(rt), "=a"(dummy): "a" (ra), "rm" (rb));\ - rt; }) - -#define MUL64(ra, rb) \ - ({ int64_t rt;\ - asm ("imull %2\n\t" : "=A"(rt) : "a" (ra), "g" (rb));\ - rt; }) - diff --git a/src/libffmpeg/libavcodec/i386/mmx.h b/src/libffmpeg/libavcodec/i386/mmx.h deleted file mode 100644 index 41aae6c21..000000000 --- a/src/libffmpeg/libavcodec/i386/mmx.h +++ /dev/null @@ -1,283 +0,0 @@ -/* - * mmx.h - * Copyright (C) 1997-2001 H. Dietz and R. Fisher - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
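For reference, behavioural C equivalents of the three mathops.h macros above (sketches of the intended semantics; the inline-asm versions exist to keep the operands in 32-bit registers and read the high half of the product straight out of %edx):

    #include <stdint.h>

    /* MULH: high 32 bits of the signed 32x32->64 product. */
    static inline int mulh_c(int a, int b)
    {
        return (int)(((int64_t)a * b) >> 32);
    }

    /* MUL64: full signed 64-bit product. */
    static inline int64_t mul64_c(int a, int b)
    {
        return (int64_t)a * b;
    }

    /* MULL: fixed-point multiply, low 32 bits of (a*b) >> FRAC_BITS,
     * matching the imull + shrdl pair. */
    static inline int mull_c(int a, int b, int frac_bits)
    {
        return (int)(((int64_t)a * b) >> frac_bits);
    }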
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#ifndef AVCODEC_I386MMX_H -#define AVCODEC_I386MMX_H - -/* - * The type of an value that fits in an MMX register (note that long - * long constant values MUST be suffixed by LL and unsigned long long - * values by ULL, lest they be truncated by the compiler) - */ - -typedef union { - long long q; /* Quadword (64-bit) value */ - unsigned long long uq; /* Unsigned Quadword */ - int d[2]; /* 2 Doubleword (32-bit) values */ - unsigned int ud[2]; /* 2 Unsigned Doubleword */ - short w[4]; /* 4 Word (16-bit) values */ - unsigned short uw[4]; /* 4 Unsigned Word */ - char b[8]; /* 8 Byte (8-bit) values */ - unsigned char ub[8]; /* 8 Unsigned Byte */ - float s[2]; /* Single-precision (32-bit) value */ -} mmx_t; /* On an 8-byte (64-bit) boundary */ - - -#define mmx_i2r(op,imm,reg) \ - __asm__ __volatile__ (#op " %0, %%" #reg \ - : /* nothing */ \ - : "i" (imm) ) - -#define mmx_m2r(op,mem,reg) \ - __asm__ __volatile__ (#op " %0, %%" #reg \ - : /* nothing */ \ - : "m" (mem)) - -#define mmx_r2m(op,reg,mem) \ - __asm__ __volatile__ (#op " %%" #reg ", %0" \ - : "=m" (mem) \ - : /* nothing */ ) - -#define mmx_r2r(op,regs,regd) \ - __asm__ __volatile__ (#op " %" #regs ", %" #regd) - - -#define emms() __asm__ __volatile__ ("emms") - -#define movd_m2r(var,reg) mmx_m2r (movd, var, reg) -#define movd_r2m(reg,var) mmx_r2m (movd, reg, var) -#define movd_r2r(regs,regd) mmx_r2r (movd, regs, regd) - -#define movq_m2r(var,reg) mmx_m2r (movq, var, reg) -#define movq_r2m(reg,var) mmx_r2m (movq, reg, var) -#define movq_r2r(regs,regd) mmx_r2r (movq, regs, regd) - -#define packssdw_m2r(var,reg) mmx_m2r (packssdw, var, reg) -#define packssdw_r2r(regs,regd) mmx_r2r (packssdw, regs, regd) -#define packsswb_m2r(var,reg) mmx_m2r (packsswb, var, reg) -#define packsswb_r2r(regs,regd) mmx_r2r (packsswb, regs, regd) - -#define packuswb_m2r(var,reg) mmx_m2r (packuswb, var, reg) -#define packuswb_r2r(regs,regd) mmx_r2r (packuswb, regs, regd) - -#define paddb_m2r(var,reg) mmx_m2r (paddb, var, reg) -#define paddb_r2r(regs,regd) mmx_r2r (paddb, regs, regd) -#define paddd_m2r(var,reg) mmx_m2r (paddd, var, reg) -#define paddd_r2r(regs,regd) mmx_r2r (paddd, regs, regd) -#define paddw_m2r(var,reg) mmx_m2r (paddw, var, reg) -#define paddw_r2r(regs,regd) mmx_r2r (paddw, regs, regd) - -#define paddsb_m2r(var,reg) mmx_m2r (paddsb, var, reg) -#define paddsb_r2r(regs,regd) mmx_r2r (paddsb, regs, regd) -#define paddsw_m2r(var,reg) mmx_m2r (paddsw, var, reg) -#define paddsw_r2r(regs,regd) mmx_r2r (paddsw, regs, regd) - -#define paddusb_m2r(var,reg) mmx_m2r (paddusb, var, reg) -#define paddusb_r2r(regs,regd) mmx_r2r (paddusb, regs, regd) -#define paddusw_m2r(var,reg) mmx_m2r (paddusw, var, reg) -#define paddusw_r2r(regs,regd) mmx_r2r (paddusw, regs, regd) - -#define pand_m2r(var,reg) mmx_m2r (pand, var, reg) -#define pand_r2r(regs,regd) mmx_r2r (pand, regs, regd) - -#define pandn_m2r(var,reg) mmx_m2r (pandn, var, reg) -#define pandn_r2r(regs,regd) mmx_r2r (pandn, regs, regd) - -#define pcmpeqb_m2r(var,reg) mmx_m2r (pcmpeqb, var, reg) -#define pcmpeqb_r2r(regs,regd) mmx_r2r (pcmpeqb, regs, regd) -#define pcmpeqd_m2r(var,reg) mmx_m2r (pcmpeqd, var, reg) -#define pcmpeqd_r2r(regs,regd) mmx_r2r (pcmpeqd, regs, regd) -#define pcmpeqw_m2r(var,reg) mmx_m2r (pcmpeqw, var, reg) -#define pcmpeqw_r2r(regs,regd) mmx_r2r (pcmpeqw, regs, 
regd) - -#define pcmpgtb_m2r(var,reg) mmx_m2r (pcmpgtb, var, reg) -#define pcmpgtb_r2r(regs,regd) mmx_r2r (pcmpgtb, regs, regd) -#define pcmpgtd_m2r(var,reg) mmx_m2r (pcmpgtd, var, reg) -#define pcmpgtd_r2r(regs,regd) mmx_r2r (pcmpgtd, regs, regd) -#define pcmpgtw_m2r(var,reg) mmx_m2r (pcmpgtw, var, reg) -#define pcmpgtw_r2r(regs,regd) mmx_r2r (pcmpgtw, regs, regd) - -#define pmaddwd_m2r(var,reg) mmx_m2r (pmaddwd, var, reg) -#define pmaddwd_r2r(regs,regd) mmx_r2r (pmaddwd, regs, regd) - -#define pmulhw_m2r(var,reg) mmx_m2r (pmulhw, var, reg) -#define pmulhw_r2r(regs,regd) mmx_r2r (pmulhw, regs, regd) - -#define pmullw_m2r(var,reg) mmx_m2r (pmullw, var, reg) -#define pmullw_r2r(regs,regd) mmx_r2r (pmullw, regs, regd) - -#define por_m2r(var,reg) mmx_m2r (por, var, reg) -#define por_r2r(regs,regd) mmx_r2r (por, regs, regd) - -#define pslld_i2r(imm,reg) mmx_i2r (pslld, imm, reg) -#define pslld_m2r(var,reg) mmx_m2r (pslld, var, reg) -#define pslld_r2r(regs,regd) mmx_r2r (pslld, regs, regd) -#define psllq_i2r(imm,reg) mmx_i2r (psllq, imm, reg) -#define psllq_m2r(var,reg) mmx_m2r (psllq, var, reg) -#define psllq_r2r(regs,regd) mmx_r2r (psllq, regs, regd) -#define psllw_i2r(imm,reg) mmx_i2r (psllw, imm, reg) -#define psllw_m2r(var,reg) mmx_m2r (psllw, var, reg) -#define psllw_r2r(regs,regd) mmx_r2r (psllw, regs, regd) - -#define psrad_i2r(imm,reg) mmx_i2r (psrad, imm, reg) -#define psrad_m2r(var,reg) mmx_m2r (psrad, var, reg) -#define psrad_r2r(regs,regd) mmx_r2r (psrad, regs, regd) -#define psraw_i2r(imm,reg) mmx_i2r (psraw, imm, reg) -#define psraw_m2r(var,reg) mmx_m2r (psraw, var, reg) -#define psraw_r2r(regs,regd) mmx_r2r (psraw, regs, regd) - -#define psrld_i2r(imm,reg) mmx_i2r (psrld, imm, reg) -#define psrld_m2r(var,reg) mmx_m2r (psrld, var, reg) -#define psrld_r2r(regs,regd) mmx_r2r (psrld, regs, regd) -#define psrlq_i2r(imm,reg) mmx_i2r (psrlq, imm, reg) -#define psrlq_m2r(var,reg) mmx_m2r (psrlq, var, reg) -#define psrlq_r2r(regs,regd) mmx_r2r (psrlq, regs, regd) -#define psrlw_i2r(imm,reg) mmx_i2r (psrlw, imm, reg) -#define psrlw_m2r(var,reg) mmx_m2r (psrlw, var, reg) -#define psrlw_r2r(regs,regd) mmx_r2r (psrlw, regs, regd) - -#define psubb_m2r(var,reg) mmx_m2r (psubb, var, reg) -#define psubb_r2r(regs,regd) mmx_r2r (psubb, regs, regd) -#define psubd_m2r(var,reg) mmx_m2r (psubd, var, reg) -#define psubd_r2r(regs,regd) mmx_r2r (psubd, regs, regd) -#define psubw_m2r(var,reg) mmx_m2r (psubw, var, reg) -#define psubw_r2r(regs,regd) mmx_r2r (psubw, regs, regd) - -#define psubsb_m2r(var,reg) mmx_m2r (psubsb, var, reg) -#define psubsb_r2r(regs,regd) mmx_r2r (psubsb, regs, regd) -#define psubsw_m2r(var,reg) mmx_m2r (psubsw, var, reg) -#define psubsw_r2r(regs,regd) mmx_r2r (psubsw, regs, regd) - -#define psubusb_m2r(var,reg) mmx_m2r (psubusb, var, reg) -#define psubusb_r2r(regs,regd) mmx_r2r (psubusb, regs, regd) -#define psubusw_m2r(var,reg) mmx_m2r (psubusw, var, reg) -#define psubusw_r2r(regs,regd) mmx_r2r (psubusw, regs, regd) - -#define punpckhbw_m2r(var,reg) mmx_m2r (punpckhbw, var, reg) -#define punpckhbw_r2r(regs,regd) mmx_r2r (punpckhbw, regs, regd) -#define punpckhdq_m2r(var,reg) mmx_m2r (punpckhdq, var, reg) -#define punpckhdq_r2r(regs,regd) mmx_r2r (punpckhdq, regs, regd) -#define punpckhwd_m2r(var,reg) mmx_m2r (punpckhwd, var, reg) -#define punpckhwd_r2r(regs,regd) mmx_r2r (punpckhwd, regs, regd) - -#define punpcklbw_m2r(var,reg) mmx_m2r (punpcklbw, var, reg) -#define punpcklbw_r2r(regs,regd) mmx_r2r (punpcklbw, regs, regd) -#define punpckldq_m2r(var,reg) mmx_m2r (punpckldq, var, 
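/* Another idiom built from the unpack wrappers above, used heavily by the
 * SIMD code later in this diff: interleaving with a zeroed register
 * zero-extends packed bytes to words, i.e. w[i] = (uint16_t)b[i]. A usage
 * sketch with illustrative names: */
static inline void widen_bytes_example(const mmx_t *src, mmx_t *lo, mmx_t *hi)
{
    movq_m2r(src->q, mm0);    /* mm0 = 8 unsigned bytes           */
    movq_r2r(mm0, mm1);       /* copy for the high half           */
    pxor_r2r(mm7, mm7);       /* mm7 = 0                          */
    punpcklbw_r2r(mm7, mm0);  /* mm0 = low 4 bytes as 4 x uint16  */
    punpckhbw_r2r(mm7, mm1);  /* mm1 = high 4 bytes as 4 x uint16 */
    movq_r2m(mm0, lo->q);
    movq_r2m(mm1, hi->q);
    emms();
}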
reg) -#define punpckldq_r2r(regs,regd) mmx_r2r (punpckldq, regs, regd) -#define punpcklwd_m2r(var,reg) mmx_m2r (punpcklwd, var, reg) -#define punpcklwd_r2r(regs,regd) mmx_r2r (punpcklwd, regs, regd) - -#define pxor_m2r(var,reg) mmx_m2r (pxor, var, reg) -#define pxor_r2r(regs,regd) mmx_r2r (pxor, regs, regd) - - -/* 3DNOW extensions */ - -#define pavgusb_m2r(var,reg) mmx_m2r (pavgusb, var, reg) -#define pavgusb_r2r(regs,regd) mmx_r2r (pavgusb, regs, regd) - - -/* AMD MMX extensions - also available in intel SSE */ - - -#define mmx_m2ri(op,mem,reg,imm) \ - __asm__ __volatile__ (#op " %1, %0, %%" #reg \ - : /* nothing */ \ - : "m" (mem), "i" (imm)) -#define mmx_r2ri(op,regs,regd,imm) \ - __asm__ __volatile__ (#op " %0, %%" #regs ", %%" #regd \ - : /* nothing */ \ - : "i" (imm) ) - -#define mmx_fetch(mem,hint) \ - __asm__ __volatile__ ("prefetch" #hint " %0" \ - : /* nothing */ \ - : "m" (mem)) - - -#define maskmovq(regs,maskreg) mmx_r2ri (maskmovq, regs, maskreg) - -#define movntq_r2m(mmreg,var) mmx_r2m (movntq, mmreg, var) - -#define pavgb_m2r(var,reg) mmx_m2r (pavgb, var, reg) -#define pavgb_r2r(regs,regd) mmx_r2r (pavgb, regs, regd) -#define pavgw_m2r(var,reg) mmx_m2r (pavgw, var, reg) -#define pavgw_r2r(regs,regd) mmx_r2r (pavgw, regs, regd) - -#define pextrw_r2r(mmreg,reg,imm) mmx_r2ri (pextrw, mmreg, reg, imm) - -#define pinsrw_r2r(reg,mmreg,imm) mmx_r2ri (pinsrw, reg, mmreg, imm) - -#define pmaxsw_m2r(var,reg) mmx_m2r (pmaxsw, var, reg) -#define pmaxsw_r2r(regs,regd) mmx_r2r (pmaxsw, regs, regd) - -#define pmaxub_m2r(var,reg) mmx_m2r (pmaxub, var, reg) -#define pmaxub_r2r(regs,regd) mmx_r2r (pmaxub, regs, regd) - -#define pminsw_m2r(var,reg) mmx_m2r (pminsw, var, reg) -#define pminsw_r2r(regs,regd) mmx_r2r (pminsw, regs, regd) - -#define pminub_m2r(var,reg) mmx_m2r (pminub, var, reg) -#define pminub_r2r(regs,regd) mmx_r2r (pminub, regs, regd) - -#define pmovmskb(mmreg,reg) \ - __asm__ __volatile__ ("movmskps %" #mmreg ", %" #reg) - -#define pmulhuw_m2r(var,reg) mmx_m2r (pmulhuw, var, reg) -#define pmulhuw_r2r(regs,regd) mmx_r2r (pmulhuw, regs, regd) - -#define prefetcht0(mem) mmx_fetch (mem, t0) -#define prefetcht1(mem) mmx_fetch (mem, t1) -#define prefetcht2(mem) mmx_fetch (mem, t2) -#define prefetchnta(mem) mmx_fetch (mem, nta) - -#define psadbw_m2r(var,reg) mmx_m2r (psadbw, var, reg) -#define psadbw_r2r(regs,regd) mmx_r2r (psadbw, regs, regd) - -#define pshufw_m2r(var,reg,imm) mmx_m2ri(pshufw, var, reg, imm) -#define pshufw_r2r(regs,regd,imm) mmx_r2ri(pshufw, regs, regd, imm) - -#define sfence() __asm__ __volatile__ ("sfence\n\t") - -/* SSE2 */ -#define pshufhw_m2r(var,reg,imm) mmx_m2ri(pshufhw, var, reg, imm) -#define pshufhw_r2r(regs,regd,imm) mmx_r2ri(pshufhw, regs, regd, imm) -#define pshuflw_m2r(var,reg,imm) mmx_m2ri(pshuflw, var, reg, imm) -#define pshuflw_r2r(regs,regd,imm) mmx_r2ri(pshuflw, regs, regd, imm) - -#define pshufd_r2r(regs,regd,imm) mmx_r2ri(pshufd, regs, regd, imm) - -#define movdqa_m2r(var,reg) mmx_m2r (movdqa, var, reg) -#define movdqa_r2m(reg,var) mmx_r2m (movdqa, reg, var) -#define movdqa_r2r(regs,regd) mmx_r2r (movdqa, regs, regd) -#define movdqu_m2r(var,reg) mmx_m2r (movdqu, var, reg) -#define movdqu_r2m(reg,var) mmx_r2m (movdqu, reg, var) -#define movdqu_r2r(regs,regd) mmx_r2r (movdqu, regs, regd) - -#define pmullw_r2m(reg,var) mmx_r2m (pmullw, reg, var) - -#define pslldq_i2r(imm,reg) mmx_i2r (pslldq, imm, reg) -#define psrldq_i2r(imm,reg) mmx_i2r (psrldq, imm, reg) - -#define punpcklqdq_r2r(regs,regd) mmx_r2r (punpcklqdq, regs, regd) -#define 
punpckhqdq_r2r(regs,regd) mmx_r2r (punpckhqdq, regs, regd) - - -#endif /* AVCODEC_I386MMX_H */ diff --git a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c b/src/libffmpeg/libavcodec/i386/motion_est_mmx.c deleted file mode 100644 index e33870e0f..000000000 --- a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c +++ /dev/null @@ -1,408 +0,0 @@ -/* - * MMX optimized motion estimation - * Copyright (c) 2001 Fabrice Bellard. - * Copyright (c) 2002-2004 Michael Niedermayer - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - * mostly by Michael Niedermayer <michaelni@gmx.at> - */ -#include "../dsputil.h" -#include "x86_cpu.h" - -static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={ -0x0000000000000000ULL, -0x0001000100010001ULL, -0x0002000200020002ULL, -}; - -static attribute_used __attribute__ ((aligned(8))) uint64_t bone= 0x0101010101010101LL; - -static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) -{ - long len= -(stride*h); - asm volatile( - ASMALIGN(4) - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq (%2, %%"REG_a"), %%mm2 \n\t" - "movq (%2, %%"REG_a"), %%mm4 \n\t" - "add %3, %%"REG_a" \n\t" - "psubusb %%mm0, %%mm2 \n\t" - "psubusb %%mm4, %%mm0 \n\t" - "movq (%1, %%"REG_a"), %%mm1 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "movq (%2, %%"REG_a"), %%mm5 \n\t" - "psubusb %%mm1, %%mm3 \n\t" - "psubusb %%mm5, %%mm1 \n\t" - "por %%mm2, %%mm0 \n\t" - "por %%mm1, %%mm3 \n\t" - "movq %%mm0, %%mm1 \n\t" - "movq %%mm3, %%mm2 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "punpcklbw %%mm7, %%mm3 \n\t" - "punpckhbw %%mm7, %%mm2 \n\t" - "paddw %%mm1, %%mm0 \n\t" - "paddw %%mm3, %%mm2 \n\t" - "paddw %%mm2, %%mm0 \n\t" - "paddw %%mm0, %%mm6 \n\t" - "add %3, %%"REG_a" \n\t" - " js 1b \n\t" - : "+a" (len) - : "r" (blk1 - len), "r" (blk2 - len), "r" ((long)stride) - ); -} - -static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) -{ - long len= -(stride*h); - asm volatile( - ASMALIGN(4) - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq (%2, %%"REG_a"), %%mm2 \n\t" - "psadbw %%mm2, %%mm0 \n\t" - "add %3, %%"REG_a" \n\t" - "movq (%1, %%"REG_a"), %%mm1 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "psadbw %%mm1, %%mm3 \n\t" - "paddw %%mm3, %%mm0 \n\t" - "paddw %%mm0, %%mm6 \n\t" - "add %3, %%"REG_a" \n\t" - " js 1b \n\t" - : "+a" (len) - : "r" (blk1 - len), "r" (blk2 - len), "r" ((long)stride) - ); -} - -static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h) -{ - long len= -(stride*h); - asm volatile( - ASMALIGN(4) - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq (%2, %%"REG_a"), %%mm2 \n\t" - "pavgb %%mm2, %%mm0 \n\t" - "movq (%3, %%"REG_a"), %%mm2 \n\t" - "psadbw %%mm2, %%mm0 \n\t" - "add %4, %%"REG_a" \n\t" - "movq (%1, %%"REG_a"), %%mm1 \n\t" - 
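/* Scalar reference for the sad8_1_* kernels above (a sketch, not the
 * shipped C path): for unsigned bytes, sat(a-b) | sat(b-a) == |a-b|, which
 * is exactly what the psubusb/psubusb/por triple computes per byte before
 * the results are widened and accumulated into mm6. */
static int sad8_ref(const uint8_t *blk1, const uint8_t *blk2, int stride, int h)
{
    int sum = 0, x, y;
    for (y = 0; y < h; y++)
        for (x = 0; x < 8; x++) {
            int d = blk1[y * stride + x] - blk2[y * stride + x];
            sum += d < 0 ? -d : d;
        }
    return sum;
}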
"movq (%2, %%"REG_a"), %%mm3 \n\t" - "pavgb %%mm1, %%mm3 \n\t" - "movq (%3, %%"REG_a"), %%mm1 \n\t" - "psadbw %%mm1, %%mm3 \n\t" - "paddw %%mm3, %%mm0 \n\t" - "paddw %%mm0, %%mm6 \n\t" - "add %4, %%"REG_a" \n\t" - " js 1b \n\t" - : "+a" (len) - : "r" (blk1a - len), "r" (blk1b -len), "r" (blk2 - len), "r" ((long)stride) - ); -} - -static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h) -{ //FIXME reuse src - long len= -(stride*h); - asm volatile( - ASMALIGN(4) - "movq "MANGLE(bone)", %%mm5 \n\t" - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq (%2, %%"REG_a"), %%mm2 \n\t" - "movq 1(%1, %%"REG_a"), %%mm1 \n\t" - "movq 1(%2, %%"REG_a"), %%mm3 \n\t" - "pavgb %%mm2, %%mm0 \n\t" - "pavgb %%mm1, %%mm3 \n\t" - "psubusb %%mm5, %%mm3 \n\t" - "pavgb %%mm3, %%mm0 \n\t" - "movq (%3, %%"REG_a"), %%mm2 \n\t" - "psadbw %%mm2, %%mm0 \n\t" - "add %4, %%"REG_a" \n\t" - "movq (%1, %%"REG_a"), %%mm1 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" - "movq 1(%2, %%"REG_a"), %%mm4 \n\t" - "pavgb %%mm3, %%mm1 \n\t" - "pavgb %%mm4, %%mm2 \n\t" - "psubusb %%mm5, %%mm2 \n\t" - "pavgb %%mm1, %%mm2 \n\t" - "movq (%3, %%"REG_a"), %%mm1 \n\t" - "psadbw %%mm1, %%mm2 \n\t" - "paddw %%mm2, %%mm0 \n\t" - "paddw %%mm0, %%mm6 \n\t" - "add %4, %%"REG_a" \n\t" - " js 1b \n\t" - : "+a" (len) - : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" ((long)stride) - ); -} - -static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h) -{ - long len= -(stride*h); - asm volatile( - ASMALIGN(4) - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq (%2, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm2 \n\t" - "movq (%2, %%"REG_a"), %%mm3 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm3 \n\t" - "paddw %%mm0, %%mm1 \n\t" - "paddw %%mm2, %%mm3 \n\t" - "movq (%3, %%"REG_a"), %%mm4 \n\t" - "movq (%3, %%"REG_a"), %%mm2 \n\t" - "paddw %%mm5, %%mm1 \n\t" - "paddw %%mm5, %%mm3 \n\t" - "psrlw $1, %%mm1 \n\t" - "psrlw $1, %%mm3 \n\t" - "packuswb %%mm3, %%mm1 \n\t" - "psubusb %%mm1, %%mm4 \n\t" - "psubusb %%mm2, %%mm1 \n\t" - "por %%mm4, %%mm1 \n\t" - "movq %%mm1, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "paddw %%mm1, %%mm0 \n\t" - "paddw %%mm0, %%mm6 \n\t" - "add %4, %%"REG_a" \n\t" - " js 1b \n\t" - : "+a" (len) - : "r" (blk1a - len), "r" (blk1b -len), "r" (blk2 - len), "r" ((long)stride) - ); -} - -static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h) -{ - long len= -(stride*h); - asm volatile( - ASMALIGN(4) - "1: \n\t" - "movq (%1, %%"REG_a"), %%mm0 \n\t" - "movq (%2, %%"REG_a"), %%mm1 \n\t" - "movq %%mm0, %%mm4 \n\t" - "movq %%mm1, %%mm2 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm1 \n\t" - "punpckhbw %%mm7, %%mm4 \n\t" - "punpckhbw %%mm7, %%mm2 \n\t" - "paddw %%mm1, %%mm0 \n\t" - "paddw %%mm2, %%mm4 \n\t" - "movq 1(%1, %%"REG_a"), %%mm2 \n\t" - "movq 1(%2, %%"REG_a"), %%mm3 \n\t" - "movq %%mm2, %%mm1 \n\t" - "punpcklbw %%mm7, %%mm2 \n\t" - "punpckhbw %%mm7, %%mm1 \n\t" - "paddw %%mm0, %%mm2 \n\t" - "paddw %%mm4, %%mm1 \n\t" - "movq %%mm3, %%mm4 \n\t" - "punpcklbw %%mm7, %%mm3 \n\t" - "punpckhbw %%mm7, %%mm4 \n\t" - "paddw %%mm3, %%mm2 \n\t" - "paddw %%mm4, %%mm1 \n\t" - "movq (%3, %%"REG_a"), %%mm3 \n\t" - "movq (%3, %%"REG_a"), %%mm4 \n\t" - "paddw %%mm5, %%mm2 \n\t" - "paddw %%mm5, %%mm1 \n\t" - "psrlw $2, %%mm2 \n\t" - "psrlw $2, %%mm1 \n\t" - "packuswb %%mm1, 
%%mm2 \n\t" - "psubusb %%mm2, %%mm3 \n\t" - "psubusb %%mm4, %%mm2 \n\t" - "por %%mm3, %%mm2 \n\t" - "movq %%mm2, %%mm0 \n\t" - "punpcklbw %%mm7, %%mm0 \n\t" - "punpckhbw %%mm7, %%mm2 \n\t" - "paddw %%mm2, %%mm0 \n\t" - "paddw %%mm0, %%mm6 \n\t" - "add %4, %%"REG_a" \n\t" - " js 1b \n\t" - : "+a" (len) - : "r" (blk1 - len), "r" (blk1 -len + stride), "r" (blk2 - len), "r" ((long)stride) - ); -} - -static inline int sum_mmx(void) -{ - int ret; - asm volatile( - "movq %%mm6, %%mm0 \n\t" - "psrlq $32, %%mm6 \n\t" - "paddw %%mm0, %%mm6 \n\t" - "movq %%mm6, %%mm0 \n\t" - "psrlq $16, %%mm6 \n\t" - "paddw %%mm0, %%mm6 \n\t" - "movd %%mm6, %0 \n\t" - : "=r" (ret) - ); - return ret&0xFFFF; -} - -static inline int sum_mmx2(void) -{ - int ret; - asm volatile( - "movd %%mm6, %0 \n\t" - : "=r" (ret) - ); - return ret; -} - - -#define PIX_SAD(suf)\ -static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ -{\ - assert(h==8);\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t":);\ -\ - sad8_1_ ## suf(blk1, blk2, stride, 8);\ -\ - return sum_ ## suf();\ -}\ -static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ -{\ - assert(h==8);\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "movq %0, %%mm5 \n\t"\ - :: "m"(round_tab[1]) \ - );\ -\ - sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 8);\ -\ - return sum_ ## suf();\ -}\ -\ -static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ -{\ - assert(h==8);\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "movq %0, %%mm5 \n\t"\ - :: "m"(round_tab[1]) \ - );\ -\ - sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 8);\ -\ - return sum_ ## suf();\ -}\ -\ -static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ -{\ - assert(h==8);\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "movq %0, %%mm5 \n\t"\ - :: "m"(round_tab[2]) \ - );\ -\ - sad8_4_ ## suf(blk1, blk2, stride, 8);\ -\ - return sum_ ## suf();\ -}\ -\ -static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ -{\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t":);\ -\ - sad8_1_ ## suf(blk1 , blk2 , stride, h);\ - sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\ -\ - return sum_ ## suf();\ -}\ -static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ -{\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "movq %0, %%mm5 \n\t"\ - :: "m"(round_tab[1]) \ - );\ -\ - sad8_2_ ## suf(blk1 , blk1+1, blk2 , stride, h);\ - sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, h);\ -\ - return sum_ ## suf();\ -}\ -static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ -{\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "movq %0, %%mm5 \n\t"\ - :: "m"(round_tab[1]) \ - );\ -\ - sad8_2_ ## suf(blk1 , blk1+stride, blk2 , stride, h);\ - sad8_2_ ## suf(blk1+8, blk1+stride+8,blk2+8, stride, h);\ -\ - return sum_ ## suf();\ -}\ -static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\ -{\ - asm volatile("pxor %%mm7, %%mm7 \n\t"\ - "pxor %%mm6, %%mm6 \n\t"\ - "movq %0, %%mm5 \n\t"\ - :: "m"(round_tab[2]) \ - );\ -\ - sad8_4_ ## suf(blk1 , blk2 , stride, h);\ - sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\ -\ - return sum_ ## suf();\ -}\ - -PIX_SAD(mmx) -PIX_SAD(mmx2) - -void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx) -{ - if (mm_flags & MM_MMX) { - 
c->pix_abs[0][0] = sad16_mmx; - c->pix_abs[0][1] = sad16_x2_mmx; - c->pix_abs[0][2] = sad16_y2_mmx; - c->pix_abs[0][3] = sad16_xy2_mmx; - c->pix_abs[1][0] = sad8_mmx; - c->pix_abs[1][1] = sad8_x2_mmx; - c->pix_abs[1][2] = sad8_y2_mmx; - c->pix_abs[1][3] = sad8_xy2_mmx; - - c->sad[0]= sad16_mmx; - c->sad[1]= sad8_mmx; - } - if (mm_flags & MM_MMXEXT) { - c->pix_abs[0][0] = sad16_mmx2; - c->pix_abs[1][0] = sad8_mmx2; - - c->sad[0]= sad16_mmx2; - c->sad[1]= sad8_mmx2; - - if(!(avctx->flags & CODEC_FLAG_BITEXACT)){ - c->pix_abs[0][1] = sad16_x2_mmx2; - c->pix_abs[0][2] = sad16_y2_mmx2; - c->pix_abs[0][3] = sad16_xy2_mmx2; - c->pix_abs[1][1] = sad8_x2_mmx2; - c->pix_abs[1][2] = sad8_y2_mmx2; - c->pix_abs[1][3] = sad8_xy2_mmx2; - } - } -} diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c deleted file mode 100644 index 1b7b1c19f..000000000 --- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c +++ /dev/null @@ -1,725 +0,0 @@ -/* - * The simplest mpeg encoder (well, it was the simplest!) - * Copyright (c) 2000,2001 Fabrice Bellard. - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - * Optimized for ia32 cpus by Nick Kurshev <nickols_k@mail.ru> - * h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at> - */ - -#include "../dsputil.h" -#include "../mpegvideo.h" -#include "../avcodec.h" -#include "x86_cpu.h" - -extern uint16_t inv_zigzag_direct16[64]; - -static const unsigned long long int mm_wabs __attribute__ ((aligned(8))) = 0xffffffffffffffffULL; -static const unsigned long long int mm_wone __attribute__ ((aligned(8))) = 0x0001000100010001ULL; - - -static void dct_unquantize_h263_intra_mmx(MpegEncContext *s, - DCTELEM *block, int n, int qscale) -{ - long level, qmul, qadd, nCoeffs; - - qmul = qscale << 1; - - assert(s->block_last_index[n]>=0 || s->h263_aic); - - if (!s->h263_aic) { - if (n < 4) - level = block[0] * s->y_dc_scale; - else - level = block[0] * s->c_dc_scale; - qadd = (qscale - 1) | 1; - }else{ - qadd = 0; - level= block[0]; - } - if(s->ac_pred) - nCoeffs=63; - else - nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; -//printf("%d %d ", qmul, qadd); -asm volatile( - "movd %1, %%mm6 \n\t" //qmul - "packssdw %%mm6, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "movd %2, %%mm5 \n\t" //qadd - "pxor %%mm7, %%mm7 \n\t" - "packssdw %%mm5, %%mm5 \n\t" - "packssdw %%mm5, %%mm5 \n\t" - "psubw %%mm5, %%mm7 \n\t" - "pxor %%mm4, %%mm4 \n\t" - ASMALIGN(4) - "1: \n\t" - "movq (%0, %3), %%mm0 \n\t" - "movq 8(%0, %3), %%mm1 \n\t" - - "pmullw %%mm6, %%mm0 \n\t" - "pmullw %%mm6, %%mm1 \n\t" - - "movq (%0, %3), %%mm2 \n\t" - "movq 8(%0, %3), %%mm3 \n\t" - - "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 - "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? 
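/* Scalar form of the H.263 dequant loop here (a sketch): with
 * qmul = 2*qscale and qadd = (qscale-1)|1, every nonzero coefficient
 * becomes sign(x)*(qmul*|x| + qadd); zeros stay zero (the pcmpeqw/pandn
 * pair masks them out), and the intra DC value is rescaled separately and
 * written back to block[0] after the asm. */
static inline int h263_dequant_ref(int level, int qmul, int qadd)
{
    if (level == 0) return 0;
    return level > 0 ? level * qmul + qadd
                     : level * qmul - qadd;
}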
-1 : 0 - - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - - "paddw %%mm7, %%mm0 \n\t" - "paddw %%mm7, %%mm1 \n\t" - - "pxor %%mm0, %%mm2 \n\t" - "pxor %%mm1, %%mm3 \n\t" - - "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0 - - "pandn %%mm2, %%mm0 \n\t" - "pandn %%mm3, %%mm1 \n\t" - - "movq %%mm0, (%0, %3) \n\t" - "movq %%mm1, 8(%0, %3) \n\t" - - "add $16, %3 \n\t" - "jng 1b \n\t" - ::"r" (block+nCoeffs), "g"(qmul), "g" (qadd), "r" (2*(-nCoeffs)) - : "memory" - ); - block[0]= level; -} - - -static void dct_unquantize_h263_inter_mmx(MpegEncContext *s, - DCTELEM *block, int n, int qscale) -{ - long qmul, qadd, nCoeffs; - - qmul = qscale << 1; - qadd = (qscale - 1) | 1; - - assert(s->block_last_index[n]>=0 || s->h263_aic); - - nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ]; -//printf("%d %d ", qmul, qadd); -asm volatile( - "movd %1, %%mm6 \n\t" //qmul - "packssdw %%mm6, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "movd %2, %%mm5 \n\t" //qadd - "pxor %%mm7, %%mm7 \n\t" - "packssdw %%mm5, %%mm5 \n\t" - "packssdw %%mm5, %%mm5 \n\t" - "psubw %%mm5, %%mm7 \n\t" - "pxor %%mm4, %%mm4 \n\t" - ASMALIGN(4) - "1: \n\t" - "movq (%0, %3), %%mm0 \n\t" - "movq 8(%0, %3), %%mm1 \n\t" - - "pmullw %%mm6, %%mm0 \n\t" - "pmullw %%mm6, %%mm1 \n\t" - - "movq (%0, %3), %%mm2 \n\t" - "movq 8(%0, %3), %%mm3 \n\t" - - "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 - "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 - - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - - "paddw %%mm7, %%mm0 \n\t" - "paddw %%mm7, %%mm1 \n\t" - - "pxor %%mm0, %%mm2 \n\t" - "pxor %%mm1, %%mm3 \n\t" - - "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0 - - "pandn %%mm2, %%mm0 \n\t" - "pandn %%mm3, %%mm1 \n\t" - - "movq %%mm0, (%0, %3) \n\t" - "movq %%mm1, 8(%0, %3) \n\t" - - "add $16, %3 \n\t" - "jng 1b \n\t" - ::"r" (block+nCoeffs), "g"(qmul), "g" (qadd), "r" (2*(-nCoeffs)) - : "memory" - ); -} - - -/* - NK: - Note: looking at PARANOID: - "enable all paranoid tests for rounding, overflows, etc..." - -#ifdef PARANOID - if (level < -2048 || level > 2047) - fprintf(stderr, "unquant error %d %d\n", i, level); -#endif - We can suppose that result of two multiplications can't be greate of 0xFFFF - i.e. is 16-bit, so we use here only PMULLW instruction and can avoid - a complex multiplication. 
-===================================================== - Full formula for multiplication of 2 integer numbers - which are represent as high:low words: - input: value1 = high1:low1 - value2 = high2:low2 - output: value3 = value1*value2 - value3=high3:low3 (on overflow: modulus 2^32 wrap-around) - this mean that for 0x123456 * 0x123456 correct result is 0x766cb0ce4 - but this algorithm will compute only 0x66cb0ce4 - this limited by 16-bit size of operands - --------------------------------- - tlow1 = high1*low2 - tlow2 = high2*low1 - tlow1 = tlow1 + tlow2 - high3:low3 = low1*low2 - high3 += tlow1 -*/ -static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s, - DCTELEM *block, int n, int qscale) -{ - long nCoeffs; - const uint16_t *quant_matrix; - int block0; - - assert(s->block_last_index[n]>=0); - - nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1; - - if (n < 4) - block0 = block[0] * s->y_dc_scale; - else - block0 = block[0] * s->c_dc_scale; - /* XXX: only mpeg1 */ - quant_matrix = s->intra_matrix; -asm volatile( - "pcmpeqw %%mm7, %%mm7 \n\t" - "psrlw $15, %%mm7 \n\t" - "movd %2, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "mov %3, %%"REG_a" \n\t" - ASMALIGN(4) - "1: \n\t" - "movq (%0, %%"REG_a"), %%mm0 \n\t" - "movq 8(%0, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm4 \n\t" - "movq 8(%1, %%"REG_a"), %%mm5 \n\t" - "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] - "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] - "pxor %%mm2, %%mm2 \n\t" - "pxor %%mm3, %%mm3 \n\t" - "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 - "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - "psubw %%mm2, %%mm0 \n\t" // abs(block[i]) - "psubw %%mm3, %%mm1 \n\t" // abs(block[i]) - "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q - "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q - "pxor %%mm4, %%mm4 \n\t" - "pxor %%mm5, %%mm5 \n\t" // FIXME slow - "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? 
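/* Scalar form of the MPEG-1 intra dequant loop here (a sketch): mm7 holds
 * the constant 1 (pcmpeqw + psrlw $15), which implements the
 * "oddification" step (level-1)|1 that MPEG-1 mandates. */
static inline int mpeg1_intra_dequant_ref(int level, int qscale, int matrix)
{
    int neg = level < 0;
    if (level == 0) return 0;
    if (neg) level = -level;
    level = (level * qscale * matrix) >> 3;
    level = (level - 1) | 1;   /* force odd, rounding toward zero */
    return neg ? -level : level;
}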
-1 : 0 - "psraw $3, %%mm0 \n\t" - "psraw $3, %%mm1 \n\t" - "psubw %%mm7, %%mm0 \n\t" - "psubw %%mm7, %%mm1 \n\t" - "por %%mm7, %%mm0 \n\t" - "por %%mm7, %%mm1 \n\t" - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - "psubw %%mm2, %%mm0 \n\t" - "psubw %%mm3, %%mm1 \n\t" - "pandn %%mm0, %%mm4 \n\t" - "pandn %%mm1, %%mm5 \n\t" - "movq %%mm4, (%0, %%"REG_a") \n\t" - "movq %%mm5, 8(%0, %%"REG_a") \n\t" - - "add $16, %%"REG_a" \n\t" - "js 1b \n\t" - ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" - ); - block[0]= block0; -} - -static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s, - DCTELEM *block, int n, int qscale) -{ - long nCoeffs; - const uint16_t *quant_matrix; - - assert(s->block_last_index[n]>=0); - - nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1; - - quant_matrix = s->inter_matrix; -asm volatile( - "pcmpeqw %%mm7, %%mm7 \n\t" - "psrlw $15, %%mm7 \n\t" - "movd %2, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "mov %3, %%"REG_a" \n\t" - ASMALIGN(4) - "1: \n\t" - "movq (%0, %%"REG_a"), %%mm0 \n\t" - "movq 8(%0, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm4 \n\t" - "movq 8(%1, %%"REG_a"), %%mm5 \n\t" - "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] - "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] - "pxor %%mm2, %%mm2 \n\t" - "pxor %%mm3, %%mm3 \n\t" - "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 - "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - "psubw %%mm2, %%mm0 \n\t" // abs(block[i]) - "psubw %%mm3, %%mm1 \n\t" // abs(block[i]) - "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2 - "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2 - "paddw %%mm7, %%mm0 \n\t" // abs(block[i])*2 + 1 - "paddw %%mm7, %%mm1 \n\t" // abs(block[i])*2 + 1 - "pmullw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q - "pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q - "pxor %%mm4, %%mm4 \n\t" - "pxor %%mm5, %%mm5 \n\t" // FIXME slow - "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? 
-1 : 0 - "psraw $4, %%mm0 \n\t" - "psraw $4, %%mm1 \n\t" - "psubw %%mm7, %%mm0 \n\t" - "psubw %%mm7, %%mm1 \n\t" - "por %%mm7, %%mm0 \n\t" - "por %%mm7, %%mm1 \n\t" - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - "psubw %%mm2, %%mm0 \n\t" - "psubw %%mm3, %%mm1 \n\t" - "pandn %%mm0, %%mm4 \n\t" - "pandn %%mm1, %%mm5 \n\t" - "movq %%mm4, (%0, %%"REG_a") \n\t" - "movq %%mm5, 8(%0, %%"REG_a") \n\t" - - "add $16, %%"REG_a" \n\t" - "js 1b \n\t" - ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" - ); -} - -static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s, - DCTELEM *block, int n, int qscale) -{ - long nCoeffs; - const uint16_t *quant_matrix; - int block0; - - assert(s->block_last_index[n]>=0); - - if(s->alternate_scan) nCoeffs= 63; //FIXME - else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; - - if (n < 4) - block0 = block[0] * s->y_dc_scale; - else - block0 = block[0] * s->c_dc_scale; - quant_matrix = s->intra_matrix; -asm volatile( - "pcmpeqw %%mm7, %%mm7 \n\t" - "psrlw $15, %%mm7 \n\t" - "movd %2, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "mov %3, %%"REG_a" \n\t" - ASMALIGN(4) - "1: \n\t" - "movq (%0, %%"REG_a"), %%mm0 \n\t" - "movq 8(%0, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm4 \n\t" - "movq 8(%1, %%"REG_a"), %%mm5 \n\t" - "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] - "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] - "pxor %%mm2, %%mm2 \n\t" - "pxor %%mm3, %%mm3 \n\t" - "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 - "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - "psubw %%mm2, %%mm0 \n\t" // abs(block[i]) - "psubw %%mm3, %%mm1 \n\t" // abs(block[i]) - "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q - "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q - "pxor %%mm4, %%mm4 \n\t" - "pxor %%mm5, %%mm5 \n\t" // FIXME slow - "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? 
-1 : 0 - "psraw $3, %%mm0 \n\t" - "psraw $3, %%mm1 \n\t" - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - "psubw %%mm2, %%mm0 \n\t" - "psubw %%mm3, %%mm1 \n\t" - "pandn %%mm0, %%mm4 \n\t" - "pandn %%mm1, %%mm5 \n\t" - "movq %%mm4, (%0, %%"REG_a") \n\t" - "movq %%mm5, 8(%0, %%"REG_a") \n\t" - - "add $16, %%"REG_a" \n\t" - "jng 1b \n\t" - ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs) - : "%"REG_a, "memory" - ); - block[0]= block0; - //Note, we dont do mismatch control for intra as errors cannot accumulate -} - -static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s, - DCTELEM *block, int n, int qscale) -{ - long nCoeffs; - const uint16_t *quant_matrix; - - assert(s->block_last_index[n]>=0); - - if(s->alternate_scan) nCoeffs= 63; //FIXME - else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; - - quant_matrix = s->inter_matrix; -asm volatile( - "pcmpeqw %%mm7, %%mm7 \n\t" - "psrlq $48, %%mm7 \n\t" - "movd %2, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "packssdw %%mm6, %%mm6 \n\t" - "mov %3, %%"REG_a" \n\t" - ASMALIGN(4) - "1: \n\t" - "movq (%0, %%"REG_a"), %%mm0 \n\t" - "movq 8(%0, %%"REG_a"), %%mm1 \n\t" - "movq (%1, %%"REG_a"), %%mm4 \n\t" - "movq 8(%1, %%"REG_a"), %%mm5 \n\t" - "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i] - "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i] - "pxor %%mm2, %%mm2 \n\t" - "pxor %%mm3, %%mm3 \n\t" - "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0 - "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0 - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - "psubw %%mm2, %%mm0 \n\t" // abs(block[i]) - "psubw %%mm3, %%mm1 \n\t" // abs(block[i]) - "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2 - "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2 - "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*2*q - "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*2*q - "paddw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q - "paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q - "pxor %%mm4, %%mm4 \n\t" - "pxor %%mm5, %%mm5 \n\t" // FIXME slow - "pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0 - "pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? 
-1 : 0 - "psrlw $4, %%mm0 \n\t" - "psrlw $4, %%mm1 \n\t" - "pxor %%mm2, %%mm0 \n\t" - "pxor %%mm3, %%mm1 \n\t" - "psubw %%mm2, %%mm0 \n\t" - "psubw %%mm3, %%mm1 \n\t" - "pandn %%mm0, %%mm4 \n\t" - "pandn %%mm1, %%mm5 \n\t" - "pxor %%mm4, %%mm7 \n\t" - "pxor %%mm5, %%mm7 \n\t" - "movq %%mm4, (%0, %%"REG_a") \n\t" - "movq %%mm5, 8(%0, %%"REG_a") \n\t" - - "add $16, %%"REG_a" \n\t" - "jng 1b \n\t" - "movd 124(%0, %3), %%mm0 \n\t" - "movq %%mm7, %%mm6 \n\t" - "psrlq $32, %%mm7 \n\t" - "pxor %%mm6, %%mm7 \n\t" - "movq %%mm7, %%mm6 \n\t" - "psrlq $16, %%mm7 \n\t" - "pxor %%mm6, %%mm7 \n\t" - "pslld $31, %%mm7 \n\t" - "psrlq $15, %%mm7 \n\t" - "pxor %%mm7, %%mm0 \n\t" - "movd %%mm0, 124(%0, %3) \n\t" - - ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "r" (-2*nCoeffs) - : "%"REG_a, "memory" - ); -} - -/* draw the edges of width 'w' of an image of size width, height - this mmx version can only handle w==8 || w==16 */ -static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w) -{ - uint8_t *ptr, *last_line; - int i; - - last_line = buf + (height - 1) * wrap; - /* left and right */ - ptr = buf; - if(w==8) - { - asm volatile( - "1: \n\t" - "movd (%0), %%mm0 \n\t" - "punpcklbw %%mm0, %%mm0 \n\t" - "punpcklwd %%mm0, %%mm0 \n\t" - "punpckldq %%mm0, %%mm0 \n\t" - "movq %%mm0, -8(%0) \n\t" - "movq -8(%0, %2), %%mm1 \n\t" - "punpckhbw %%mm1, %%mm1 \n\t" - "punpckhwd %%mm1, %%mm1 \n\t" - "punpckhdq %%mm1, %%mm1 \n\t" - "movq %%mm1, (%0, %2) \n\t" - "add %1, %0 \n\t" - "cmp %3, %0 \n\t" - " jb 1b \n\t" - : "+r" (ptr) - : "r" ((long)wrap), "r" ((long)width), "r" (ptr + wrap*height) - ); - } - else - { - asm volatile( - "1: \n\t" - "movd (%0), %%mm0 \n\t" - "punpcklbw %%mm0, %%mm0 \n\t" - "punpcklwd %%mm0, %%mm0 \n\t" - "punpckldq %%mm0, %%mm0 \n\t" - "movq %%mm0, -8(%0) \n\t" - "movq %%mm0, -16(%0) \n\t" - "movq -8(%0, %2), %%mm1 \n\t" - "punpckhbw %%mm1, %%mm1 \n\t" - "punpckhwd %%mm1, %%mm1 \n\t" - "punpckhdq %%mm1, %%mm1 \n\t" - "movq %%mm1, (%0, %2) \n\t" - "movq %%mm1, 8(%0, %2) \n\t" - "add %1, %0 \n\t" - "cmp %3, %0 \n\t" - " jb 1b \n\t" - : "+r" (ptr) - : "r" ((long)wrap), "r" ((long)width), "r" (ptr + wrap*height) - ); - } - - for(i=0;i<w;i+=4) { - /* top and bottom (and hopefully also the corners) */ - ptr= buf - (i + 1) * wrap - w; - asm volatile( - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm0, (%0, %2) \n\t" - "movq %%mm0, (%0, %2, 2) \n\t" - "movq %%mm0, (%0, %3) \n\t" - "add $8, %0 \n\t" - "cmp %4, %0 \n\t" - " jb 1b \n\t" - : "+r" (ptr) - : "r" ((long)buf - (long)ptr - w), "r" ((long)-wrap), "r" ((long)-wrap*3), "r" (ptr+width+2*w) - ); - ptr= last_line + (i + 1) * wrap - w; - asm volatile( - "1: \n\t" - "movq (%1, %0), %%mm0 \n\t" - "movq %%mm0, (%0) \n\t" - "movq %%mm0, (%0, %2) \n\t" - "movq %%mm0, (%0, %2, 2) \n\t" - "movq %%mm0, (%0, %3) \n\t" - "add $8, %0 \n\t" - "cmp %4, %0 \n\t" - " jb 1b \n\t" - : "+r" (ptr) - : "r" ((long)last_line - (long)ptr - w), "r" ((long)wrap), "r" ((long)wrap*3), "r" (ptr+width+2*w) - ); - } -} - -static void denoise_dct_mmx(MpegEncContext *s, DCTELEM *block){ - const int intra= s->mb_intra; - int *sum= s->dct_error_sum[intra]; - uint16_t *offset= s->dct_offset[intra]; - - s->dct_count[intra]++; - - asm volatile( - "pxor %%mm7, %%mm7 \n\t" - "1: \n\t" - "pxor %%mm0, %%mm0 \n\t" - "pxor %%mm1, %%mm1 \n\t" - "movq (%0), %%mm2 \n\t" - "movq 8(%0), %%mm3 \n\t" - "pcmpgtw %%mm2, %%mm0 \n\t" - "pcmpgtw %%mm3, %%mm1 \n\t" - "pxor %%mm0, %%mm2 \n\t" - "pxor %%mm1, %%mm3 \n\t" - "psubw %%mm0, 
%%mm2 \n\t" - "psubw %%mm1, %%mm3 \n\t" - "movq %%mm2, %%mm4 \n\t" - "movq %%mm3, %%mm5 \n\t" - "psubusw (%2), %%mm2 \n\t" - "psubusw 8(%2), %%mm3 \n\t" - "pxor %%mm0, %%mm2 \n\t" - "pxor %%mm1, %%mm3 \n\t" - "psubw %%mm0, %%mm2 \n\t" - "psubw %%mm1, %%mm3 \n\t" - "movq %%mm2, (%0) \n\t" - "movq %%mm3, 8(%0) \n\t" - "movq %%mm4, %%mm2 \n\t" - "movq %%mm5, %%mm3 \n\t" - "punpcklwd %%mm7, %%mm4 \n\t" - "punpckhwd %%mm7, %%mm2 \n\t" - "punpcklwd %%mm7, %%mm5 \n\t" - "punpckhwd %%mm7, %%mm3 \n\t" - "paddd (%1), %%mm4 \n\t" - "paddd 8(%1), %%mm2 \n\t" - "paddd 16(%1), %%mm5 \n\t" - "paddd 24(%1), %%mm3 \n\t" - "movq %%mm4, (%1) \n\t" - "movq %%mm2, 8(%1) \n\t" - "movq %%mm5, 16(%1) \n\t" - "movq %%mm3, 24(%1) \n\t" - "add $16, %0 \n\t" - "add $32, %1 \n\t" - "add $16, %2 \n\t" - "cmp %3, %0 \n\t" - " jb 1b \n\t" - : "+r" (block), "+r" (sum), "+r" (offset) - : "r"(block+64) - ); -} - -static void denoise_dct_sse2(MpegEncContext *s, DCTELEM *block){ - const int intra= s->mb_intra; - int *sum= s->dct_error_sum[intra]; - uint16_t *offset= s->dct_offset[intra]; - - s->dct_count[intra]++; - - asm volatile( - "pxor %%xmm7, %%xmm7 \n\t" - "1: \n\t" - "pxor %%xmm0, %%xmm0 \n\t" - "pxor %%xmm1, %%xmm1 \n\t" - "movdqa (%0), %%xmm2 \n\t" - "movdqa 16(%0), %%xmm3 \n\t" - "pcmpgtw %%xmm2, %%xmm0 \n\t" - "pcmpgtw %%xmm3, %%xmm1 \n\t" - "pxor %%xmm0, %%xmm2 \n\t" - "pxor %%xmm1, %%xmm3 \n\t" - "psubw %%xmm0, %%xmm2 \n\t" - "psubw %%xmm1, %%xmm3 \n\t" - "movdqa %%xmm2, %%xmm4 \n\t" - "movdqa %%xmm3, %%xmm5 \n\t" - "psubusw (%2), %%xmm2 \n\t" - "psubusw 16(%2), %%xmm3 \n\t" - "pxor %%xmm0, %%xmm2 \n\t" - "pxor %%xmm1, %%xmm3 \n\t" - "psubw %%xmm0, %%xmm2 \n\t" - "psubw %%xmm1, %%xmm3 \n\t" - "movdqa %%xmm2, (%0) \n\t" - "movdqa %%xmm3, 16(%0) \n\t" - "movdqa %%xmm4, %%xmm6 \n\t" - "movdqa %%xmm5, %%xmm0 \n\t" - "punpcklwd %%xmm7, %%xmm4 \n\t" - "punpckhwd %%xmm7, %%xmm6 \n\t" - "punpcklwd %%xmm7, %%xmm5 \n\t" - "punpckhwd %%xmm7, %%xmm0 \n\t" - "paddd (%1), %%xmm4 \n\t" - "paddd 16(%1), %%xmm6 \n\t" - "paddd 32(%1), %%xmm5 \n\t" - "paddd 48(%1), %%xmm0 \n\t" - "movdqa %%xmm4, (%1) \n\t" - "movdqa %%xmm6, 16(%1) \n\t" - "movdqa %%xmm5, 32(%1) \n\t" - "movdqa %%xmm0, 48(%1) \n\t" - "add $32, %0 \n\t" - "add $64, %1 \n\t" - "add $32, %2 \n\t" - "cmp %3, %0 \n\t" - " jb 1b \n\t" - : "+r" (block), "+r" (sum), "+r" (offset) - : "r"(block+64) - ); -} - -#undef HAVE_MMX2 -#define RENAME(a) a ## _MMX -#define RENAMEl(a) a ## _mmx -#include "mpegvideo_mmx_template.c" - -#define HAVE_MMX2 -#undef RENAME -#undef RENAMEl -#define RENAME(a) a ## _MMX2 -#define RENAMEl(a) a ## _mmx2 -#include "mpegvideo_mmx_template.c" - -#undef RENAME -#undef RENAMEl -#define RENAME(a) a ## _SSE2 -#define RENAMEl(a) a ## _sse2 -#include "mpegvideo_mmx_template.c" - -void MPV_common_init_mmx(MpegEncContext *s) -{ - if (mm_flags & MM_MMX) { - const int dct_algo = s->avctx->dct_algo; - - s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx; - s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx; - s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx; - s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx; - if(!(s->flags & CODEC_FLAG_BITEXACT)) - s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx; - s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx; - - draw_edges = draw_edges_mmx; - - if (mm_flags & MM_SSE2) { - s->denoise_dct= denoise_dct_sse2; - } else { - s->denoise_dct= denoise_dct_mmx; - } - - if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){ - if(mm_flags & MM_SSE2){ - 
s->dct_quantize= dct_quantize_SSE2; - } else if(mm_flags & MM_MMXEXT){ - s->dct_quantize= dct_quantize_MMX2; - } else { - s->dct_quantize= dct_quantize_MMX; - } - } - } -} diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c deleted file mode 100644 index d59b6efd9..000000000 --- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c +++ /dev/null @@ -1,348 +0,0 @@ -/* - * MPEG video MMX templates - * - * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at> - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#undef SPREADW -#undef PMAXW -#undef PMAX -#ifdef HAVE_MMX2 -#define SPREADW(a) "pshufw $0, " #a ", " #a " \n\t" -#define PMAXW(a,b) "pmaxsw " #a ", " #b " \n\t" -#define PMAX(a,b) \ - "pshufw $0x0E," #a ", " #b " \n\t"\ - PMAXW(b, a)\ - "pshufw $0x01," #a ", " #b " \n\t"\ - PMAXW(b, a) -#else -#define SPREADW(a) \ - "punpcklwd " #a ", " #a " \n\t"\ - "punpcklwd " #a ", " #a " \n\t" -#define PMAXW(a,b) \ - "psubusw " #a ", " #b " \n\t"\ - "paddw " #a ", " #b " \n\t" -#define PMAX(a,b) \ - "movq " #a ", " #b " \n\t"\ - "psrlq $32, " #a " \n\t"\ - PMAXW(b, a)\ - "movq " #a ", " #b " \n\t"\ - "psrlq $16, " #a " \n\t"\ - PMAXW(b, a) - -#endif - -static int RENAME(dct_quantize)(MpegEncContext *s, - DCTELEM *block, int n, - int qscale, int *overflow) -{ - long last_non_zero_p1; - int level=0, q; //=0 is cuz gcc says uninitalized ... - const uint16_t *qmat, *bias; - DECLARE_ALIGNED_8(int16_t, temp_block[64]); - - assert((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly? - - //s->fdct (block); - RENAMEl(ff_fdct) (block); //cant be anything else ... 
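/* The multiple-inclusion trick at work in this file (a sketch with demo
 * names): mpegvideo_mmx.c compiles this template three times under
 * different RENAME/RENAMEl definitions, so the ## token pasting stamps out
 * dct_quantize_MMX, dct_quantize_MMX2 and dct_quantize_SSE2 from a single
 * source, while SPREADW/PMAXW pick pshufw/pmaxsw on MMX2+ or the plain-MMX
 * fallbacks (PMAXW's psubusw+paddw pair is an unsigned max). */
#define RENAME_DEMO(a) a ## _MMX
static int RENAME_DEMO(quantize_demo)(void) { return 0; }
/* ...expands to: static int quantize_demo_MMX(void) { return 0; } */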
- - if(s->dct_error_sum) - s->denoise_dct(s, block); - - if (s->mb_intra) { - int dummy; - if (n < 4) - q = s->y_dc_scale; - else - q = s->c_dc_scale; - /* note: block[0] is assumed to be positive */ - if (!s->h263_aic) { -#if 1 - asm volatile ( - "mul %%ecx \n\t" - : "=d" (level), "=a"(dummy) - : "a" ((block[0]>>2) + q), "c" (ff_inverse[q<<1]) - ); -#else - asm volatile ( - "xorl %%edx, %%edx \n\t" - "divw %%cx \n\t" - "movzwl %%ax, %%eax \n\t" - : "=a" (level) - : "a" ((block[0]>>2) + q), "c" (q<<1) - : "%edx" - ); -#endif - } else - /* For AIC we skip quant/dequant of INTRADC */ - level = (block[0] + 4)>>3; - - block[0]=0; //avoid fake overflow -// temp_block[0] = (block[0] + (q >> 1)) / q; - last_non_zero_p1 = 1; - bias = s->q_intra_matrix16[qscale][1]; - qmat = s->q_intra_matrix16[qscale][0]; - } else { - last_non_zero_p1 = 0; - bias = s->q_inter_matrix16[qscale][1]; - qmat = s->q_inter_matrix16[qscale][0]; - } - - if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){ - - asm volatile( - "movd %%"REG_a", %%mm3 \n\t" // last_non_zero_p1 - SPREADW(%%mm3) - "pxor %%mm7, %%mm7 \n\t" // 0 - "pxor %%mm4, %%mm4 \n\t" // 0 - "movq (%2), %%mm5 \n\t" // qmat[0] - "pxor %%mm6, %%mm6 \n\t" - "psubw (%3), %%mm6 \n\t" // -bias[0] - "mov $-128, %%"REG_a" \n\t" - ASMALIGN(4) - "1: \n\t" - "pxor %%mm1, %%mm1 \n\t" // 0 - "movq (%1, %%"REG_a"), %%mm0 \n\t" // block[i] - "pcmpgtw %%mm0, %%mm1 \n\t" // block[i] <= 0 ? 0xFF : 0x00 - "pxor %%mm1, %%mm0 \n\t" - "psubw %%mm1, %%mm0 \n\t" // ABS(block[i]) - "psubusw %%mm6, %%mm0 \n\t" // ABS(block[i]) + bias[0] - "pmulhw %%mm5, %%mm0 \n\t" // (ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16 - "por %%mm0, %%mm4 \n\t" - "pxor %%mm1, %%mm0 \n\t" - "psubw %%mm1, %%mm0 \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i]) - "movq %%mm0, (%5, %%"REG_a") \n\t" - "pcmpeqw %%mm7, %%mm0 \n\t" // out==0 ? 0xFF : 0x00 - "movq (%4, %%"REG_a"), %%mm1 \n\t" - "movq %%mm7, (%1, %%"REG_a") \n\t" // 0 - "pandn %%mm1, %%mm0 \n\t" - PMAXW(%%mm0, %%mm3) - "add $8, %%"REG_a" \n\t" - " js 1b \n\t" - PMAX(%%mm3, %%mm0) - "movd %%mm3, %%"REG_a" \n\t" - "movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1 - : "+a" (last_non_zero_p1) - : "r" (block+64), "r" (qmat), "r" (bias), - "r" (inv_zigzag_direct16+64), "r" (temp_block+64) - ); - // note the asm is split cuz gcc doesnt like that many operands ... - asm volatile( - "movd %1, %%mm1 \n\t" // max_qcoeff - SPREADW(%%mm1) - "psubusw %%mm1, %%mm4 \n\t" - "packuswb %%mm4, %%mm4 \n\t" - "movd %%mm4, %0 \n\t" // *overflow - : "=g" (*overflow) - : "g" (s->max_qcoeff) - ); - }else{ // FMT_H263 - asm volatile( - "movd %%"REG_a", %%mm3 \n\t" // last_non_zero_p1 - SPREADW(%%mm3) - "pxor %%mm7, %%mm7 \n\t" // 0 - "pxor %%mm4, %%mm4 \n\t" // 0 - "mov $-128, %%"REG_a" \n\t" - ASMALIGN(4) - "1: \n\t" - "pxor %%mm1, %%mm1 \n\t" // 0 - "movq (%1, %%"REG_a"), %%mm0 \n\t" // block[i] - "pcmpgtw %%mm0, %%mm1 \n\t" // block[i] <= 0 ? 0xFF : 0x00 - "pxor %%mm1, %%mm0 \n\t" - "psubw %%mm1, %%mm0 \n\t" // ABS(block[i]) - "movq (%3, %%"REG_a"), %%mm6 \n\t" // bias[0] - "paddusw %%mm6, %%mm0 \n\t" // ABS(block[i]) + bias[0] - "movq (%2, %%"REG_a"), %%mm5 \n\t" // qmat[i] - "pmulhw %%mm5, %%mm0 \n\t" // (ABS(block[i])*qmat[0] + bias[0]*qmat[0])>>16 - "por %%mm0, %%mm4 \n\t" - "pxor %%mm1, %%mm0 \n\t" - "psubw %%mm1, %%mm0 \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i]) - "movq %%mm0, (%5, %%"REG_a") \n\t" - "pcmpeqw %%mm7, %%mm0 \n\t" // out==0 ? 
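/* What the two quantization loops here compute per coefficient, roughly (a
 * sketch; the exact scaling of qmat/bias lives in the
 * q_{intra,inter}_matrix16 setup elsewhere, not in this file):
 *     out = sign(x) * (((|x| + bias) * qmat) >> 16);
 * The por into mm4 accumulates an overflow mask across all lanes, and
 * PMAXW over the inv_zigzag_direct16 values of the nonzero outputs yields
 * last_non_zero_p1 directly. */
static inline int quantize_ref(int x, int bias, int qmat)
{
    int a = x < 0 ? -x : x;
    int q = ((a + bias) * qmat) >> 16;
    return x < 0 ? -q : q;
}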
0xFF : 0x00 - "movq (%4, %%"REG_a"), %%mm1 \n\t" - "movq %%mm7, (%1, %%"REG_a") \n\t" // 0 - "pandn %%mm1, %%mm0 \n\t" - PMAXW(%%mm0, %%mm3) - "add $8, %%"REG_a" \n\t" - " js 1b \n\t" - PMAX(%%mm3, %%mm0) - "movd %%mm3, %%"REG_a" \n\t" - "movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1 - : "+a" (last_non_zero_p1) - : "r" (block+64), "r" (qmat+64), "r" (bias+64), - "r" (inv_zigzag_direct16+64), "r" (temp_block+64) - ); - // note the asm is split cuz gcc doesnt like that many operands ... - asm volatile( - "movd %1, %%mm1 \n\t" // max_qcoeff - SPREADW(%%mm1) - "psubusw %%mm1, %%mm4 \n\t" - "packuswb %%mm4, %%mm4 \n\t" - "movd %%mm4, %0 \n\t" // *overflow - : "=g" (*overflow) - : "g" (s->max_qcoeff) - ); - } - - if(s->mb_intra) block[0]= level; - else block[0]= temp_block[0]; - - if(s->dsp.idct_permutation_type == FF_SIMPLE_IDCT_PERM){ - if(last_non_zero_p1 <= 1) goto end; - block[0x08] = temp_block[0x01]; block[0x10] = temp_block[0x08]; - block[0x20] = temp_block[0x10]; - if(last_non_zero_p1 <= 4) goto end; - block[0x18] = temp_block[0x09]; block[0x04] = temp_block[0x02]; - block[0x09] = temp_block[0x03]; - if(last_non_zero_p1 <= 7) goto end; - block[0x14] = temp_block[0x0A]; block[0x28] = temp_block[0x11]; - block[0x12] = temp_block[0x18]; block[0x02] = temp_block[0x20]; - if(last_non_zero_p1 <= 11) goto end; - block[0x1A] = temp_block[0x19]; block[0x24] = temp_block[0x12]; - block[0x19] = temp_block[0x0B]; block[0x01] = temp_block[0x04]; - block[0x0C] = temp_block[0x05]; - if(last_non_zero_p1 <= 16) goto end; - block[0x11] = temp_block[0x0C]; block[0x29] = temp_block[0x13]; - block[0x16] = temp_block[0x1A]; block[0x0A] = temp_block[0x21]; - block[0x30] = temp_block[0x28]; block[0x22] = temp_block[0x30]; - block[0x38] = temp_block[0x29]; block[0x06] = temp_block[0x22]; - if(last_non_zero_p1 <= 24) goto end; - block[0x1B] = temp_block[0x1B]; block[0x21] = temp_block[0x14]; - block[0x1C] = temp_block[0x0D]; block[0x05] = temp_block[0x06]; - block[0x0D] = temp_block[0x07]; block[0x15] = temp_block[0x0E]; - block[0x2C] = temp_block[0x15]; block[0x13] = temp_block[0x1C]; - if(last_non_zero_p1 <= 32) goto end; - block[0x0B] = temp_block[0x23]; block[0x34] = temp_block[0x2A]; - block[0x2A] = temp_block[0x31]; block[0x32] = temp_block[0x38]; - block[0x3A] = temp_block[0x39]; block[0x26] = temp_block[0x32]; - block[0x39] = temp_block[0x2B]; block[0x03] = temp_block[0x24]; - if(last_non_zero_p1 <= 40) goto end; - block[0x1E] = temp_block[0x1D]; block[0x25] = temp_block[0x16]; - block[0x1D] = temp_block[0x0F]; block[0x2D] = temp_block[0x17]; - block[0x17] = temp_block[0x1E]; block[0x0E] = temp_block[0x25]; - block[0x31] = temp_block[0x2C]; block[0x2B] = temp_block[0x33]; - if(last_non_zero_p1 <= 48) goto end; - block[0x36] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B]; - block[0x23] = temp_block[0x34]; block[0x3C] = temp_block[0x2D]; - block[0x07] = temp_block[0x26]; block[0x1F] = temp_block[0x1F]; - block[0x0F] = temp_block[0x27]; block[0x35] = temp_block[0x2E]; - if(last_non_zero_p1 <= 56) goto end; - block[0x2E] = temp_block[0x35]; block[0x33] = temp_block[0x3C]; - block[0x3E] = temp_block[0x3D]; block[0x27] = temp_block[0x36]; - block[0x3D] = temp_block[0x2F]; block[0x2F] = temp_block[0x37]; - block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; - }else if(s->dsp.idct_permutation_type == FF_LIBMPEG2_IDCT_PERM){ - if(last_non_zero_p1 <= 1) goto end; - block[0x04] = temp_block[0x01]; - block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10]; - if(last_non_zero_p1 <= 4) goto 
end; - block[0x0C] = temp_block[0x09]; block[0x01] = temp_block[0x02]; - block[0x05] = temp_block[0x03]; - if(last_non_zero_p1 <= 7) goto end; - block[0x09] = temp_block[0x0A]; block[0x14] = temp_block[0x11]; - block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20]; - if(last_non_zero_p1 <= 11) goto end; - block[0x1C] = temp_block[0x19]; - block[0x11] = temp_block[0x12]; block[0x0D] = temp_block[0x0B]; - block[0x02] = temp_block[0x04]; block[0x06] = temp_block[0x05]; - if(last_non_zero_p1 <= 16) goto end; - block[0x0A] = temp_block[0x0C]; block[0x15] = temp_block[0x13]; - block[0x19] = temp_block[0x1A]; block[0x24] = temp_block[0x21]; - block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30]; - block[0x2C] = temp_block[0x29]; block[0x21] = temp_block[0x22]; - if(last_non_zero_p1 <= 24) goto end; - block[0x1D] = temp_block[0x1B]; block[0x12] = temp_block[0x14]; - block[0x0E] = temp_block[0x0D]; block[0x03] = temp_block[0x06]; - block[0x07] = temp_block[0x07]; block[0x0B] = temp_block[0x0E]; - block[0x16] = temp_block[0x15]; block[0x1A] = temp_block[0x1C]; - if(last_non_zero_p1 <= 32) goto end; - block[0x25] = temp_block[0x23]; block[0x29] = temp_block[0x2A]; - block[0x34] = temp_block[0x31]; block[0x38] = temp_block[0x38]; - block[0x3C] = temp_block[0x39]; block[0x31] = temp_block[0x32]; - block[0x2D] = temp_block[0x2B]; block[0x22] = temp_block[0x24]; - if(last_non_zero_p1 <= 40) goto end; - block[0x1E] = temp_block[0x1D]; block[0x13] = temp_block[0x16]; - block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17]; - block[0x1B] = temp_block[0x1E]; block[0x26] = temp_block[0x25]; - block[0x2A] = temp_block[0x2C]; block[0x35] = temp_block[0x33]; - if(last_non_zero_p1 <= 48) goto end; - block[0x39] = temp_block[0x3A]; block[0x3D] = temp_block[0x3B]; - block[0x32] = temp_block[0x34]; block[0x2E] = temp_block[0x2D]; - block[0x23] = temp_block[0x26]; block[0x1F] = temp_block[0x1F]; - block[0x27] = temp_block[0x27]; block[0x2B] = temp_block[0x2E]; - if(last_non_zero_p1 <= 56) goto end; - block[0x36] = temp_block[0x35]; block[0x3A] = temp_block[0x3C]; - block[0x3E] = temp_block[0x3D]; block[0x33] = temp_block[0x36]; - block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37]; - block[0x3B] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; - }else{ - if(last_non_zero_p1 <= 1) goto end; - block[0x01] = temp_block[0x01]; - block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10]; - if(last_non_zero_p1 <= 4) goto end; - block[0x09] = temp_block[0x09]; block[0x02] = temp_block[0x02]; - block[0x03] = temp_block[0x03]; - if(last_non_zero_p1 <= 7) goto end; - block[0x0A] = temp_block[0x0A]; block[0x11] = temp_block[0x11]; - block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20]; - if(last_non_zero_p1 <= 11) goto end; - block[0x19] = temp_block[0x19]; - block[0x12] = temp_block[0x12]; block[0x0B] = temp_block[0x0B]; - block[0x04] = temp_block[0x04]; block[0x05] = temp_block[0x05]; - if(last_non_zero_p1 <= 16) goto end; - block[0x0C] = temp_block[0x0C]; block[0x13] = temp_block[0x13]; - block[0x1A] = temp_block[0x1A]; block[0x21] = temp_block[0x21]; - block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30]; - block[0x29] = temp_block[0x29]; block[0x22] = temp_block[0x22]; - if(last_non_zero_p1 <= 24) goto end; - block[0x1B] = temp_block[0x1B]; block[0x14] = temp_block[0x14]; - block[0x0D] = temp_block[0x0D]; block[0x06] = temp_block[0x06]; - block[0x07] = temp_block[0x07]; block[0x0E] = temp_block[0x0E]; - block[0x15] = temp_block[0x15]; block[0x1C] = 
temp_block[0x1C]; - if(last_non_zero_p1 <= 32) goto end; - block[0x23] = temp_block[0x23]; block[0x2A] = temp_block[0x2A]; - block[0x31] = temp_block[0x31]; block[0x38] = temp_block[0x38]; - block[0x39] = temp_block[0x39]; block[0x32] = temp_block[0x32]; - block[0x2B] = temp_block[0x2B]; block[0x24] = temp_block[0x24]; - if(last_non_zero_p1 <= 40) goto end; - block[0x1D] = temp_block[0x1D]; block[0x16] = temp_block[0x16]; - block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17]; - block[0x1E] = temp_block[0x1E]; block[0x25] = temp_block[0x25]; - block[0x2C] = temp_block[0x2C]; block[0x33] = temp_block[0x33]; - if(last_non_zero_p1 <= 48) goto end; - block[0x3A] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B]; - block[0x34] = temp_block[0x34]; block[0x2D] = temp_block[0x2D]; - block[0x26] = temp_block[0x26]; block[0x1F] = temp_block[0x1F]; - block[0x27] = temp_block[0x27]; block[0x2E] = temp_block[0x2E]; - if(last_non_zero_p1 <= 56) goto end; - block[0x35] = temp_block[0x35]; block[0x3C] = temp_block[0x3C]; - block[0x3D] = temp_block[0x3D]; block[0x36] = temp_block[0x36]; - block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37]; - block[0x3E] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; - } - end: -/* - for(i=0; i<last_non_zero_p1; i++) - { - int j= zigzag_direct_noperm[i]; - block[block_permute_op(j)]= temp_block[j]; - } -*/ - - return last_non_zero_p1 - 1; -} diff --git a/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c b/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c deleted file mode 100644 index 525ef34f7..000000000 --- a/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c +++ /dev/null @@ -1,1294 +0,0 @@ -/* - * Simple IDCT MMX - * - * Copyright (c) 2001, 2002 Michael Niedermayer <michaelni@gmx.at> - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ -#include "../dsputil.h" -#include "../simple_idct.h" - -/* -23170.475006 -22725.260826 -21406.727617 -19265.545870 -16384.000000 -12872.826198 -8866.956905 -4520.335430 -*/ -#define C0 23170 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define C1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define C2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define C3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#if 0 -#define C4 16384 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#else -#define C4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) - 0.5 -#endif -#define C5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define C6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -#define C7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - -#define ROW_SHIFT 11 -#define COL_SHIFT 20 // 6 - -static const uint64_t attribute_used __attribute__((aligned(8))) wm1010= 0xFFFF0000FFFF0000ULL; -static const uint64_t attribute_used __attribute__((aligned(8))) d40000= 0x0000000000040000ULL; - -static const int16_t __attribute__((aligned(8))) coeffs[]= { - 1<<(ROW_SHIFT-1), 0, 1<<(ROW_SHIFT-1), 0, -// 1<<(COL_SHIFT-1), 0, 1<<(COL_SHIFT-1), 0, -// 0, 1<<(COL_SHIFT-1-16), 0, 1<<(COL_SHIFT-1-16), - 1<<(ROW_SHIFT-1), 1, 1<<(ROW_SHIFT-1), 0, - // the 1 = ((1<<(COL_SHIFT-1))/C4)<<ROW_SHIFT :) -// 0, 0, 0, 0, -// 0, 0, 0, 0, - - C4, C4, C4, C4, - C4, -C4, C4, -C4, - - C2, C6, C2, C6, - C6, -C2, C6, -C2, - - C1, C3, C1, C3, - C5, C7, C5, C7, - - C3, -C7, C3, -C7, --C1, -C5, -C1, -C5, - - C5, -C1, C5, -C1, - C7, C3, C7, C3, - - C7, -C5, C7, -C5, - C3, -C1, C3, -C1 -}; - -#if 0 -static void unused_var_killer(){ - int a= wm1010 + d40000; - temp[0]=a; -} - -static void inline idctCol (int16_t * col, int16_t *input) -{ -#undef C0 -#undef C1 -#undef C2 -#undef C3 -#undef C4 -#undef C5 -#undef C6 -#undef C7 - int a0, a1, a2, a3, b0, b1, b2, b3; - const int C0 = 23170; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 -/* - if( !(col[8*1] | col[8*2] |col[8*3] |col[8*4] |col[8*5] |col[8*6] | col[8*7])) { - col[8*0] = col[8*1] = col[8*2] = col[8*3] = col[8*4] = - col[8*5] = col[8*6] = col[8*7] = col[8*0]<<3; - return; - }*/ - -col[8*0] = input[8*0 + 0]; -col[8*1] = input[8*2 + 0]; -col[8*2] = input[8*0 + 1]; -col[8*3] = input[8*2 + 1]; -col[8*4] = input[8*4 + 0]; -col[8*5] = input[8*6 + 0]; -col[8*6] = input[8*4 + 1]; -col[8*7] = input[8*6 + 1]; - - a0 = C4*col[8*0] + C2*col[8*2] + C4*col[8*4] + C6*col[8*6] + (1<<(COL_SHIFT-1)); - a1 = C4*col[8*0] + C6*col[8*2] - C4*col[8*4] - C2*col[8*6] + (1<<(COL_SHIFT-1)); - a2 = C4*col[8*0] - C6*col[8*2] - C4*col[8*4] + C2*col[8*6] + (1<<(COL_SHIFT-1)); - a3 = C4*col[8*0] - C2*col[8*2] + C4*col[8*4] - C6*col[8*6] + (1<<(COL_SHIFT-1)); - - b0 = C1*col[8*1] + C3*col[8*3] + C5*col[8*5] + C7*col[8*7]; - b1 = C3*col[8*1] - C7*col[8*3] - C1*col[8*5] - C5*col[8*7]; - b2 = C5*col[8*1] - C1*col[8*3] + C7*col[8*5] + C3*col[8*7]; - b3 = C7*col[8*1] - C5*col[8*3] + C3*col[8*5] - C1*col[8*7]; 
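/* Where the C0..C7 fixed-point constants above come from (a sketch;
 * assumes <math.h>): cos(i*pi/16)*sqrt(2) scaled by 2^14 and rounded to
 * nearest, except that the header picks 16383 rather than the rounded
 * 16384 for C4 (see the #if 0/#else above), presumably to dodge an
 * overflow corner case in the pmaddwd accumulations. */
static void gen_idct_coeffs_ref(int16_t c[8])
{
    int i;
    for (i = 0; i < 8; i++)
        c[i] = (int16_t)(cos(i * M_PI / 16.0) * sqrt(2.0) * 16384.0 + 0.5);
    c[4] = 16383;
}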
- - col[8*0] = (a0 + b0) >> COL_SHIFT; - col[8*1] = (a1 + b1) >> COL_SHIFT; - col[8*2] = (a2 + b2) >> COL_SHIFT; - col[8*3] = (a3 + b3) >> COL_SHIFT; - col[8*4] = (a3 - b3) >> COL_SHIFT; - col[8*5] = (a2 - b2) >> COL_SHIFT; - col[8*6] = (a1 - b1) >> COL_SHIFT; - col[8*7] = (a0 - b0) >> COL_SHIFT; -} - -static void inline idctRow (int16_t * output, int16_t * input) -{ - int16_t row[8]; - - int a0, a1, a2, a3, b0, b1, b2, b3; - const int C0 = 23170; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5 - -row[0] = input[0]; -row[2] = input[1]; -row[4] = input[4]; -row[6] = input[5]; -row[1] = input[8]; -row[3] = input[9]; -row[5] = input[12]; -row[7] = input[13]; - - if( !(row[1] | row[2] |row[3] |row[4] |row[5] |row[6] | row[7]) ) { - row[0] = row[1] = row[2] = row[3] = row[4] = - row[5] = row[6] = row[7] = row[0]<<3; - output[0] = row[0]; - output[2] = row[1]; - output[4] = row[2]; - output[6] = row[3]; - output[8] = row[4]; - output[10] = row[5]; - output[12] = row[6]; - output[14] = row[7]; - return; - } - - a0 = C4*row[0] + C2*row[2] + C4*row[4] + C6*row[6] + (1<<(ROW_SHIFT-1)); - a1 = C4*row[0] + C6*row[2] - C4*row[4] - C2*row[6] + (1<<(ROW_SHIFT-1)); - a2 = C4*row[0] - C6*row[2] - C4*row[4] + C2*row[6] + (1<<(ROW_SHIFT-1)); - a3 = C4*row[0] - C2*row[2] + C4*row[4] - C6*row[6] + (1<<(ROW_SHIFT-1)); - - b0 = C1*row[1] + C3*row[3] + C5*row[5] + C7*row[7]; - b1 = C3*row[1] - C7*row[3] - C1*row[5] - C5*row[7]; - b2 = C5*row[1] - C1*row[3] + C7*row[5] + C3*row[7]; - b3 = C7*row[1] - C5*row[3] + C3*row[5] - C1*row[7]; - - row[0] = (a0 + b0) >> ROW_SHIFT; - row[1] = (a1 + b1) >> ROW_SHIFT; - row[2] = (a2 + b2) >> ROW_SHIFT; - row[3] = (a3 + b3) >> ROW_SHIFT; - row[4] = (a3 - b3) >> ROW_SHIFT; - row[5] = (a2 - b2) >> ROW_SHIFT; - row[6] = (a1 - b1) >> ROW_SHIFT; - row[7] = (a0 - b0) >> ROW_SHIFT; - - output[0] = row[0]; - output[2] = row[1]; - output[4] = row[2]; - output[6] = row[3]; - output[8] = row[4]; - output[10] = row[5]; - output[12] = row[6]; - output[14] = row[7]; -} -#endif - -static inline void idct(int16_t *block) -{ - int64_t __attribute__((aligned(8))) align_tmp[16]; - int16_t * const temp= (int16_t*)align_tmp; - - asm volatile( -#if 0 //Alternative, simpler variant - -#define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - #rounder ", %%mm4 \n\t"\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 
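
The reference idctRow above maps onto the MMX macros one pmaddwd at a time: each 32-bit pmaddwd lane computes a0*b0 + a1*b1, which is why coeffs[] interleaves the constants in pairs. A scalar sketch of the "C4R4+C4R0" / "C6R6+C2R2" comment pattern (helper names are hypothetical, and the real code additionally depends on the permuted input layout documented near the end of the file):

    #include <stdint.h>

    /* one 32-bit lane of pmaddwd */
    static int32_t pmaddwd_lane(int16_t a0, int16_t a1, int16_t b0, int16_t b1)
    {
        return (int32_t)a0 * b0 + (int32_t)a1 * b1;
    }

    /* even half of one row transform, i.e. a0 from the reference idctRow */
    static int32_t row_a0(const int16_t *r)
    {
        return pmaddwd_lane(r[0], r[4], 16383, 16383)   /* "C4R4+C4R0" */
             + pmaddwd_lane(r[2], r[6], 21407,  8867)   /* "C6R6+C2R2" */
             + (1 << (11 - 1));                         /* ROW_SHIFT rounder */
    }
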
*/\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - #rounder ", %%mm0 \n\t"\ - "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ - "paddd %%mm0, %%mm0 \n\t" \ - "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ - "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ - "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ - "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ - "movq %%mm7, " #dst " \n\t"\ - "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "movq %%mm2, 24+" #dst " \n\t"\ - "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ - "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm0 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ - "movq %%mm2, 8+" #dst " \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ - "movq %%mm4, 16+" #dst " \n\t"\ - -#define COL_IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ - "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ - "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\ - "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* 
-C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm0 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "movd %%mm7, " #dst " \n\t"\ - "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "movd %%mm0, 16+" #dst " \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "movd %%mm2, 96+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "movd %%mm4, 112+" #dst " \n\t"\ - "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ - "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm5 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "movd %%mm2, 32+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ - "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ - "movd %%mm6, 48+" #dst " \n\t"\ - "movd %%mm4, 64+" #dst " \n\t"\ - "movd %%mm5, 80+" #dst " \n\t"\ - - -#define DC_COND_ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq "MANGLE(wm1010)", %%mm4 \n\t"\ - "pand %%mm0, %%mm4 \n\t"\ - "por %%mm1, %%mm4 \n\t"\ - "por %%mm2, %%mm4 \n\t"\ - "por %%mm3, %%mm4 \n\t"\ - "packssdw %%mm4,%%mm4 \n\t"\ - "movd %%mm4, %%eax \n\t"\ - "orl %%eax, %%eax \n\t"\ - "jz 1f \n\t"\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - #rounder ", %%mm4 \n\t"\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - #rounder ", %%mm0 \n\t"\ - "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ - "paddd %%mm0, %%mm0 \n\t" \ - "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ - "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd 
%%mm5, %%mm7 \n\t" /* B0 b0 */\ - "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ - "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ - "movq %%mm7, " #dst " \n\t"\ - "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "movq %%mm2, 24+" #dst " \n\t"\ - "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ - "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm0 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ - "movq %%mm2, 8+" #dst " \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ - "movq %%mm4, 16+" #dst " \n\t"\ - "jmp 2f \n\t"\ - "1: \n\t"\ - "pslld $16, %%mm0 \n\t"\ - "#paddd "MANGLE(d40000)", %%mm0 \n\t"\ - "psrad $13, %%mm0 \n\t"\ - "packssdw %%mm0, %%mm0 \n\t"\ - "movq %%mm0, " #dst " \n\t"\ - "movq %%mm0, 8+" #dst " \n\t"\ - "movq %%mm0, 16+" #dst " \n\t"\ - "movq %%mm0, 24+" #dst " \n\t"\ - "2: \n\t" - - -//IDCT( src0, src4, src1, src5, dst, rounder, shift) -ROW_IDCT( (%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11) -/*ROW_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1), paddd (%2), 11) -ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1), paddd (%2), 11) -ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1), paddd (%2), 11)*/ - -DC_COND_ROW_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11) -DC_COND_ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11) -DC_COND_ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11) - - -//IDCT( src0, src4, src1, src5, dst, shift) -COL_IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -COL_IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -COL_IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -COL_IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - -#else - -#define DC_COND_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq "MANGLE(wm1010)", %%mm4 \n\t"\ - "pand %%mm0, %%mm4 \n\t"\ - "por %%mm1, %%mm4 \n\t"\ - "por %%mm2, %%mm4 \n\t"\ - "por %%mm3, %%mm4 \n\t"\ - "packssdw %%mm4,%%mm4 \n\t"\ - "movd %%mm4, %%eax \n\t"\ - "orl %%eax, %%eax \n\t"\ - "jz 1f \n\t"\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd 
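
The "1:" branch above is the all-AC-zero shortcut: wm1010 masks the two DC words out of the first quadword, the remaining quadwords are OR-ed in, and if nothing survives only DC matters. In plain C it is the same short-circuit the reference idctRow takes; a sketch, ignoring the small d40000 bias the non-#if 0 variant folds in before the shift:

    /* pslld $16 then psrad $13 is (dc << 16) >> 13 == dc << 3 per lane */
    static void dc_only_row(int16_t *out, int16_t dc)
    {
        for (int i = 0; i < 8; i++)
            out[i] = dc << 3;
    }
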
%%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - #rounder ", %%mm4 \n\t"\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - #rounder ", %%mm0 \n\t"\ - "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ - "paddd %%mm0, %%mm0 \n\t" \ - "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ - "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ - "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ - "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ - "movq %%mm7, " #dst " \n\t"\ - "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "movq %%mm2, 24+" #dst " \n\t"\ - "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ - "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm0 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ - "movq %%mm2, 8+" #dst " \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ - "movq %%mm4, 16+" #dst " \n\t"\ - "jmp 2f \n\t"\ - "1: \n\t"\ - "pslld $16, %%mm0 \n\t"\ - "paddd "MANGLE(d40000)", %%mm0 \n\t"\ - "psrad $13, %%mm0 \n\t"\ - "packssdw %%mm0, %%mm0 \n\t"\ - "movq %%mm0, " #dst " \n\t"\ - "movq %%mm0, 8+" #dst " \n\t"\ - "movq %%mm0, 16+" #dst " \n\t"\ - "movq %%mm0, 24+" #dst " \n\t"\ - "2: \n\t" - -#define Z_COND_IDCT(src0, src4, src1, src5, dst, rounder, shift, bt) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq %%mm0, %%mm4 \n\t"\ - "por %%mm1, %%mm4 \n\t"\ - "por %%mm2, %%mm4 \n\t"\ - "por %%mm3, %%mm4 \n\t"\ - "packssdw %%mm4,%%mm4 \n\t"\ - "movd %%mm4, %%eax \n\t"\ - "orl %%eax, %%eax 
\n\t"\ - "jz " #bt " \n\t"\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - #rounder ", %%mm4 \n\t"\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - #rounder ", %%mm0 \n\t"\ - "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ - "paddd %%mm0, %%mm0 \n\t" \ - "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ - "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ - "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ - "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ - "movq %%mm7, " #dst " \n\t"\ - "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "movq %%mm2, 24+" #dst " \n\t"\ - "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ - "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm0 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ - "movq %%mm2, 8+" #dst " \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ - "movq %%mm4, 16+" #dst " \n\t"\ - -#define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 
-C2r6+C6r2 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - #rounder ", %%mm4 \n\t"\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - #rounder ", %%mm0 \n\t"\ - "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\ - "paddd %%mm0, %%mm0 \n\t" \ - "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\ - "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\ - "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\ - "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\ - "movq %%mm7, " #dst " \n\t"\ - "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "movq %%mm2, 24+" #dst " \n\t"\ - "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ - "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm0 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\ - "movq %%mm2, 8+" #dst " \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\ - "movq %%mm4, 16+" #dst " \n\t"\ - -//IDCT( src0, src4, src1, src5, dst, rounder, shift) -DC_COND_IDCT( 0(%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11) -Z_COND_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11, 4f) -Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 2f) -Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 1f) - -#undef IDCT -#define IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 48(%2), %%mm7 
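
The DC_COND_IDCT/Z_COND_IDCT sequence above makes the row pass double as a dispatcher: every all-zero row group branches to one of seven slimmed-down column passes (the numeric labels that follow), and all paths rejoin at "9:". A compilable sketch of the branch tree; the function name and flags are hypothetical, with gN meaning "row group N contained a nonzero coefficient":

    /* returns the asm label that would run the column pass */
    static int column_variant(int g1, int g2, int g3)
    {
        if (g1)
            return g2 ? (g3 ? 0   /* fall-through: full column IDCT */
                            : 1)  /* label "1:" */
                      : (g3 ? 2   /* label "2:" */
                            : 3); /* label "3:" */
        else
            return g2 ? (g3 ? 4   /* label "4:" */
                            : 5)  /* label "5:" */
                      : (g3 ? 6   /* label "6:" */
                            : 7); /* label "7:" (DC group only) */
    }
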
\n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ - "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ - "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\ - "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm0 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "movd %%mm7, " #dst " \n\t"\ - "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "movd %%mm0, 16+" #dst " \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "movd %%mm2, 96+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "movd %%mm4, 112+" #dst " \n\t"\ - "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ - "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm5 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "movd %%mm2, 32+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ - "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ - "movd %%mm6, 48+" #dst " \n\t"\ - "movd %%mm4, 64+" #dst " \n\t"\ - "movd %%mm5, 80+" #dst " \n\t" - - -//IDCT( src0, src4, src1, src5, dst, shift) -IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - "jmp 9f \n\t" - - "#" ASMALIGN(4) \ - "4: \n\t" -Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 6f) -Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f) - -#undef IDCT -#define IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, 
%%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ - "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ - "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm1 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm1, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm7, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm7, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm0 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm1, %%mm1 \n\t" /* A0+B0 a0+b0 */\ - "movd %%mm1, " #dst " \n\t"\ - "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "movd %%mm0, 16+" #dst " \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "movd %%mm2, 96+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "movd %%mm4, 112+" #dst " \n\t"\ - "movq 88(%2), %%mm1 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm1, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm1, %%mm5 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm5 \n\t"\ - "movq %%mm6, %%mm1 \n\t" /* A3 a3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm1 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "psrad $" #shift ", %%mm1 \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "movd %%mm2, 32+" #dst " \n\t"\ - "packssdw %%mm1, %%mm1 \n\t" /* A3-B3 a3-b3 */\ - "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ - "movd %%mm6, 48+" #dst " \n\t"\ - "movd %%mm1, 64+" #dst " \n\t"\ - "movd %%mm5, 80+" #dst " \n\t" - -//IDCT( src0, src4, src1, src5, dst, shift) -IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - "jmp 9f \n\t" - - "#" ASMALIGN(4) \ - "6: \n\t" -Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f) - -#undef IDCT -#define IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm1 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm1, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "psrad $" #shift 
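
What the variant after "4:" actually saves: with the quadwords from the zeroed row group known to be zero, the odd-part sums lose their R1/R3 contributions. Over the variables of the reference idctCol, reusing the C1..C7 macros from the top of the file, the surviving B terms are (a sketch):

    /* reference idctCol odd part with col[8*1] == col[8*3] == 0 */
    static void odd_part_rows_1_3_zero(const int16_t *col, int b[4])
    {
        b[0] =  C5*col[8*5] + C7*col[8*7];   /* asm: "C7R7+C5R5"   */
        b[1] = -C1*col[8*5] - C5*col[8*7];   /* asm: "-C5R7-C1R5"  */
        b[2] =  C7*col[8*5] + C3*col[8*7];   /* asm: "C3R7+C7R5"   */
        b[3] =  C3*col[8*5] - C1*col[8*7];   /* asm: "-C1R7+C3R5"  */
    }
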
", %%mm4 \n\t"\ - "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm7, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm7, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm0 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm1, %%mm1 \n\t" /* A0+B0 a0+b0 */\ - "movd %%mm1, " #dst " \n\t"\ - "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "movd %%mm0, 16+" #dst " \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "movd %%mm2, 96+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "movd %%mm4, 112+" #dst " \n\t"\ - "movq 88(%2), %%mm1 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm1, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm1, %%mm5 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm5 \n\t"\ - "movq %%mm6, %%mm1 \n\t" /* A3 a3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm1 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "psrad $" #shift ", %%mm1 \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "movd %%mm2, 32+" #dst " \n\t"\ - "packssdw %%mm1, %%mm1 \n\t" /* A3-B3 a3-b3 */\ - "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ - "movd %%mm6, 48+" #dst " \n\t"\ - "movd %%mm1, 64+" #dst " \n\t"\ - "movd %%mm5, 80+" #dst " \n\t" - - -//IDCT( src0, src4, src1, src5, dst, shift) -IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - "jmp 9f \n\t" - - "#" ASMALIGN(4) \ - "2: \n\t" -Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f) - -#undef IDCT -#define IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\ - "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\ - "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\ - "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm0 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "movd %%mm7, " #dst " \n\t"\ - "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "movd %%mm0, 16+" #dst " \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\ - "movd %%mm2, 96+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "movd 
%%mm4, 112+" #dst " \n\t"\ - "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\ - "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\ - "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\ - "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\ - "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\ - "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm2 \n\t"\ - "psrad $" #shift ", %%mm5 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\ - "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\ - "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "movd %%mm2, 32+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ - "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ - "movd %%mm6, 48+" #dst " \n\t"\ - "movd %%mm4, 64+" #dst " \n\t"\ - "movd %%mm5, 80+" #dst " \n\t" - -//IDCT( src0, src4, src1, src5, dst, shift) -IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - "jmp 9f \n\t" - - "#" ASMALIGN(4) \ - "3: \n\t" -#undef IDCT -#define IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 64(%2), %%mm3 \n\t"\ - "pmaddwd %%mm2, %%mm3 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm0, %%mm1 \n\t" /* A1 a1 */\ - "paddd %%mm3, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm3, %%mm1 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm0 \n\t"\ - "psrad $" #shift ", %%mm1 \n\t"\ - "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "movd %%mm7, " #dst " \n\t"\ - "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "movd %%mm0, 16+" #dst " \n\t"\ - "packssdw %%mm1, %%mm1 \n\t" /* A1-B1 a1-b1 */\ - "movd %%mm1, 96+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "movd %%mm4, 112+" #dst " \n\t"\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "pmaddwd %%mm2, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "pmaddwd 96(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "movq %%mm5, %%mm1 \n\t" /* A2 a2 */\ - "paddd %%mm4, %%mm1 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "psrad $" #shift ", %%mm5 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm2, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm2, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm1, 
%%mm1 \n\t" /* A2+B2 a2+b2 */\ - "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "movd %%mm1, 32+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ - "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ - "movd %%mm6, 48+" #dst " \n\t"\ - "movd %%mm4, 64+" #dst " \n\t"\ - "movd %%mm5, 80+" #dst " \n\t" - - -//IDCT( src0, src4, src1, src5, dst, shift) -IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - "jmp 9f \n\t" - - "#" ASMALIGN(4) \ - "5: \n\t" -#undef IDCT -#define IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ - "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ - "movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\ - "movq 8+" #src4 ", %%mm3 \n\t" /* R6 R2 r6 r2 */\ - "movq 16(%2), %%mm1 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm2, %%mm1 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm7, %%mm2 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm3, %%mm7 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "pmaddwd 40(%2), %%mm3 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "paddd %%mm1, %%mm7 \n\t" /* A0 a0 */\ - "paddd %%mm1, %%mm1 \n\t" /* 2C0 2c0 */\ - "psubd %%mm7, %%mm1 \n\t" /* A3 a3 */\ - "paddd %%mm2, %%mm3 \n\t" /* A1 a1 */\ - "paddd %%mm2, %%mm2 \n\t" /* 2C1 2c1 */\ - "psubd %%mm3, %%mm2 \n\t" /* A2 a2 */\ - "psrad $" #shift ", %%mm4 \n\t"\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm3 \n\t"\ - "packssdw %%mm7, %%mm4 \n\t" /* A0 a0 */\ - "movq %%mm4, " #dst " \n\t"\ - "psrad $" #shift ", %%mm0 \n\t"\ - "packssdw %%mm3, %%mm0 \n\t" /* A1 a1 */\ - "movq %%mm0, 16+" #dst " \n\t"\ - "movq %%mm0, 96+" #dst " \n\t"\ - "movq %%mm4, 112+" #dst " \n\t"\ - "psrad $" #shift ", %%mm5 \n\t"\ - "psrad $" #shift ", %%mm6 \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm2, %%mm5 \n\t" /* A2-B2 a2-b2 */\ - "movq %%mm5, 32+" #dst " \n\t"\ - "psrad $" #shift ", %%mm1 \n\t"\ - "packssdw %%mm1, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "movq %%mm6, 48+" #dst " \n\t"\ - "movq %%mm6, 64+" #dst " \n\t"\ - "movq %%mm5, 80+" #dst " \n\t" - - -//IDCT( src0, src4, src1, src5, dst, shift) -IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - "jmp 9f \n\t" - - - "#" ASMALIGN(4) \ - "1: \n\t" -#undef IDCT -#define IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\ - "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* 
C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\ - "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\ - "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\ - "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\ - "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\ - "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\ - "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\ - "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\ - "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\ - "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\ - "movq 64(%2), %%mm1 \n\t"\ - "pmaddwd %%mm2, %%mm1 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\ - "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\ - "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "psrad $" #shift ", %%mm7 \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "movq %%mm0, %%mm3 \n\t" /* A1 a1 */\ - "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "psubd %%mm1, %%mm3 \n\t" /* A1-B1 a1-b1 */\ - "psrad $" #shift ", %%mm0 \n\t"\ - "psrad $" #shift ", %%mm3 \n\t"\ - "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\ - "movd %%mm7, " #dst " \n\t"\ - "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\ - "movd %%mm0, 16+" #dst " \n\t"\ - "packssdw %%mm3, %%mm3 \n\t" /* A1-B1 a1-b1 */\ - "movd %%mm3, 96+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\ - "movd %%mm4, 112+" #dst " \n\t"\ - "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\ - "pmaddwd %%mm2, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\ - "pmaddwd 96(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\ - "movq %%mm5, %%mm3 \n\t" /* A2 a2 */\ - "paddd %%mm4, %%mm3 \n\t" /* A2+B2 a2+b2 */\ - "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\ - "psrad $" #shift ", %%mm3 \n\t"\ - "psrad $" #shift ", %%mm5 \n\t"\ - "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\ - "paddd %%mm2, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "psubd %%mm2, %%mm4 \n\t" /* a3-B3 a3-b3 */\ - "psrad $" #shift ", %%mm6 \n\t"\ - "packssdw %%mm3, %%mm3 \n\t" /* A2+B2 a2+b2 */\ - "movd %%mm3, 32+" #dst " \n\t"\ - "psrad $" #shift ", %%mm4 \n\t"\ - "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\ - "movd %%mm6, 48+" #dst " \n\t"\ - "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\ - "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\ - "movd %%mm4, 64+" #dst " \n\t"\ - "movd %%mm5, 80+" #dst " \n\t" - - -//IDCT( src0, src4, src1, src5, dst, shift) -IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - "jmp 9f \n\t" - - - "#" ASMALIGN(4) - "7: \n\t" -#undef IDCT -#define IDCT(src0, src4, src1, src5, dst, shift) \ - "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\ - "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "psrad $" #shift ", %%mm4 \n\t"\ - "psrad $" #shift ", %%mm0 \n\t"\ - "movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\ - "movq 16(%2), %%mm1 \n\t" /* C4 C4 C4 C4 */\ - "pmaddwd %%mm2, %%mm1 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\ - "movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\ - "pmaddwd %%mm7, %%mm2 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\ - "movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\ - "psrad $" #shift ", %%mm1 \n\t"\ - "packssdw %%mm1, %%mm4 \n\t" /* A0 a0 */\ - 
"movq %%mm4, " #dst " \n\t"\ - "psrad $" #shift ", %%mm2 \n\t"\ - "packssdw %%mm2, %%mm0 \n\t" /* A1 a1 */\ - "movq %%mm0, 16+" #dst " \n\t"\ - "movq %%mm0, 96+" #dst " \n\t"\ - "movq %%mm4, 112+" #dst " \n\t"\ - "movq %%mm0, 32+" #dst " \n\t"\ - "movq %%mm4, 48+" #dst " \n\t"\ - "movq %%mm4, 64+" #dst " \n\t"\ - "movq %%mm0, 80+" #dst " \n\t" - -//IDCT( src0, src4, src1, src5, dst, shift) -IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0), 20) -//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0), 20) -IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0), 20) -//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0), 20) - - -#endif - -/* -Input - 00 40 04 44 20 60 24 64 - 10 30 14 34 50 70 54 74 - 01 41 03 43 21 61 23 63 - 11 31 13 33 51 71 53 73 - 02 42 06 46 22 62 26 66 - 12 32 16 36 52 72 56 76 - 05 45 07 47 25 65 27 67 - 15 35 17 37 55 75 57 77 - -Temp - 00 04 10 14 20 24 30 34 - 40 44 50 54 60 64 70 74 - 01 03 11 13 21 23 31 33 - 41 43 51 53 61 63 71 73 - 02 06 12 16 22 26 32 36 - 42 46 52 56 62 66 72 76 - 05 07 15 17 25 27 35 37 - 45 47 55 57 65 67 75 77 -*/ - -"9: \n\t" - :: "r" (block), "r" (temp), "r" (coeffs) - : "%eax" - ); -} - -void ff_simple_idct_mmx(int16_t *block) -{ - idct(block); -} - -//FIXME merge add/put into the idct - -void ff_simple_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block) -{ - idct(block); - put_pixels_clamped_mmx(block, dest, line_size); -} -void ff_simple_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block) -{ - idct(block); - add_pixels_clamped_mmx(block, dest, line_size); -} diff --git a/src/libffmpeg/libavcodec/i386/vp3dsp_mmx.c b/src/libffmpeg/libavcodec/i386/vp3dsp_mmx.c deleted file mode 100644 index f715dc803..000000000 --- a/src/libffmpeg/libavcodec/i386/vp3dsp_mmx.c +++ /dev/null @@ -1,324 +0,0 @@ -/* - * Copyright (C) 2004 the ffmpeg project - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file vp3dsp_mmx.c - * MMX-optimized functions cribbed from the original VP3 source code. 
- */ - -#include "../dsputil.h" -#include "mmx.h" - -#define IdctAdjustBeforeShift 8 - -/* (12 * 4) 2-byte memory locations ( = 96 bytes total) - * idct_constants[0..15] = Mask table (M(I)) - * idct_constants[16..43] = Cosine table (C(I)) - * idct_constants[44..47] = 8 - */ -static uint16_t idct_constants[(4 + 7 + 1) * 4]; -static const uint16_t idct_cosine_table[7] = { - 64277, 60547, 54491, 46341, 36410, 25080, 12785 -}; - -#define r0 mm0 -#define r1 mm1 -#define r2 mm2 -#define r3 mm3 -#define r4 mm4 -#define r5 mm5 -#define r6 mm6 -#define r7 mm7 - -/* from original comments: The Macro does IDct on 4 1-D Dcts */ -#define BeginIDCT() { \ - movq_m2r(*I(3), r2); \ - movq_m2r(*C(3), r6); \ - movq_r2r(r2, r4); \ - movq_m2r(*J(5), r7); \ - pmulhw_r2r(r6, r4); /* r4 = c3*i3 - i3 */ \ - movq_m2r(*C(5), r1); \ - pmulhw_r2r(r7, r6); /* r6 = c3*i5 - i5 */ \ - movq_r2r(r1, r5); \ - pmulhw_r2r(r2, r1); /* r1 = c5*i3 - i3 */ \ - movq_m2r(*I(1), r3); \ - pmulhw_r2r(r7, r5); /* r5 = c5*i5 - i5 */ \ - movq_m2r(*C(1), r0); /* (all registers are in use) */ \ - paddw_r2r(r2, r4); /* r4 = c3*i3 */ \ - paddw_r2r(r7, r6); /* r6 = c3*i5 */ \ - paddw_r2r(r1, r2); /* r2 = c5*i3 */ \ - movq_m2r(*J(7), r1); \ - paddw_r2r(r5, r7); /* r7 = c5*i5 */ \ - movq_r2r(r0, r5); /* r5 = c1 */ \ - pmulhw_r2r(r3, r0); /* r0 = c1*i1 - i1 */ \ - paddsw_r2r(r7, r4); /* r4 = C = c3*i3 + c5*i5 */ \ - pmulhw_r2r(r1, r5); /* r5 = c1*i7 - i7 */ \ - movq_m2r(*C(7), r7); \ - psubsw_r2r(r2, r6); /* r6 = D = c3*i5 - c5*i3 */ \ - paddw_r2r(r3, r0); /* r0 = c1*i1 */ \ - pmulhw_r2r(r7, r3); /* r3 = c7*i1 */ \ - movq_m2r(*I(2), r2); \ - pmulhw_r2r(r1, r7); /* r7 = c7*i7 */ \ - paddw_r2r(r1, r5); /* r5 = c1*i7 */ \ - movq_r2r(r2, r1); /* r1 = i2 */ \ - pmulhw_m2r(*C(2), r2); /* r2 = c2*i2 - i2 */ \ - psubsw_r2r(r5, r3); /* r3 = B = c7*i1 - c1*i7 */ \ - movq_m2r(*J(6), r5); \ - paddsw_r2r(r7, r0); /* r0 = A = c1*i1 + c7*i7 */ \ - movq_r2r(r5, r7); /* r7 = i6 */ \ - psubsw_r2r(r4, r0); /* r0 = A - C */ \ - pmulhw_m2r(*C(2), r5); /* r5 = c2*i6 - i6 */ \ - paddw_r2r(r1, r2); /* r2 = c2*i2 */ \ - pmulhw_m2r(*C(6), r1); /* r1 = c6*i2 */ \ - paddsw_r2r(r4, r4); /* r4 = C + C */ \ - paddsw_r2r(r0, r4); /* r4 = C. = A + C */ \ - psubsw_r2r(r6, r3); /* r3 = B - D */ \ - paddw_r2r(r7, r5); /* r5 = c2*i6 */ \ - paddsw_r2r(r6, r6); /* r6 = D + D */ \ - pmulhw_m2r(*C(6), r7); /* r7 = c6*i6 */ \ - paddsw_r2r(r3, r6); /* r6 = D. = B + D */ \ - movq_r2m(r4, *I(1)); /* save C. at I(1) */ \ - psubsw_r2r(r5, r1); /* r1 = H = c6*i2 - c2*i6 */ \ - movq_m2r(*C(4), r4); \ - movq_r2r(r3, r5); /* r5 = B - D */ \ - pmulhw_r2r(r4, r3); /* r3 = (c4 - 1) * (B - D) */ \ - paddsw_r2r(r2, r7); /* r7 = G = c6*i6 + c2*i2 */ \ - movq_r2m(r6, *I(2)); /* save D. at I(2) */ \ - movq_r2r(r0, r2); /* r2 = A - C */ \ - movq_m2r(*I(0), r6); \ - pmulhw_r2r(r4, r0); /* r0 = (c4 - 1) * (A - C) */ \ - paddw_r2r(r3, r5); /* r5 = B. = c4 * (B - D) */ \ - movq_m2r(*J(4), r3); \ - psubsw_r2r(r1, r5); /* r5 = B.. = B. - H */ \ - paddw_r2r(r0, r2); /* r0 = A. = c4 * (A - C) */ \ - psubsw_r2r(r3, r6); /* r6 = i0 - i4 */ \ - movq_r2r(r6, r0); \ - pmulhw_r2r(r4, r6); /* r6 = (c4 - 1) * (i0 - i4) */ \ - paddsw_r2r(r3, r3); /* r3 = i4 + i4 */ \ - paddsw_r2r(r1, r1); /* r1 = H + H */ \ - paddsw_r2r(r0, r3); /* r3 = i0 + i4 */ \ - paddsw_r2r(r5, r1); /* r1 = H. = B + H */ \ - pmulhw_r2r(r3, r4); /* r4 = (c4 - 1) * (i0 + i4) */ \ - paddsw_r2r(r0, r6); /* r6 = F = c4 * (i0 - i4) */ \ - psubsw_r2r(r2, r6); /* r6 = F. = F - A. */ \ - paddsw_r2r(r2, r2); /* r2 = A. + A. 
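
BeginIDCT() leans on one idiom throughout: idct_cosine_table holds round(cos(i*pi/16)*65536), and the larger entries (64277 down to 36410) do not fit in a signed 16-bit word. pmulhw therefore effectively multiplies by c - 65536, and the paired paddw adds the operand back in, which is why the comments come in "c3*i3 - i3" / "r4 = c3*i3" pairs; the two smallest constants (25080, 12785) skip the fix-up. A scalar model, sketch only:

    #include <stdint.h>

    static int16_t mulhw_fix(int16_t x, uint16_t c)   /* c = round(cos * 65536) */
    {
        /* pmulhw reinterprets c >= 0x8000 as the negative value c - 65536 */
        int16_t hi = (int16_t)(((int32_t)(int16_t)c * x) >> 16);  /* pmulhw */
        return (int16_t)(hi + x);                                 /* paddw  */
    }
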
*/ \ - movq_m2r(*I(1), r0); /* r0 = C. */ \ - paddsw_r2r(r6, r2); /* r2 = A.. = F + A. */ \ - paddw_r2r(r3, r4); /* r4 = E = c4 * (i0 + i4) */ \ - psubsw_r2r(r1, r2); /* r2 = R2 = A.. - H. */ \ -} - -/* RowIDCT gets ready to transpose */ -#define RowIDCT() { \ - \ - BeginIDCT(); \ - \ - movq_m2r(*I(2), r3); /* r3 = D. */ \ - psubsw_r2r(r7, r4); /* r4 = E. = E - G */ \ - paddsw_r2r(r1, r1); /* r1 = H. + H. */ \ - paddsw_r2r(r7, r7); /* r7 = G + G */ \ - paddsw_r2r(r2, r1); /* r1 = R1 = A.. + H. */ \ - paddsw_r2r(r4, r7); /* r7 = G. = E + G */ \ - psubsw_r2r(r3, r4); /* r4 = R4 = E. - D. */ \ - paddsw_r2r(r3, r3); \ - psubsw_r2r(r5, r6); /* r6 = R6 = F. - B.. */ \ - paddsw_r2r(r5, r5); \ - paddsw_r2r(r4, r3); /* r3 = R3 = E. + D. */ \ - paddsw_r2r(r6, r5); /* r5 = R5 = F. + B.. */ \ - psubsw_r2r(r0, r7); /* r7 = R7 = G. - C. */ \ - paddsw_r2r(r0, r0); \ - movq_r2m(r1, *I(1)); /* save R1 */ \ - paddsw_r2r(r7, r0); /* r0 = R0 = G. + C. */ \ -} - -/* Column IDCT normalizes and stores final results */ -#define ColumnIDCT() { \ - \ - BeginIDCT(); \ - \ - paddsw_m2r(*Eight, r2); /* adjust R2 (and R1) for shift */ \ - paddsw_r2r(r1, r1); /* r1 = H. + H. */ \ - paddsw_r2r(r2, r1); /* r1 = R1 = A.. + H. */ \ - psraw_i2r(4, r2); /* r2 = NR2 */ \ - psubsw_r2r(r7, r4); /* r4 = E. = E - G */ \ - psraw_i2r(4, r1); /* r1 = NR1 */ \ - movq_m2r(*I(2), r3); /* r3 = D. */ \ - paddsw_r2r(r7, r7); /* r7 = G + G */ \ - movq_r2m(r2, *I(2)); /* store NR2 at I2 */ \ - paddsw_r2r(r4, r7); /* r7 = G. = E + G */ \ - movq_r2m(r1, *I(1)); /* store NR1 at I1 */ \ - psubsw_r2r(r3, r4); /* r4 = R4 = E. - D. */ \ - paddsw_m2r(*Eight, r4); /* adjust R4 (and R3) for shift */ \ - paddsw_r2r(r3, r3); /* r3 = D. + D. */ \ - paddsw_r2r(r4, r3); /* r3 = R3 = E. + D. */ \ - psraw_i2r(4, r4); /* r4 = NR4 */ \ - psubsw_r2r(r5, r6); /* r6 = R6 = F. - B.. */ \ - psraw_i2r(4, r3); /* r3 = NR3 */ \ - paddsw_m2r(*Eight, r6); /* adjust R6 (and R5) for shift */ \ - paddsw_r2r(r5, r5); /* r5 = B.. + B.. */ \ - paddsw_r2r(r6, r5); /* r5 = R5 = F. + B.. */ \ - psraw_i2r(4, r6); /* r6 = NR6 */ \ - movq_r2m(r4, *J(4)); /* store NR4 at J4 */ \ - psraw_i2r(4, r5); /* r5 = NR5 */ \ - movq_r2m(r3, *I(3)); /* store NR3 at I3 */ \ - psubsw_r2r(r0, r7); /* r7 = R7 = G. - C. */ \ - paddsw_m2r(*Eight, r7); /* adjust R7 (and R0) for shift */ \ - paddsw_r2r(r0, r0); /* r0 = C. + C. */ \ - paddsw_r2r(r7, r0); /* r0 = R0 = G. + C. */ \ - psraw_i2r(4, r7); /* r7 = NR7 */ \ - movq_r2m(r6, *J(6)); /* store NR6 at J6 */ \ - psraw_i2r(4, r0); /* r0 = NR0 */ \ - movq_r2m(r5, *J(5)); /* store NR5 at J5 */ \ - movq_r2m(r7, *J(7)); /* store NR7 at J7 */ \ - movq_r2m(r0, *I(0)); /* store NR0 at I0 */ \ -} - -/* Following macro does two 4x4 transposes in place. - - At entry (we assume): - - r0 = a3 a2 a1 a0 - I(1) = b3 b2 b1 b0 - r2 = c3 c2 c1 c0 - r3 = d3 d2 d1 d0 - - r4 = e3 e2 e1 e0 - r5 = f3 f2 f1 f0 - r6 = g3 g2 g1 g0 - r7 = h3 h2 h1 h0 - - At exit, we have: - - I(0) = d0 c0 b0 a0 - I(1) = d1 c1 b1 a1 - I(2) = d2 c2 b2 a2 - I(3) = d3 c3 b3 a3 - - J(4) = h0 g0 f0 e0 - J(5) = h1 g1 f1 e1 - J(6) = h2 g2 f2 e2 - J(7) = h3 g3 f3 e3 - - I(0) I(1) I(2) I(3) is the transpose of r0 I(1) r2 r3. - J(4) J(5) J(6) J(7) is the transpose of r4 r5 r6 r7. - - Since r1 is free at entry, we calculate the Js first. 
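
The comment above pins down Transpose() completely: it is two independent in-place 4x4 transposes of 16-bit words, one for the a..d tile and one for the e..h tile. As plain C (a sketch; the MMX version stays in registers, spilling only through I(0) and the J() slots):

    #include <stdint.h>

    /* transpose one 4x4 tile of int16_t in place */
    static void transpose4x4(int16_t *p, int stride)
    {
        for (int i = 0; i < 4; i++)
            for (int j = i + 1; j < 4; j++) {
                int16_t t       = p[i*stride + j];
                p[i*stride + j] = p[j*stride + i];
                p[j*stride + i] = t;
            }
    }
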
*/ - -#define Transpose() { \ - movq_r2r(r4, r1); /* r1 = e3 e2 e1 e0 */ \ - punpcklwd_r2r(r5, r4); /* r4 = f1 e1 f0 e0 */ \ - movq_r2m(r0, *I(0)); /* save a3 a2 a1 a0 */ \ - punpckhwd_r2r(r5, r1); /* r1 = f3 e3 f2 e2 */ \ - movq_r2r(r6, r0); /* r0 = g3 g2 g1 g0 */ \ - punpcklwd_r2r(r7, r6); /* r6 = h1 g1 h0 g0 */ \ - movq_r2r(r4, r5); /* r5 = f1 e1 f0 e0 */ \ - punpckldq_r2r(r6, r4); /* r4 = h0 g0 f0 e0 = R4 */ \ - punpckhdq_r2r(r6, r5); /* r5 = h1 g1 f1 e1 = R5 */ \ - movq_r2r(r1, r6); /* r6 = f3 e3 f2 e2 */ \ - movq_r2m(r4, *J(4)); \ - punpckhwd_r2r(r7, r0); /* r0 = h3 g3 h2 g2 */ \ - movq_r2m(r5, *J(5)); \ - punpckhdq_r2r(r0, r6); /* r6 = h3 g3 f3 e3 = R7 */ \ - movq_m2r(*I(0), r4); /* r4 = a3 a2 a1 a0 */ \ - punpckldq_r2r(r0, r1); /* r1 = h2 g2 f2 e2 = R6 */ \ - movq_m2r(*I(1), r5); /* r5 = b3 b2 b1 b0 */ \ - movq_r2r(r4, r0); /* r0 = a3 a2 a1 a0 */ \ - movq_r2m(r6, *J(7)); \ - punpcklwd_r2r(r5, r0); /* r0 = b1 a1 b0 a0 */ \ - movq_r2m(r1, *J(6)); \ - punpckhwd_r2r(r5, r4); /* r4 = b3 a3 b2 a2 */ \ - movq_r2r(r2, r5); /* r5 = c3 c2 c1 c0 */ \ - punpcklwd_r2r(r3, r2); /* r2 = d1 c1 d0 c0 */ \ - movq_r2r(r0, r1); /* r1 = b1 a1 b0 a0 */ \ - punpckldq_r2r(r2, r0); /* r0 = d0 c0 b0 a0 = R0 */ \ - punpckhdq_r2r(r2, r1); /* r1 = d1 c1 b1 a1 = R1 */ \ - movq_r2r(r4, r2); /* r2 = b3 a3 b2 a2 */ \ - movq_r2m(r0, *I(0)); \ - punpckhwd_r2r(r3, r5); /* r5 = d3 c3 d2 c2 */ \ - movq_r2m(r1, *I(1)); \ - punpckhdq_r2r(r5, r4); /* r4 = d3 c3 b3 a3 = R3 */ \ - punpckldq_r2r(r5, r2); /* r2 = d2 c2 b2 a2 = R2 */ \ - movq_r2m(r4, *I(3)); \ - movq_r2m(r2, *I(2)); \ -} - -void ff_vp3_dsp_init_mmx(void) -{ - int j = 16; - uint16_t *p; - - j = 1; - do { - p = idct_constants + ((j + 3) << 2); - p[0] = p[1] = p[2] = p[3] = idct_cosine_table[j - 1]; - } while (++j <= 7); - - idct_constants[44] = idct_constants[45] = - idct_constants[46] = idct_constants[47] = IdctAdjustBeforeShift; -} - -void ff_vp3_idct_mmx(int16_t *output_data) -{ - /* eax = quantized input - * ebx = dequantizer matrix - * ecx = IDCT constants - * M(I) = ecx + MaskOffset(0) + I * 8 - * C(I) = ecx + CosineOffset(32) + (I-1) * 8 - * edx = output - * r0..r7 = mm0..mm7 - */ - -#define C(x) (idct_constants + 16 + (x - 1) * 4) -#define Eight (idct_constants + 44) - - /* at this point, function has completed dequantization + dezigzag + - * partial transposition; now do the idct itself */ -#define I(K) (output_data + K * 8) -#define J(K) (output_data + ((K - 4) * 8) + 4) - - RowIDCT(); - Transpose(); - -#undef I -#undef J -#define I(K) (output_data + (K * 8) + 32) -#define J(K) (output_data + ((K - 4) * 8) + 36) - - RowIDCT(); - Transpose(); - -#undef I -#undef J -#define I(K) (output_data + K * 8) -#define J(K) (output_data + K * 8) - - ColumnIDCT(); - -#undef I -#undef J -#define I(K) (output_data + (K * 8) + 4) -#define J(K) (output_data + (K * 8) + 4) - - ColumnIDCT(); - -#undef I -#undef J - -} diff --git a/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c b/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c deleted file mode 100644 index bd2911d59..000000000 --- a/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c +++ /dev/null @@ -1,827 +0,0 @@ -/* - * Copyright (C) 2004 the ffmpeg project - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
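
One detail worth pulling out before the SSE2 version below, which widens the same algorithm to xmm registers: both versions share the final normalization seen in ColumnIDCT(), i.e. add IdctAdjustBeforeShift (the *Eight constant, 8) and arithmetic-shift right by 4. Per coefficient, as a C sketch:

    static int16_t vp3_normalize(int v)
    {
        return (int16_t)((v + 8) >> 4);   /* paddsw *Eight, then psraw $4 */
    }

The 0x00080008 dwords in eight_data below are the same bias, just splatted across eight 16-bit lanes.
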
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file vp3dsp_sse2.c - * SSE2-optimized functions cribbed from the original VP3 source code. - */ - -#include "../dsputil.h" -#include "mmx.h" - -static DECLARE_ALIGNED_16(const unsigned short, SSE2_dequant_const[]) = -{ - 0,65535,65535,0,0,0,0,0, // 0x0000 0000 0000 0000 0000 FFFF FFFF 0000 - 0,0,0,0,65535,65535,0,0, // 0x0000 0000 FFFF FFFF 0000 0000 0000 0000 - 65535,65535,65535,0,0,0,0,0,// 0x0000 0000 0000 0000 0000 FFFF FFFF FFFF - 0,0,0,65535,0,0,0,0, // 0x0000 0000 0000 0000 FFFF 0000 0000 0000 - 0,0,0,65535,65535,0,0,0, // 0x0000 0000 0000 FFFF FFFF 0000 0000 0000 - 65535,0,0,0,0,65535,0,0, // 0x0000 0000 FFFF 0000 0000 0000 0000 FFFF - 0,0,65535,65535, 0,0,0,0 // 0x0000 0000 0000 0000 FFFF FFFF 0000 0000 -}; - -static DECLARE_ALIGNED_16(const unsigned int, eight_data[]) = -{ - 0x00080008, - 0x00080008, - 0x00080008, - 0x00080008 -}; - -static DECLARE_ALIGNED_16(const unsigned short, SSE2_idct_data[7 * 8]) = -{ - 64277,64277,64277,64277,64277,64277,64277,64277, - 60547,60547,60547,60547,60547,60547,60547,60547, - 54491,54491,54491,54491,54491,54491,54491,54491, - 46341,46341,46341,46341,46341,46341,46341,46341, - 36410,36410,36410,36410,36410,36410,36410,36410, - 25080,25080,25080,25080,25080,25080,25080,25080, - 12785,12785,12785,12785,12785,12785,12785,12785 -}; - - -#define SSE2_Column_IDCT() { \ - \ - movdqu_m2r(*I(3), xmm2); /* xmm2 = i3 */ \ - movdqu_m2r(*C(3), xmm6); /* xmm6 = c3 */ \ - \ - movdqu_r2r(xmm2, xmm4); /* xmm4 = i3 */ \ - movdqu_m2r(*I(5), xmm7); /* xmm7 = i5 */ \ - \ - pmulhw_r2r(xmm6, xmm4); /* xmm4 = c3 * i3 - i3 */ \ - movdqu_m2r(*C(5), xmm1); /* xmm1 = c5 */ \ - \ - pmulhw_r2r(xmm7, xmm6); /* xmm6 = c3 * i5 - i5 */ \ - movdqu_r2r(xmm1, xmm5); /* xmm5 = c5 */ \ - \ - pmulhw_r2r(xmm2, xmm1); /* xmm1 = c5 * i3 - i3 */ \ - movdqu_m2r(*I(1), xmm3); /* xmm3 = i1 */ \ - \ - pmulhw_r2r(xmm7, xmm5); /* xmm5 = c5 * i5 - i5 */ \ - movdqu_m2r(*C(1), xmm0); /* xmm0 = c1 */ \ - \ - /* all registers are in use */ \ - \ - paddw_r2r(xmm2, xmm4); /* xmm4 = c3 * i3 */ \ - paddw_r2r(xmm7, xmm6); /* xmm6 = c3 * i5 */ \ - \ - paddw_r2r(xmm1, xmm2); /* xmm2 = c5 * i3 */ \ - movdqu_m2r(*I(7), xmm1); /* xmm1 = i7 */ \ - \ - paddw_r2r(xmm5, xmm7); /* xmm7 = c5 * i5 */ \ - movdqu_r2r(xmm0, xmm5); /* xmm5 = c1 */ \ - \ - pmulhw_r2r(xmm3, xmm0); /* xmm0 = c1 * i1 - i1 */ \ - paddsw_r2r(xmm7, xmm4); /* xmm4 = c3 * i3 + c5 * i5 = C */ \ - \ - pmulhw_r2r(xmm1, xmm5); /* xmm5 = c1 * i7 - i7 */ \ - movdqu_m2r(*C(7), xmm7); /* xmm7 = c7 */ \ - \ - psubsw_r2r(xmm2, xmm6); /* xmm6 = c3 * i5 - c5 * i3 = D */ \ - paddw_r2r(xmm3, xmm0); /* xmm0 = c1 * i1 */ \ - \ - pmulhw_r2r(xmm7, xmm3); /* xmm3 = c7 * i1 */ \ - movdqu_m2r(*I(2), xmm2); /* xmm2 = i2 */ \ - \ - pmulhw_r2r(xmm1, xmm7); /* xmm7 = c7 * i7 */ \ - paddw_r2r(xmm1, xmm5); /* xmm5 = c1 * i7 */ \ - \ - movdqu_r2r(xmm2, xmm1); /* xmm1 = i2 */ \ - pmulhw_m2r(*C(2), xmm2); /* xmm2 = i2 * c2 -i2 */ \ - \ - psubsw_r2r(xmm5, xmm3); /* xmm3 = c7 * i1 - c1 * i7 = B */ \ - movdqu_m2r(*I(6), xmm5); /* xmm5 = i6 */ \ - \ - paddsw_r2r(xmm7, xmm0); /* xmm0 = c1 * i1 + c7 
* i7 = A */ \ - movdqu_r2r(xmm5, xmm7); /* xmm7 = i6 */ \ - \ - psubsw_r2r(xmm4, xmm0); /* xmm0 = A - C */ \ - pmulhw_m2r(*C(2), xmm5); /* xmm5 = c2 * i6 - i6 */ \ - \ - paddw_r2r(xmm1, xmm2); /* xmm2 = i2 * c2 */ \ - pmulhw_m2r(*C(6), xmm1); /* xmm1 = c6 * i2 */ \ - \ - paddsw_r2r(xmm4, xmm4); /* xmm4 = C + C */ \ - paddsw_r2r(xmm0, xmm4); /* xmm4 = A + C = C. */ \ - \ - psubsw_r2r(xmm6, xmm3); /* xmm3 = B - D */ \ - paddw_r2r(xmm7, xmm5); /* xmm5 = c2 * i6 */ \ - \ - paddsw_r2r(xmm6, xmm6); /* xmm6 = D + D */ \ - pmulhw_m2r(*C(6), xmm7); /* xmm7 = c6 * i6 */ \ - \ - paddsw_r2r(xmm3, xmm6); /* xmm6 = B + D = D. */ \ - movdqu_r2m(xmm4, *I(1)); /* Save C. at I(1) */ \ - \ - psubsw_r2r(xmm5, xmm1); /* xmm1 = c6 * i2 - c2 * i6 = H */ \ - movdqu_m2r(*C(4), xmm4); /* xmm4 = c4 */ \ - \ - movdqu_r2r(xmm3, xmm5); /* xmm5 = B - D */ \ - pmulhw_r2r(xmm4, xmm3); /* xmm3 = ( c4 -1 ) * ( B - D ) */ \ - \ - paddsw_r2r(xmm2, xmm7); /* xmm7 = c2 * i2 + c6 * i6 = G */ \ - movdqu_r2m(xmm6, *I(2)); /* Save D. at I(2) */ \ - \ - movdqu_r2r(xmm0, xmm2); /* xmm2 = A - C */ \ - movdqu_m2r(*I(0), xmm6); /* xmm6 = i0 */ \ - \ - pmulhw_r2r(xmm4, xmm0); /* xmm0 = ( c4 - 1 ) * ( A - C ) = A. */ \ - paddw_r2r(xmm3, xmm5); /* xmm5 = c4 * ( B - D ) = B. */ \ - \ - movdqu_m2r(*I(4), xmm3); /* xmm3 = i4 */ \ - psubsw_r2r(xmm1, xmm5); /* xmm5 = B. - H = B.. */ \ - \ - paddw_r2r(xmm0, xmm2); /* xmm2 = c4 * ( A - C) = A. */ \ - psubsw_r2r(xmm3, xmm6); /* xmm6 = i0 - i4 */ \ - \ - movdqu_r2r(xmm6, xmm0); /* xmm0 = i0 - i4 */ \ - pmulhw_r2r(xmm4, xmm6); /* xmm6 = (c4 - 1) * (i0 - i4) = F */ \ - \ - paddsw_r2r(xmm3, xmm3); /* xmm3 = i4 + i4 */ \ - paddsw_r2r(xmm1, xmm1); /* xmm1 = H + H */ \ - \ - paddsw_r2r(xmm0, xmm3); /* xmm3 = i0 + i4 */ \ - paddsw_r2r(xmm5, xmm1); /* xmm1 = B. + H = H. */ \ - \ - pmulhw_r2r(xmm3, xmm4); /* xmm4 = ( c4 - 1 ) * ( i0 + i4 ) */ \ - paddw_r2r(xmm0, xmm6); /* xmm6 = c4 * ( i0 - i4 ) */ \ - \ - psubsw_r2r(xmm2, xmm6); /* xmm6 = F - A. = F. */ \ - paddsw_r2r(xmm2, xmm2); /* xmm2 = A. + A. */ \ - \ - movdqu_m2r(*I(1), xmm0); /* Load C. from I(1) */ \ - paddsw_r2r(xmm6, xmm2); /* xmm2 = F + A. = A.. */ \ - \ - paddw_r2r(xmm3, xmm4); /* xmm4 = c4 * ( i0 + i4 ) = 3 */ \ - psubsw_r2r(xmm1, xmm2); /* xmm2 = A.. - H. = R2 */ \ - \ - paddsw_m2r(*Eight, xmm2); /* Adjust R2 and R1 before shifting */ \ - paddsw_r2r(xmm1, xmm1); /* xmm1 = H. + H. */ \ - \ - paddsw_r2r(xmm2, xmm1); /* xmm1 = A.. + H. = R1 */ \ - psraw_i2r(4, xmm2); /* xmm2 = op2 */ \ - \ - psubsw_r2r(xmm7, xmm4); /* xmm4 = E - G = E. */ \ - psraw_i2r(4, xmm1); /* xmm1 = op1 */ \ - \ - movdqu_m2r(*I(2), xmm3); /* Load D. from I(2) */ \ - paddsw_r2r(xmm7, xmm7); /* xmm7 = G + G */ \ - \ - movdqu_r2m(xmm2, *O(2)); /* Write out op2 */ \ - paddsw_r2r(xmm4, xmm7); /* xmm7 = E + G = G. */ \ - \ - movdqu_r2m(xmm1, *O(1)); /* Write out op1 */ \ - psubsw_r2r(xmm3, xmm4); /* xmm4 = E. - D. = R4 */ \ - \ - paddsw_m2r(*Eight, xmm4); /* Adjust R4 and R3 before shifting */ \ - paddsw_r2r(xmm3, xmm3); /* xmm3 = D. + D. */ \ - \ - paddsw_r2r(xmm4, xmm3); /* xmm3 = E. + D. = R3 */ \ - psraw_i2r(4, xmm4); /* xmm4 = op4 */ \ - \ - psubsw_r2r(xmm5, xmm6); /* xmm6 = F. - B..= R6 */ \ - psraw_i2r(4, xmm3); /* xmm3 = op3 */ \ - \ - paddsw_m2r(*Eight, xmm6); /* Adjust R6 and R5 before shifting */ \ - paddsw_r2r(xmm5, xmm5); /* xmm5 = B.. + B.. */ \ - \ - paddsw_r2r(xmm6, xmm5); /* xmm5 = F. + B.. 
= R5 */ \ - psraw_i2r(4, xmm6); /* xmm6 = op6 */ \ - \ - movdqu_r2m(xmm4, *O(4)); /* Write out op4 */ \ - psraw_i2r(4, xmm5); /* xmm5 = op5 */ \ - \ - movdqu_r2m(xmm3, *O(3)); /* Write out op3 */ \ - psubsw_r2r(xmm0, xmm7); /* xmm7 = G. - C. = R7 */ \ - \ - paddsw_m2r(*Eight, xmm7); /* Adjust R7 and R0 before shifting */ \ - paddsw_r2r(xmm0, xmm0); /* xmm0 = C. + C. */ \ - \ - paddsw_r2r(xmm7, xmm0); /* xmm0 = G. + C. */ \ - psraw_i2r(4, xmm7); /* xmm7 = op7 */ \ - \ - movdqu_r2m(xmm6, *O(6)); /* Write out op6 */ \ - psraw_i2r(4, xmm0); /* xmm0 = op0 */ \ - \ - movdqu_r2m(xmm5, *O(5)); /* Write out op5 */ \ - movdqu_r2m(xmm7, *O(7)); /* Write out op7 */ \ - \ - movdqu_r2m(xmm0, *O(0)); /* Write out op0 */ \ - \ -} /* End of SSE2_Column_IDCT macro */ - - -#define SSE2_Row_IDCT() { \ - \ - movdqu_m2r(*I(3), xmm2); /* xmm2 = i3 */ \ - movdqu_m2r(*C(3), xmm6); /* xmm6 = c3 */ \ - \ - movdqu_r2r(xmm2, xmm4); /* xmm4 = i3 */ \ - movdqu_m2r(*I(5), xmm7); /* xmm7 = i5 */ \ - \ - pmulhw_r2r(xmm6, xmm4); /* xmm4 = c3 * i3 - i3 */ \ - movdqu_m2r(*C(5), xmm1); /* xmm1 = c5 */ \ - \ - pmulhw_r2r(xmm7, xmm6); /* xmm6 = c3 * i5 - i5 */ \ - movdqu_r2r(xmm1, xmm5); /* xmm5 = c5 */ \ - \ - pmulhw_r2r(xmm2, xmm1); /* xmm1 = c5 * i3 - i3 */ \ - movdqu_m2r(*I(1), xmm3); /* xmm3 = i1 */ \ - \ - pmulhw_r2r(xmm7, xmm5); /* xmm5 = c5 * i5 - i5 */ \ - movdqu_m2r(*C(1), xmm0); /* xmm0 = c1 */ \ - \ - /* all registers are in use */ \ - \ - paddw_r2r(xmm2, xmm4); /* xmm4 = c3 * i3 */ \ - paddw_r2r(xmm7, xmm6); /* xmm6 = c3 * i5 */ \ - \ - paddw_r2r(xmm1, xmm2); /* xmm2 = c5 * i3 */ \ - movdqu_m2r(*I(7), xmm1); /* xmm1 = i7 */ \ - \ - paddw_r2r(xmm5, xmm7); /* xmm7 = c5 * i5 */ \ - movdqu_r2r(xmm0, xmm5); /* xmm5 = c1 */ \ - \ - pmulhw_r2r(xmm3, xmm0); /* xmm0 = c1 * i1 - i1 */ \ - paddsw_r2r(xmm7, xmm4); /* xmm4 = c3 * i3 + c5 * i5 = C */ \ - \ - pmulhw_r2r(xmm1, xmm5); /* xmm5 = c1 * i7 - i7 */ \ - movdqu_m2r(*C(7), xmm7); /* xmm7 = c7 */ \ - \ - psubsw_r2r(xmm2, xmm6); /* xmm6 = c3 * i5 - c5 * i3 = D */ \ - paddw_r2r(xmm3, xmm0); /* xmm0 = c1 * i1 */ \ - \ - pmulhw_r2r(xmm7, xmm3); /* xmm3 = c7 * i1 */ \ - movdqu_m2r(*I(2), xmm2); /* xmm2 = i2 */ \ - \ - pmulhw_r2r(xmm1, xmm7); /* xmm7 = c7 * i7 */ \ - paddw_r2r(xmm1, xmm5); /* xmm5 = c1 * i7 */ \ - \ - movdqu_r2r(xmm2, xmm1); /* xmm1 = i2 */ \ - pmulhw_m2r(*C(2), xmm2); /* xmm2 = i2 * c2 -i2 */ \ - \ - psubsw_r2r(xmm5, xmm3); /* xmm3 = c7 * i1 - c1 * i7 = B */ \ - movdqu_m2r(*I(6), xmm5); /* xmm5 = i6 */ \ - \ - paddsw_r2r(xmm7, xmm0); /* xmm0 = c1 * i1 + c7 * i7 = A */ \ - movdqu_r2r(xmm5, xmm7); /* xmm7 = i6 */ \ - \ - psubsw_r2r(xmm4, xmm0); /* xmm0 = A - C */ \ - pmulhw_m2r(*C(2), xmm5); /* xmm5 = c2 * i6 - i6 */ \ - \ - paddw_r2r(xmm1, xmm2); /* xmm2 = i2 * c2 */ \ - pmulhw_m2r(*C(6), xmm1); /* xmm1 = c6 * i2 */ \ - \ - paddsw_r2r(xmm4, xmm4); /* xmm4 = C + C */ \ - paddsw_r2r(xmm0, xmm4); /* xmm4 = A + C = C. */ \ - \ - psubsw_r2r(xmm6, xmm3); /* xmm3 = B - D */ \ - paddw_r2r(xmm7, xmm5); /* xmm5 = c2 * i6 */ \ - \ - paddsw_r2r(xmm6, xmm6); /* xmm6 = D + D */ \ - pmulhw_m2r(*C(6), xmm7); /* xmm7 = c6 * i6 */ \ - \ - paddsw_r2r(xmm3, xmm6); /* xmm6 = B + D = D. */ \ - movdqu_r2m(xmm4, *I(1)); /* Save C. at I(1) */ \ - \ - psubsw_r2r(xmm5, xmm1); /* xmm1 = c6 * i2 - c2 * i6 = H */ \ - movdqu_m2r(*C(4), xmm4); /* xmm4 = c4 */ \ - \ - movdqu_r2r(xmm3, xmm5); /* xmm5 = B - D */ \ - pmulhw_r2r(xmm4, xmm3); /* xmm3 = ( c4 -1 ) * ( B - D ) */ \ - \ - paddsw_r2r(xmm2, xmm7); /* xmm7 = c2 * i2 + c6 * i6 = G */ \ - movdqu_r2m(xmm6, *I(2)); /* Save D. 
at I(2) */ \ - \ - movdqu_r2r(xmm0, xmm2); /* xmm2 = A - C */ \ - movdqu_m2r(*I(0), xmm6); /* xmm6 = i0 */ \ - \ - pmulhw_r2r(xmm4, xmm0); /* xmm0 = ( c4 - 1 ) * ( A - C ) = A. */ \ - paddw_r2r(xmm3, xmm5); /* xmm5 = c4 * ( B - D ) = B. */ \ - \ - movdqu_m2r(*I(4), xmm3); /* xmm3 = i4 */ \ - psubsw_r2r(xmm1, xmm5); /* xmm5 = B. - H = B.. */ \ - \ - paddw_r2r(xmm0, xmm2); /* xmm2 = c4 * ( A - C) = A. */ \ - psubsw_r2r(xmm3, xmm6); /* xmm6 = i0 - i4 */ \ - \ - movdqu_r2r(xmm6, xmm0); /* xmm0 = i0 - i4 */ \ - pmulhw_r2r(xmm4, xmm6); /* xmm6 = ( c4 - 1 ) * ( i0 - i4 ) = F */ \ - \ - paddsw_r2r(xmm3, xmm3); /* xmm3 = i4 + i4 */ \ - paddsw_r2r(xmm1, xmm1); /* xmm1 = H + H */ \ - \ - paddsw_r2r(xmm0, xmm3); /* xmm3 = i0 + i4 */ \ - paddsw_r2r(xmm5, xmm1); /* xmm1 = B. + H = H. */ \ - \ - pmulhw_r2r(xmm3, xmm4); /* xmm4 = ( c4 - 1 ) * ( i0 + i4 ) */ \ - paddw_r2r(xmm0, xmm6); /* xmm6 = c4 * ( i0 - i4 ) */ \ - \ - psubsw_r2r(xmm2, xmm6); /* xmm6 = F - A. = F. */ \ - paddsw_r2r(xmm2, xmm2); /* xmm2 = A. + A. */ \ - \ - movdqu_m2r(*I(1), xmm0); /* Load C. from I(1) */ \ - paddsw_r2r(xmm6, xmm2); /* xmm2 = F + A. = A.. */ \ - \ - paddw_r2r(xmm3, xmm4); /* xmm4 = c4 * ( i0 + i4 ) = 3 */ \ - psubsw_r2r(xmm1, xmm2); /* xmm2 = A.. - H. = R2 */ \ - \ - paddsw_r2r(xmm1, xmm1); /* xmm1 = H. + H. */ \ - paddsw_r2r(xmm2, xmm1); /* xmm1 = A.. + H. = R1 */ \ - \ - psubsw_r2r(xmm7, xmm4); /* xmm4 = E - G = E. */ \ - \ - movdqu_m2r(*I(2), xmm3); /* Load D. from I(2) */ \ - paddsw_r2r(xmm7, xmm7); /* xmm7 = G + G */ \ - \ - movdqu_r2m(xmm2, *I(2)); /* Write out op2 */ \ - paddsw_r2r(xmm4, xmm7); /* xmm7 = E + G = G. */ \ - \ - movdqu_r2m(xmm1, *I(1)); /* Write out op1 */ \ - psubsw_r2r(xmm3, xmm4); /* xmm4 = E. - D. = R4 */ \ - \ - paddsw_r2r(xmm3, xmm3); /* xmm3 = D. + D. */ \ - \ - paddsw_r2r(xmm4, xmm3); /* xmm3 = E. + D. = R3 */ \ - \ - psubsw_r2r(xmm5, xmm6); /* xmm6 = F. - B..= R6 */ \ - \ - paddsw_r2r(xmm5, xmm5); /* xmm5 = B.. + B.. */ \ - \ - paddsw_r2r(xmm6, xmm5); /* xmm5 = F. + B.. = R5 */ \ - \ - movdqu_r2m(xmm4, *I(4)); /* Write out op4 */ \ - \ - movdqu_r2m(xmm3, *I(3)); /* Write out op3 */ \ - psubsw_r2r(xmm0, xmm7); /* xmm7 = G. - C. = R7 */ \ - \ - paddsw_r2r(xmm0, xmm0); /* xmm0 = C. + C. */ \ - \ - paddsw_r2r(xmm7, xmm0); /* xmm0 = G. + C. 
*/ \ - \ - movdqu_r2m(xmm6, *I(6)); /* Write out op6 */ \ - \ - movdqu_r2m(xmm5, *I(5)); /* Write out op5 */ \ - movdqu_r2m(xmm7, *I(7)); /* Write out op7 */ \ - \ - movdqu_r2m(xmm0, *I(0)); /* Write out op0 */ \ - \ -} /* End of SSE2_Row_IDCT macro */ - - -#define SSE2_Transpose() { \ - \ - movdqu_m2r(*I(4), xmm4); /* xmm4=e7e6e5e4e3e2e1e0 */ \ - movdqu_m2r(*I(5), xmm0); /* xmm4=f7f6f5f4f3f2f1f0 */ \ - \ - movdqu_r2r(xmm4, xmm5); /* make a copy */ \ - punpcklwd_r2r(xmm0, xmm4); /* xmm4=f3e3f2e2f1e1f0e0 */ \ - \ - punpckhwd_r2r(xmm0, xmm5); /* xmm5=f7e7f6e6f5e5f4e4 */ \ - movdqu_m2r(*I(6), xmm6); /* xmm6=g7g6g5g4g3g2g1g0 */ \ - \ - movdqu_m2r(*I(7), xmm0); /* xmm0=h7h6h5h4h3h2h1h0 */ \ - movdqu_r2r(xmm6, xmm7); /* make a copy */ \ - \ - punpcklwd_r2r(xmm0, xmm6); /* xmm6=h3g3h3g2h1g1h0g0 */ \ - punpckhwd_r2r(xmm0, xmm7); /* xmm7=h7g7h6g6h5g5h4g4 */ \ - \ - movdqu_r2r(xmm4, xmm3); /* make a copy */ \ - punpckldq_r2r(xmm6, xmm4); /* xmm4=h1g1f1e1h0g0f0e0 */ \ - \ - punpckhdq_r2r(xmm6, xmm3); /* xmm3=h3g3g3e3h2g2f2e2 */ \ - movdqu_r2m(xmm3, *I(6)); /* save h3g3g3e3h2g2f2e2 */ \ - /* Free xmm6 */ \ - movdqu_r2r(xmm5, xmm6); /* make a copy */ \ - punpckldq_r2r(xmm7, xmm5); /* xmm5=h5g5f5e5h4g4f4e4 */ \ - \ - punpckhdq_r2r(xmm7, xmm6); /* xmm6=h7g7f7e7h6g6f6e6 */ \ - movdqu_m2r(*I(0), xmm0); /* xmm0=a7a6a5a4a3a2a1a0 */ \ - /* Free xmm7 */ \ - movdqu_m2r(*I(1), xmm1); /* xmm1=b7b6b5b4b3b2b1b0 */ \ - movdqu_r2r(xmm0, xmm7); /* make a copy */ \ - \ - punpcklwd_r2r(xmm1, xmm0); /* xmm0=b3a3b2a2b1a1b0a0 */ \ - punpckhwd_r2r(xmm1, xmm7); /* xmm7=b7a7b6a6b5a5b4a4 */ \ - /* Free xmm1 */ \ - movdqu_m2r(*I(2), xmm2); /* xmm2=c7c6c5c4c3c2c1c0 */ \ - movdqu_m2r(*I(3), xmm3); /* xmm3=d7d6d5d4d3d2d1d0 */ \ - \ - movdqu_r2r(xmm2, xmm1); /* make a copy */ \ - punpcklwd_r2r(xmm3, xmm2); /* xmm2=d3c3d2c2d1c1d0c0 */ \ - \ - punpckhwd_r2r(xmm3, xmm1); /* xmm1=d7c7d6c6d5c5d4c4 */ \ - movdqu_r2r(xmm0, xmm3); /* make a copy */ \ - \ - punpckldq_r2r(xmm2, xmm0); /* xmm0=d1c1b1a1d0c0b0a0 */ \ - punpckhdq_r2r(xmm2, xmm3); /* xmm3=d3c3b3a3d2c2b2a2 */ \ - /* Free xmm2 */ \ - movdqu_r2r(xmm7, xmm2); /* make a copy */ \ - punpckldq_r2r(xmm1, xmm2); /* xmm2=d5c5b5a5d4c4b4a4 */ \ - \ - punpckhdq_r2r(xmm1, xmm7); /* xmm7=d7c7b7a7d6c6b6a6 */ \ - movdqu_r2r(xmm0, xmm1); /* make a copy */ \ - \ - punpcklqdq_r2r(xmm4, xmm0); /* xmm0=h0g0f0e0d0c0b0a0 */ \ - punpckhqdq_r2r(xmm4, xmm1); /* xmm1=h1g1g1e1d1c1b1a1 */ \ - \ - movdqu_r2m(xmm0, *I(0)); /* save I(0) */ \ - movdqu_r2m(xmm1, *I(1)); /* save I(1) */ \ - \ - movdqu_m2r(*I(6), xmm0); /* load h3g3g3e3h2g2f2e2 */ \ - movdqu_r2r(xmm3, xmm1); /* make a copy */ \ - \ - punpcklqdq_r2r(xmm0, xmm1); /* xmm1=h2g2f2e2d2c2b2a2 */ \ - punpckhqdq_r2r(xmm0, xmm3); /* xmm3=h3g3f3e3d3c3b3a3 */ \ - \ - movdqu_r2r(xmm2, xmm4); /* make a copy */ \ - punpcklqdq_r2r(xmm5, xmm4); /* xmm4=h4g4f4e4d4c4b4a4 */ \ - \ - punpckhqdq_r2r(xmm5, xmm2); /* xmm2=h5g5f5e5d5c5b5a5 */ \ - movdqu_r2m(xmm1, *I(2)); /* save I(2) */ \ - \ - movdqu_r2m(xmm3, *I(3)); /* save I(3) */ \ - movdqu_r2m(xmm4, *I(4)); /* save I(4) */ \ - \ - movdqu_r2m(xmm2, *I(5)); /* save I(5) */ \ - movdqu_r2r(xmm7, xmm5); /* make a copy */ \ - \ - punpcklqdq_r2r(xmm6, xmm5); /* xmm5=h6g6f6e6d6c6b6a6 */ \ - punpckhqdq_r2r(xmm6, xmm7); /* xmm7=h7g7f7e7d7c7b7a7 */ \ - \ - movdqu_r2m(xmm5, *I(6)); /* save I(6) */ \ - movdqu_r2m(xmm7, *I(7)); /* save I(7) */ \ - \ -} /* End of Transpose Macro */ - - -#define SSE2_Dequantize() { \ - movdqu_m2r(*(eax), xmm0); \ - \ - pmullw_m2r(*(ebx), xmm0); /* xmm0 = 07 06 05 04 03 02 01 00 */ \ - 
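 /* Added gloss, not in the VP3 original: this macro both dequantizes    \
  * and reorders.  The pmullw lines multiply each coefficient by its     \
  * dequantizer entry; the pand/pxor/pshuf/pslldq/por work in between    \
  * gathers the products into the row layout the row IDCT expects.  A    \
  * two-digit label "rc" in the comments names the input word that       \
  * started at row r, column c (offset 16*r + 2*c from eax), so each     \
  * "write" comment records exactly which inputs land in that store.     \
  */                                                                     \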
movdqu_m2r(*(eax + 16), xmm1); \ - \ - pmullw_m2r(*(ebx + 16), xmm1); /* xmm1 = 17 16 15 14 13 12 11 10 */ \ - pshuflw_r2r(xmm0, xmm3, 0x078); /* xmm3 = 07 06 05 04 01 03 02 00 */ \ - \ - movdqu_r2r(xmm1, xmm2); /* xmm2 = 17 16 15 14 13 12 11 10 */ \ - movdqu_m2r(*(ecx), xmm7); /* xmm7 = -- -- -- -- -- FF FF -- */ \ - \ - movdqu_m2r(*(eax + 32), xmm4); \ - movdqu_m2r(*(eax + 64), xmm5); \ - \ - pmullw_m2r(*(ebx + 32), xmm4); /* xmm4 = 27 26 25 24 23 22 21 20 */ \ - pmullw_m2r(*(ebx + 64), xmm5); /* xmm5 = 47 46 45 44 43 42 41 40 */ \ - \ - movdqu_m2r(*(ecx + 16), xmm6); /* xmm6 = -- -- FF FF -- -- -- -- */ \ - pand_r2r(xmm2, xmm7); /* xmm7 = -- -- -- -- -- 12 11 -- */ \ - \ - pand_r2r(xmm4, xmm6); /* xmm6 = -- -- 25 24 -- -- -- -- */ \ - pxor_r2r(xmm7, xmm2); /* xmm2 = 17 16 15 14 13 -- -- 10 */ \ - \ - pxor_r2r(xmm6, xmm4); /* xmm4 = 27 26 -- -- 23 22 21 20 */ \ - pslldq_i2r(4, xmm7); /* xmm7 = -- -- -- 12 11 -- -- -- */ \ - \ - pslldq_i2r(2, xmm6); /* xmm6 = -- 25 24 -- -- -- -- -- */ \ - por_r2r(xmm6, xmm7); /* xmm7 = -- 25 24 12 11 -- -- -- */ \ - \ - movdqu_m2r(*(ecx + 32), xmm0); /* xmm0 = -- -- -- -- -- FF FF FF */ \ - movdqu_m2r(*(ecx + 48), xmm6); /* xmm6 = -- -- -- -- FF -- -- -- */ \ - \ - pand_r2r(xmm3, xmm0); /* xmm0 = -- -- -- -- -- 03 02 00 */ \ - pand_r2r(xmm5, xmm6); /* xmm6 = -- -- -- -- 43 -- -- -- */ \ - \ - pxor_r2r(xmm0, xmm3); /* xmm3 = 07 06 05 04 01 -- -- -- */ \ - pxor_r2r(xmm6, xmm5); /* xmm5 = 47 46 45 44 -- 42 41 40 */ \ - \ - por_r2r(xmm7, xmm0); /* xmm0 = -- 25 24 12 11 03 02 00 */ \ - pslldq_i2r(8, xmm6); /* xmm6 = 43 -- -- -- -- -- -- -- */ \ - \ - por_r2r(xmm6, xmm0); /* xmm0 = 43 25 24 12 11 03 02 00 */ \ - /* 02345 in use */ \ - \ - movdqu_m2r(*(ecx + 64 ), xmm1); /* xmm1 = -- -- -- FF FF -- -- -- */ \ - pshuflw_r2r(xmm5, xmm5, 0x0B4); /* xmm5 = 47 46 45 44 42 -- 41 40 */ \ - \ - movdqu_r2r(xmm1, xmm7); /* xmm7 = -- -- -- FF FF -- -- -- */ \ - movdqu_r2r(xmm1, xmm6); /* xmm6 = -- -- -- FF FF -- -- -- */ \ - \ - movdqu_r2m(xmm0, *(eax)); /* write 43 25 24 12 11 03 02 00 */ \ - pshufhw_r2r(xmm4, xmm4, 0x0C2); /* xmm4 = 27 -- -- 26 23 22 21 20 */ \ - \ - pand_r2r(xmm4, xmm7); /* xmm7 = -- -- -- 26 23 -- -- -- */ \ - pand_r2r(xmm5, xmm1); /* xmm1 = -- -- -- 44 42 -- -- -- */ \ - \ - pxor_r2r(xmm7, xmm4); /* xmm4 = 27 -- -- -- -- 22 21 20 */ \ - pxor_r2r(xmm1, xmm5); /* xmm5 = 47 46 45 -- -- -- 41 40 */ \ - \ - pshuflw_r2r(xmm2, xmm2, 0x0C6); /* xmm2 = 17 16 15 14 13 10 -- -- */ \ - movdqu_r2r(xmm6, xmm0); /* xmm0 = -- -- -- FF FF -- -- -- */ \ - \ - pslldq_i2r(2, xmm7); /* xmm7 = -- -- 26 23 -- -- -- -- */ \ - pslldq_i2r(6, xmm1); /* xmm1 = 44 42 -- -- -- -- -- -- */ \ - \ - psrldq_i2r(2, xmm0); /* xmm0 = -- -- -- -- FF FF -- -- */ \ - pand_r2r(xmm3, xmm6); /* xmm6 = -- -- -- 04 01 -- -- -- */ \ - \ - pand_r2r(xmm2, xmm0); /* xmm0 = -- -- -- -- 13 10 -- -- */ \ - pxor_r2r(xmm6, xmm3); /* xmm3 = 07 06 05 -- -- -- -- -- */ \ - \ - pxor_r2r(xmm0, xmm2); /* xmm2 = 17 16 15 14 -- -- -- -- */ \ - psrldq_i2r(6, xmm6); /* xmm0 = -- -- -- -- -- -- 04 01 */ \ - \ - por_r2r(xmm7, xmm1); /* xmm1 = 44 42 26 23 -- -- -- -- */ \ - por_r2r(xmm6, xmm0); /* xmm1 = -- -- -- -- 13 10 04 01 */ \ - /* 12345 in use */ \ - por_r2r(xmm0, xmm1); /* xmm1 = 44 42 26 23 13 10 04 01 */ \ - pshuflw_r2r(xmm4, xmm4, 0x093); /* xmm4 = 27 -- -- -- 22 21 20 -- */ \ - \ - pshufhw_r2r(xmm4, xmm4, 0x093); /* xmm4 = -- -- -- 27 22 21 20 -- */ \ - movdqu_r2m(xmm1, *(eax + 16)); /* write 44 42 26 23 13 10 04 01 */ \ - \ - pshufhw_r2r(xmm3, xmm3, 0x0D2); /* xmm3 = 07 05 -- 06 -- -- -- -- */ \ - 
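 /* Added gloss: in the pshuflw/pshufhw/pshufd immediates each 2-bit     \
  * field, lowest bits first, selects a source word for the next         \
  * destination word.  E.g. 0x0D2 = 11 01 00 10b, so the pshufhw just    \
  * above maps the high words to src[6], src[4], src[5], src[7],         \
  * turning "07 06 05 --" into "07 05 -- 06" as its comment shows.       \
  */                                                                     \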
movdqu_m2r(*(ecx + 64), xmm0); /* xmm0 = -- -- -- FF FF -- -- -- */ \ - \ - pand_r2r(xmm3, xmm0); /* xmm0 = -- -- -- 06 -- -- -- -- */ \ - psrldq_i2r(12, xmm3); /* xmm3 = -- -- -- -- -- -- 07 05 */ \ - \ - psrldq_i2r(8, xmm0); /* xmm0 = -- -- -- -- -- -- -- 06 */ \ - \ - movdqu_m2r(*(ecx + 64), xmm6); /* xmm6 = -- -- -- FF FF -- -- -- */ \ - movdqu_m2r(*(ecx + 96), xmm7); /* xmm7 = -- -- -- -- FF FF -- -- */ \ - \ - pand_r2r(xmm4, xmm6); /* xmm6 = -- -- -- 27 22 -- -- -- */ \ - pxor_r2r(xmm6, xmm4); /* xmm4 = -- -- -- -- -- 21 20 -- */ \ - \ - por_r2r(xmm6, xmm3); /* xmm3 = -- -- -- 27 22 -- 07 05 */ \ - pand_r2r(xmm4, xmm7); /* xmm7 = -- -- -- -- -- 21 -- -- */ \ - \ - por_r2r(xmm7, xmm0); /* xmm0 = -- -- -- -- -- 21 -- 06 */ \ - pxor_r2r(xmm7, xmm4); /* xmm4 = -- -- -- -- -- -- 20 -- */ \ - \ - movdqu_m2r(*(ecx + 16 ), xmm6); /* xmm6 = -- -- FF FF -- -- -- -- */ \ - movdqu_m2r(*(ecx + 64 ), xmm1); /* xmm1 = -- -- -- FF FF -- -- -- */ \ - \ - pand_r2r(xmm2, xmm6); /* xmm6 = -- -- 15 14 -- -- -- -- */ \ - pand_r2r(xmm6, xmm1); /* xmm1 = -- -- -- 14 -- -- -- -- */ \ - \ - pxor_r2r(xmm6, xmm2); /* xmm2 = 17 16 -- -- -- -- -- -- */ \ - pxor_r2r(xmm1, xmm6); /* xmm6 = -- -- 15 -- -- -- -- -- */ \ - \ - psrldq_i2r(4, xmm1); /* xmm1 = -- -- -- -- -- 14 -- -- */ \ - \ - psrldq_i2r(8, xmm6); /* xmm6 = -- -- -- -- -- -- 15 -- */ \ - por_r2r(xmm1, xmm3); /* xmm3 = -- -- -- 27 22 14 07 05 */ \ - \ - por_r2r(xmm6, xmm0); /* xmm0 = -- -- -- -- -- 21 15 06 */ \ - pshufhw_r2r(xmm5, xmm5, 0x0E1); /* xmm5 = 47 46 -- 45 -- -- 41 40 */ \ - \ - movdqu_m2r(*(ecx + 64), xmm1); /* xmm1 = -- -- -- FF FF -- -- -- */ \ - pshuflw_r2r(xmm5, xmm5, 0x072); /* xmm5 = 47 46 -- 45 41 -- 40 -- */ \ - \ - movdqu_r2r(xmm1, xmm6); /* xmm6 = -- -- -- FF FF -- -- -- */ \ - pand_r2r(xmm5, xmm1); /* xmm1 = -- -- -- 45 41 -- -- -- */ \ - \ - pxor_r2r(xmm1, xmm5); /* xmm5 = 47 46 -- -- -- -- 40 -- */ \ - pslldq_i2r(4, xmm1); /* xmm1 = -- 45 41 -- -- -- -- -- */ \ - \ - pshufd_r2r(xmm5, xmm5, 0x09C); /* xmm5 = -- -- -- -- 47 46 40 -- */ \ - por_r2r(xmm1, xmm3); /* xmm3 = -- 45 41 27 22 14 07 05 */ \ - \ - movdqu_m2r(*(eax + 96), xmm1); /* xmm1 = 67 66 65 64 63 62 61 60 */ \ - pmullw_m2r(*(ebx + 96), xmm1); \ - \ - movdqu_m2r(*(ecx), xmm7); /* xmm7 = -- -- -- -- -- FF FF -- */ \ - \ - psrldq_i2r(8, xmm6); /* xmm6 = -- -- -- -- -- -- -- FF */ \ - pand_r2r(xmm5, xmm7); /* xmm7 = -- -- -- -- -- 46 40 -- */ \ - \ - pand_r2r(xmm1, xmm6); /* xmm6 = -- -- -- -- -- -- -- 60 */ \ - pxor_r2r(xmm7, xmm5); /* xmm5 = -- -- -- -- 47 -- -- -- */ \ - \ - pxor_r2r(xmm6, xmm1); /* xmm1 = 67 66 65 64 63 62 61 -- */ \ - pslldq_i2r(2, xmm5); /* xmm5 = -- -- -- 47 -- -- -- -- */ \ - \ - pslldq_i2r(14, xmm6); /* xmm6 = 60 -- -- -- -- -- -- -- */ \ - por_r2r(xmm5, xmm4); /* xmm4 = -- -- -- 47 -- -- 20 -- */ \ - \ - por_r2r(xmm6, xmm3); /* xmm3 = 60 45 41 27 22 14 07 05 */ \ - pslldq_i2r(6, xmm7); /* xmm7 = -- -- 46 40 -- -- -- -- */ \ - \ - movdqu_r2m(xmm3, *(eax+32)); /* write 60 45 41 27 22 14 07 05 */ \ - por_r2r(xmm7, xmm0); /* xmm0 = -- -- 46 40 -- 21 15 06 */ \ - /* 0, 1, 2, 4 in use */ \ - movdqu_m2r(*(eax + 48), xmm3); /* xmm3 = 37 36 35 34 33 32 31 30 */ \ - movdqu_m2r(*(eax + 80), xmm5); /* xmm5 = 57 56 55 54 53 52 51 50 */ \ - \ - pmullw_m2r(*(ebx + 48), xmm3); \ - pmullw_m2r(*(ebx + 80), xmm5); \ - \ - movdqu_m2r(*(ecx + 64), xmm6); /* xmm6 = -- -- -- FF FF -- -- -- */ \ - movdqu_m2r(*(ecx + 64), xmm7); /* xmm7 = -- -- -- FF FF -- -- -- */ \ - \ - psrldq_i2r(8, xmm6); /* xmm6 = -- -- -- -- -- -- -- FF */ \ - pslldq_i2r(8, xmm7); /* xmm7 = FF -- 
-- -- -- -- -- -- */ \ - \ - pand_r2r(xmm3, xmm6); /* xmm6 = -- -- -- -- -- -- -- 30 */ \ - pand_r2r(xmm5, xmm7); /* xmm7 = 57 -- -- -- -- -- -- -- */ \ - \ - pxor_r2r(xmm6, xmm3); /* xmm3 = 37 36 35 34 33 32 31 -- */ \ - pxor_r2r(xmm7, xmm5); /* xmm5 = __ 56 55 54 53 52 51 50 */ \ - \ - pslldq_i2r(6, xmm6); /* xmm6 = -- -- -- -- 30 -- -- -- */ \ - psrldq_i2r(2, xmm7); /* xmm7 = -- 57 -- -- -- -- -- -- */ \ - \ - por_r2r(xmm7, xmm6); /* xmm6 = -- 57 -- -- 30 -- -- -- */ \ - movdqu_m2r(*(ecx), xmm7); /* xmm7 = -- -- -- -- -- FF FF -- */ \ - \ - por_r2r(xmm6, xmm0); /* xmm0 = -- 57 46 40 30 21 15 06 */ \ - psrldq_i2r(2, xmm7); /* xmm7 = -- -- -- -- -- -- FF FF */ \ - \ - movdqu_r2r(xmm2, xmm6); /* xmm6 = 17 16 -- -- -- -- -- -- */ \ - pand_r2r(xmm1, xmm7); /* xmm7 = -- -- -- -- -- -- 61 -- */ \ - \ - pslldq_i2r(2, xmm6); /* xmm6 = 16 -- -- -- -- -- -- -- */ \ - psrldq_i2r(14, xmm2); /* xmm2 = -- -- -- -- -- -- -- 17 */ \ - \ - pxor_r2r(xmm7, xmm1); /* xmm1 = 67 66 65 64 63 62 -- -- */ \ - pslldq_i2r(12, xmm7); /* xmm7 = 61 -- -- -- -- -- -- -- */ \ - \ - psrldq_i2r(14, xmm6); /* xmm6 = -- -- -- -- -- -- -- 16 */ \ - por_r2r(xmm6, xmm4); /* xmm4 = -- -- -- 47 -- -- 20 16 */ \ - \ - por_r2r(xmm7, xmm0); /* xmm0 = 61 57 46 40 30 21 15 06 */ \ - movdqu_m2r(*(ecx), xmm6); /* xmm6 = -- -- -- -- -- FF FF -- */ \ - \ - psrldq_i2r(2, xmm6); /* xmm6 = -- -- -- -- -- -- FF FF */ \ - movdqu_r2m(xmm0, *(eax+48)); /* write 61 57 46 40 30 21 15 06 */ \ - /* 1, 2, 3, 4, 5 in use */\ - movdqu_m2r(*(ecx), xmm0); /* xmm0 = -- -- -- -- -- FF FF -- */ \ - pand_r2r(xmm3, xmm6); /* xmm6 = -- -- -- -- -- -- 31 -- */ \ - \ - movdqu_r2r(xmm3, xmm7); /* xmm7 = 37 36 35 34 33 32 31 -- */ \ - pxor_r2r(xmm6, xmm3); /* xmm3 = 37 36 35 34 33 32 -- -- */ \ - \ - pslldq_i2r(2, xmm3); /* xmm3 = 36 35 34 33 32 -- -- -- */ \ - pand_r2r(xmm1, xmm0); /* xmm0 = -- -- -- -- -- 62 -- -- */ \ - \ - psrldq_i2r(14, xmm7); /* xmm7 = -- -- -- -- -- -- -- 37 */ \ - pxor_r2r(xmm0, xmm1); /* xmm1 = 67 66 65 64 63 -- -- -- */ \ - \ - por_r2r(xmm7, xmm6); /* xmm6 = -- -- -- -- -- -- 31 37 */ \ - movdqu_m2r(*(ecx + 64), xmm7); /* xmm7 = -- -- -- FF FF -- -- -- */ \ - \ - pshuflw_r2r(xmm6, xmm6, 0x01E); /* xmm6 = -- -- -- -- 37 31 -- -- */ \ - pslldq_i2r(6, xmm7); /* xmm7 = FF FF -- -- -- -- -- -- */ \ - \ - por_r2r(xmm6, xmm4); /* xmm4 = -- -- -- 47 37 31 20 16 */ \ - pand_r2r(xmm5, xmm7); /* xmm7 = -- 56 -- -- -- -- -- -- */ \ - \ - pslldq_i2r(8, xmm0); /* xmm0 = -- 62 -- -- -- -- -- -- */ \ - pxor_r2r(xmm7, xmm5); /* xmm5 = -- -- 55 54 53 52 51 50 */ \ - \ - psrldq_i2r(2, xmm7); /* xmm7 = -- -- 56 -- -- -- -- -- */ \ - \ - pshufhw_r2r(xmm3, xmm3, 0x087); /* xmm3 = 35 33 34 36 32 -- -- -- */ \ - por_r2r(xmm7, xmm0); /* xmm0 = -- 62 56 -- -- -- -- -- */ \ - \ - movdqu_m2r(*(eax + 112), xmm7); /* xmm7 = 77 76 75 74 73 72 71 70 */ \ - pmullw_m2r(*(ebx + 112), xmm7); \ - \ - movdqu_m2r(*(ecx + 64), xmm6); /* xmm6 = -- -- -- FF FF -- -- -- */ \ - por_r2r(xmm0, xmm4); /* xmm4 = -- 62 56 47 37 31 20 16 */ \ - \ - pshuflw_r2r(xmm7, xmm7, 0x0E1); /* xmm7 = 77 76 75 74 73 72 70 71 */ \ - psrldq_i2r(8, xmm6); /* xmm6 = -- -- -- -- -- -- -- FF */ \ - \ - movdqu_m2r(*(ecx + 64), xmm0); /* xmm0 = -- -- -- FF FF -- -- -- */ \ - pand_r2r(xmm7, xmm6); /* xmm6 = -- -- -- -- -- -- -- 71 */ \ - \ - pand_r2r(xmm3, xmm0); /* xmm0 = -- -- -- 36 32 -- -- -- */ \ - pxor_r2r(xmm6, xmm7); /* xmm7 = 77 76 75 74 73 72 70 -- */ \ - \ - pxor_r2r(xmm0, xmm3); /* xmm3 = 35 33 34 -- -- -- -- -- */ \ - pslldq_i2r(14, xmm6); /* xmm6 = 71 -- -- -- -- -- -- -- */ \ - \ - 
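 /* Added gloss: the bare tallies like "1, 2, 3, 4, 5 in use" mark       \
  * which xmm registers still hold live data at that point; the macro    \
  * keeps all eight in flight, and these notes are presumably how free   \
  * registers were tracked while the shuffle sequence was written.       \
  */                                                                     \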
psrldq_i2r(4, xmm0); /* xmm0 = -- -- -- -- -- 36 32 -- */ \ - por_r2r(xmm6, xmm4); /* xmm4 = 71 62 56 47 37 31 20 16 */ \ - \ - por_r2r(xmm0, xmm2); /* xmm2 = -- -- -- -- -- 36 32 17 */ \ - movdqu_r2m(xmm4, *(eax + 64)); /* write 71 62 56 47 37 31 20 16 */ \ - /* 1, 2, 3, 5, 7 in use */ \ - movdqu_m2r(*(ecx + 80), xmm6); /* xmm6 = -- -- FF -- -- -- -- FF */ \ - pshufhw_r2r(xmm7, xmm7, 0x0D2); /* xmm7 = 77 75 74 76 73 72 70 __ */ \ - \ - movdqu_m2r(*(ecx), xmm4); /* xmm4 = -- -- -- -- -- FF FF -- */ \ - movdqu_m2r(*(ecx+48), xmm0); /* xmm0 = -- -- -- -- FF -- -- -- */ \ - \ - pand_r2r(xmm5, xmm6); /* xmm6 = -- -- 55 -- -- -- -- 50 */ \ - pand_r2r(xmm7, xmm4); /* xmm4 = -- -- -- -- -- 72 70 -- */ \ - \ - pand_r2r(xmm1, xmm0); /* xmm0 = -- -- -- -- 63 -- -- -- */ \ - pxor_r2r(xmm6, xmm5); /* xmm5 = -- -- -- 54 53 52 51 -- */ \ - \ - pxor_r2r(xmm4, xmm7); /* xmm7 = 77 75 74 76 73 -- -- -- */ \ - pxor_r2r(xmm0, xmm1); /* xmm1 = 67 66 65 64 -- -- -- -- */ \ - \ - pshuflw_r2r(xmm6, xmm6, 0x02B); /* xmm6 = -- -- 55 -- 50 -- -- -- */ \ - pslldq_i2r(10, xmm4); /* xmm4 = 72 20 -- -- -- -- -- -- */ \ - \ - pshufhw_r2r(xmm6, xmm6, 0x0B1); /* xmm6 = -- -- -- 55 50 -- -- -- */ \ - pslldq_i2r(4, xmm0); /* xmm0 = -- -- 63 -- -- -- -- -- */ \ - \ - por_r2r(xmm4, xmm6); /* xmm6 = 72 70 -- 55 50 -- -- -- */ \ - por_r2r(xmm0, xmm2); /* xmm2 = -- -- 63 -- -- 36 32 17 */ \ - \ - por_r2r(xmm6, xmm2); /* xmm2 = 72 70 64 55 50 36 32 17 */ \ - pshufhw_r2r(xmm1, xmm1, 0x0C9); /* xmm1 = 67 64 66 65 -- -- -- -- */ \ - \ - movdqu_r2r(xmm3, xmm6); /* xmm6 = 35 33 34 -- -- -- -- -- */ \ - movdqu_r2m(xmm2, *(eax+80)); /* write 72 70 64 55 50 36 32 17 */ \ - \ - psrldq_i2r(12, xmm6); /* xmm6 = -- -- -- -- -- -- 35 33 */ \ - pslldq_i2r(4, xmm3); /* xmm3 = 34 -- -- -- -- -- -- -- */ \ - \ - pshuflw_r2r(xmm5, xmm5, 0x04E); /* xmm5 = -- -- -- 54 51 -- 53 52 */ \ - movdqu_r2r(xmm7, xmm4); /* xmm4 = 77 75 74 76 73 -- -- -- */ \ - \ - movdqu_r2r(xmm5, xmm2); /* xmm2 = -- -- -- 54 51 -- 53 52 */ \ - psrldq_i2r(10, xmm7); /* xmm7 = -- -- -- -- -- 77 75 74 */ \ - \ - pslldq_i2r(6, xmm4); /* xmm4 = 76 73 -- -- -- -- -- -- */ \ - pslldq_i2r(12, xmm2); /* xmm2 = 53 52 -- -- -- -- -- -- */ \ - \ - movdqu_r2r(xmm1, xmm0); /* xmm0 = 67 64 66 65 -- -- -- -- */ \ - psrldq_i2r(12, xmm1); /* xmm1 = -- -- -- -- -- -- 67 64 */ \ - \ - psrldq_i2r(6, xmm5); /* xmm5 = -- -- -- -- -- -- 54 51 */ \ - psrldq_i2r(14, xmm3); /* xmm3 = -- -- -- -- -- -- -- 34 */ \ - \ - pslldq_i2r(10, xmm7); /* xmm7 = 77 75 74 -- -- -- -- -- */ \ - por_r2r(xmm6, xmm4); /* xmm4 = 76 73 -- -- -- -- 35 33 */ \ - \ - psrldq_i2r(10, xmm2); /* xmm2 = -- -- -- -- -- 53 52 -- */ \ - pslldq_i2r(4, xmm0); /* xmm0 = 66 65 -- -- -- -- -- -- */ \ - \ - pslldq_i2r(8, xmm1); /* xmm1 = -- -- 67 64 -- -- -- -- */ \ - por_r2r(xmm7, xmm3); /* xmm3 = 77 75 74 -- -- -- -- 34 */ \ - \ - psrldq_i2r(6, xmm0); /* xmm0 = -- -- -- 66 65 -- -- -- */ \ - pslldq_i2r(4, xmm5); /* xmm5 = -- -- -- -- 54 51 -- -- */ \ - \ - por_r2r(xmm1, xmm4); /* xmm4 = 76 73 67 64 -- -- 35 33 */ \ - por_r2r(xmm2, xmm3); /* xmm3 = 77 75 74 -- -- 53 52 34 */ \ - \ - por_r2r(xmm5, xmm4); /* xmm4 = 76 73 67 64 54 51 35 33 */ \ - por_r2r(xmm0, xmm3); /* xmm3 = 77 75 74 66 65 53 52 34 */ \ - \ - movdqu_r2m(xmm4, *(eax+96)); /* write 76 73 67 64 54 51 35 33 */ \ - movdqu_r2m(xmm3, *(eax+112)); /* write 77 75 74 66 65 53 52 34 */ \ - \ -} /* end of SSE2_Dequantize Macro */ - - -void ff_vp3_idct_sse2(int16_t *input_data) -{ - unsigned char *input_bytes = (unsigned char *)input_data; - unsigned char *output_data_bytes = 
(unsigned char *)input_data; - unsigned char *idct_data_bytes = (unsigned char *)SSE2_idct_data; - unsigned char *Eight = (unsigned char *)eight_data; - -#define eax input_bytes -//#define ebx dequant_matrix_bytes -#define ecx dequant_const_bytes -#define edx idct_data_bytes - -#define I(i) (eax + 16 * i) -#define O(i) (ebx + 16 * i) -#define C(i) (edx + 16 * (i-1)) - - // SSE2_Dequantize(); - -#undef ebx -#define ebx output_data_bytes - - SSE2_Row_IDCT(); - - SSE2_Transpose(); - - SSE2_Column_IDCT(); -}
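A closing note on the arithmetic shared by all of the IDCT macros above: the constant tables hold cos(k*pi/16) scaled by 65536 (64277 = cos(pi/16)*65536, down to 12785 = cos(7*pi/16)*65536), and pmulhw keeps only the high 16 bits of a signed 16x16 product. For the five constants above 32767 the signed operand wraps to c - 65536, so pmulhw alone yields x*cos - x; the paddw that follows each such pmulhw (hence the "c3 * i3 - i3" comments) restores the missing x. A minimal scalar sketch of that identity, written for this note rather than taken from the deleted files (the helper name mul_cos_q16 is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Scalar model of the pmulhw/paddw pair used in the macros above.
     * c is a cosine scaled by 65536, e.g. 64277 for cos(pi/16). */
    static int16_t mul_cos_q16(int16_t x, uint16_t c)
    {
        int16_t wrapped = (int16_t)c;  /* c - 65536 whenever c > 32767 */
        int32_t prod    = (int32_t)wrapped * x;
        /* high 16 bits of the signed product, i.e. what pmulhw computes
         * (arithmetic right shift assumed, as on the MMX hardware) */
        int16_t hi      = (int16_t)(prod >> 16);
        if (c > 32767)                 /* true for C(1)..C(5) above */
            hi = (int16_t)(hi + x);    /* the compensating paddw */
        return hi;                     /* floor(c * x / 65536) */
    }

    int main(void)
    {
        /* 1000 * cos(pi/16) ~= 980.8, truncated to 980 */
        printf("%d\n", mul_cos_q16(1000, 64277));
        return 0;
    }

The MMX macros apply this four words at a time and the SSE2 macros eight; everything else, from the A/B/../H butterflies to the >>4 normalization against the *Eight rounding constant, is the same dataflow in both files.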