path: root/src/libffmpeg/libavcodec/i386
Diffstat (limited to 'src/libffmpeg/libavcodec/i386')
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_mmx.c              1104
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h            558
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h            305
-rw-r--r--  src/libffmpeg/libavcodec/i386/fdct_mmx.c                     4
-rw-r--r--  src/libffmpeg/libavcodec/i386/idct_mmx.c                     8
-rw-r--r--  src/libffmpeg/libavcodec/i386/motion_est_mmx.c              27
-rw-r--r--  src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c              339
-rw-r--r--  src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c     292
-rw-r--r--  src/libffmpeg/libavcodec/i386/simple_idct_mmx.c           2096
9 files changed, 2162 insertions, 2571 deletions
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c
index 2c71850ee..b8eaa5fbd 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c
@@ -1,25 +1,24 @@
/*
* MMX optimized DSP utils
- * Copyright (c) 2000, 2001 Gerard Lantau.
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
*
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*/
-#include "xine-utils/xineutils.h"
#include "../dsputil.h"
#include "../simple_idct.h"
@@ -45,38 +44,124 @@ int pix_abs8x8_x2_mmx2(UINT8 *blk1, UINT8 *blk2, int lx);
int pix_abs8x8_y2_mmx2(UINT8 *blk1, UINT8 *blk2, int lx);
int pix_abs8x8_xy2_mmx2(UINT8 *blk1, UINT8 *blk2, int lx);
-
/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);
/* pixel operations */
-static const unsigned long long int mm_wone __attribute__ ((aligned(8))) = 0x0001000100010001LL;
-static const unsigned long long int mm_wtwo __attribute__ ((aligned(8))) = 0x0002000200020002LL;
-//static const unsigned short mm_wone[4] __attribute__ ((aligned(8))) = { 0x1, 0x1, 0x1, 0x1 };
-//static const unsigned short mm_wtwo[4] __attribute__ ((aligned(8))) = { 0x2, 0x2, 0x2, 0x2 };
+static const uint64_t mm_bone __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
+static const uint64_t mm_wone __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
+static const uint64_t mm_wtwo __attribute__ ((aligned(8))) = 0x0002000200020002ULL;
#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)
+#define MOVQ_WONE(regd) \
+ __asm __volatile ( \
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
+ "psrlw $15, %%" #regd ::)
+
+#define MOVQ_BFE(regd) \
+ __asm __volatile ( \
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
+ "paddb %%" #regd ", %%" #regd " \n\t" ::)
+
#ifndef PIC
-#define MOVQ_WONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wone))
+#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared library it's better to use this way for accessing constants
// pcmpeqd -> -1
-#define MOVQ_WONE(regd) \
+#define MOVQ_BONE(regd) \
__asm __volatile ( \
- "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
- "psrlw $15, %%" #regd ::)
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
+ "psrlw $15, %%" #regd " \n\t" \
+ "packuswb %%" #regd ", %%" #regd " \n\t" ::)
#define MOVQ_WTWO(regd) \
__asm __volatile ( \
- "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
- "psrlw $15, %%" #regd " \n\t" \
- "psllw $1, %%" #regd ::)
+ "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
+ "psrlw $15, %%" #regd " \n\t" \
+ "psllw $1, %%" #regd " \n\t"::)
+
#endif
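
For reference (annotation, not part of the commit): the PIC variants above build their constants entirely in registers. MOVQ_BONE uses pcmpeqd to set every bit, psrlw $15 to leave 0x0001 in each 16-bit word, and packuswb to pack those words into 0x01 bytes, yielding 0x0101010101010101. MOVQ_BFE works the same way for 0xFE bytes: all-ones followed by paddb gives 0xFF+0xFF = 0xFE in every byte. Avoiding a memory load of the constant is what makes these safe in a shared-library (-fPIC) build.
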
+// using regr as temporary and for the output result
+// first argument is unmodified and the second is trashed
+// regfe is supposed to contain 0xfefefefefefefefe
+#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
+ "movq " #rega ", " #regr " \n\t"\
+ "pand " #regb ", " #regr " \n\t"\
+ "pxor " #rega ", " #regb " \n\t"\
+ "pand " #regfe "," #regb " \n\t"\
+ "psrlq $1, " #regb " \n\t"\
+ "paddb " #regb ", " #regr " \n\t"
+
+#define PAVGB_MMX(rega, regb, regr, regfe) \
+ "movq " #rega ", " #regr " \n\t"\
+ "por " #regb ", " #regr " \n\t"\
+ "pxor " #rega ", " #regb " \n\t"\
+ "pand " #regfe "," #regb " \n\t"\
+ "psrlq $1, " #regb " \n\t"\
+ "psubb " #regb ", " #regr " \n\t"
+
+// mm6 is supposed to contain 0xfefefefefefefefe
+#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
+ "movq " #rega ", " #regr " \n\t"\
+ "movq " #regc ", " #regp " \n\t"\
+ "pand " #regb ", " #regr " \n\t"\
+ "pand " #regd ", " #regp " \n\t"\
+ "pxor " #rega ", " #regb " \n\t"\
+ "pxor " #regc ", " #regd " \n\t"\
+ "pand %%mm6, " #regb " \n\t"\
+ "pand %%mm6, " #regd " \n\t"\
+ "psrlq $1, " #regb " \n\t"\
+ "psrlq $1, " #regd " \n\t"\
+ "paddb " #regb ", " #regr " \n\t"\
+ "paddb " #regd ", " #regp " \n\t"
+
+#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
+ "movq " #rega ", " #regr " \n\t"\
+ "movq " #regc ", " #regp " \n\t"\
+ "por " #regb ", " #regr " \n\t"\
+ "por " #regd ", " #regp " \n\t"\
+ "pxor " #rega ", " #regb " \n\t"\
+ "pxor " #regc ", " #regd " \n\t"\
+ "pand %%mm6, " #regb " \n\t"\
+ "pand %%mm6, " #regd " \n\t"\
+ "psrlq $1, " #regd " \n\t"\
+ "psrlq $1, " #regb " \n\t"\
+ "psubb " #regb ", " #regr " \n\t"\
+ "psubb " #regd ", " #regp " \n\t"
+
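
As a rough scalar sketch of what the PAVGB macros compute (illustration only, names are mine): averaging two unsigned bytes without widening uses the identities (a+b)>>1 == (a&b) + (((a^b)&0xFE)>>1) and (a+b+1)>>1 == (a|b) - (((a^b)&0xFE)>>1). In the packed 64-bit case the 0xFE mask (the regfe constant 0xfefefefefefefefe from MOVQ_BFE) keeps psrlq from shifting a byte's low bit across into the byte below it.

    #include <stdint.h>

    /* scalar equivalents of PAVGB_MMX_NO_RND and PAVGB_MMX */
    static inline uint8_t avg_no_rnd(uint8_t a, uint8_t b)
    {
        return (a & b) + (((a ^ b) & 0xFE) >> 1);   /* floor((a+b)/2) */
    }

    static inline uint8_t avg_rnd(uint8_t a, uint8_t b)
    {
        return (a | b) - (((a ^ b) & 0xFE) >> 1);   /* (a+b+1)>>1 */
    }
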
+/***********************************/
+/* MMX no rounding */
+#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
+#define SET_RND MOVQ_WONE
+#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
+#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
+
+#include "dsputil_mmx_rnd.h"
+
+#undef DEF
+#undef SET_RND
+#undef PAVGBP
+#undef PAVGB
+/***********************************/
+/* MMX rounding */
+
+#define DEF(x, y) x ## _ ## y ##_mmx
+#define SET_RND MOVQ_WTWO
+#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
+#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
+
+#include "dsputil_mmx_rnd.h"
+
+#undef DEF
+#undef SET_RND
+#undef PAVGBP
+#undef PAVGB
+
/***********************************/
/* 3Dnow specific */
@@ -92,7 +177,7 @@ static const unsigned long long int mm_wtwo __attribute__ ((aligned(8))) = 0x000
/***********************************/
/* MMX2 specific */
-#define DEF(x) x ## _sse
+#define DEF(x) x ## _mmx2
/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
@@ -107,34 +192,59 @@ static const unsigned long long int mm_wtwo __attribute__ ((aligned(8))) = 0x000
static void get_pixels_mmx(DCTELEM *block, const UINT8 *pixels, int line_size)
{
- DCTELEM *p;
- const UINT8 *pix;
- int i;
+ asm volatile(
+ "movl $-128, %%eax \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq (%0, %2), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "movq %%mm0, (%1, %%eax)\n\t"
+ "movq %%mm1, 8(%1, %%eax)\n\t"
+ "movq %%mm2, 16(%1, %%eax)\n\t"
+ "movq %%mm3, 24(%1, %%eax)\n\t"
+ "addl %3, %0 \n\t"
+ "addl $32, %%eax \n\t"
+ "js 1b \n\t"
+ : "+r" (pixels)
+ : "r" (block+64), "r" (line_size), "r" (line_size*2)
+ : "%eax"
+ );
+}
- /* read the pixels */
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- for(i=0;i<4;i++) {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm2, 8%0\n\t"
- "movq %%mm1, 16%0\n\t"
- "movq %%mm3, 24%0\n\t"
- :"=m"(*p)
- :"m"(*pix), "m"(*(pix+line_size))
- :"memory");
- pix += line_size*2;
- p += 16;
- }
+static void diff_pixels_mmx(DCTELEM *block, const UINT8 *s1, const UINT8 *s2, int stride)
+{
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movl $-128, %%eax \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ "movq (%0), %%mm0 \n\t"
+ "movq (%1), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "movq %%mm0, (%2, %%eax)\n\t"
+ "movq %%mm1, 8(%2, %%eax)\n\t"
+ "addl %3, %0 \n\t"
+ "addl %3, %1 \n\t"
+ "addl $16, %%eax \n\t"
+ "jnz 1b \n\t"
+ : "+r" (s1), "+r" (s2)
+ : "r" (block+64), "r" (stride)
+ : "%eax"
+ );
}
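
Both of the new loops above use the same counting trick: %%eax starts at -128 and is stepped towards zero, so the flags set by the final addl double as the loop test (js/jnz) and no separate compare is needed, while (block+64) plus %%eax addresses the 64 16-bit output coefficients. Functionally, get_pixels_mmx is just a zero-extending 8x8 copy and diff_pixels_mmx stores the widened difference s1 - s2. A plain C reference sketch (illustration only, assuming DCTELEM is a 16-bit type as in dsputil.h):

    #include <stdint.h>

    static void get_pixels_ref(int16_t *block, const uint8_t *pixels, int line_size)
    {
        int i, j;
        for (i = 0; i < 8; i++) {               /* 8 rows of 8 source bytes */
            for (j = 0; j < 8; j++)
                block[i * 8 + j] = pixels[j];   /* zero-extend to 16 bits */
            pixels += line_size;
        }
    }
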
static void put_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line_size)
@@ -203,12 +313,12 @@ static void add_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line
pix = pixels;
MOVQ_ZERO(mm7);
i = 4;
- while (i) {
+ do {
__asm __volatile(
- "movq %2, %%mm0\n\t"
- "movq 8%2, %%mm1\n\t"
- "movq 16%2, %%mm2\n\t"
- "movq 24%2, %%mm3\n\t"
+ "movq (%2), %%mm0\n\t"
+ "movq 8(%2), %%mm1\n\t"
+ "movq 16(%2), %%mm2\n\t"
+ "movq 24(%2), %%mm3\n\t"
"movq %0, %%mm4\n\t"
"movq %1, %%mm6\n\t"
"movq %%mm4, %%mm5\n\t"
@@ -226,809 +336,42 @@ static void add_pixels_clamped_mmx(const DCTELEM *block, UINT8 *pixels, int line
"movq %%mm0, %0\n\t"
"movq %%mm2, %1\n\t"
:"+m"(*pix), "+m"(*(pix+line_size))
- :"m"(*p)
+ :"r"(p)
:"memory");
pix += line_size*2;
p += 16;
- i--;
- };
+ } while (--i);
}
static void put_pixels_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- int hh;
- UINT8 *p;
- const UINT8 *pix;
-
- p = block;
- pix = pixels; // 2s
-#if 0
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += line_size;
- } while (--h);
-#else
- // this optimized code is not very usefull
- // the above loop is definitely faster
- // at least on Celeron 500MHz
- hh = h & 3;
- while (hh) {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += line_size;
- hh--;
- }
- hh=h>>2;
- while (hh) {
- __asm __volatile(
- "movq (%1), %%mm0 \n\t"
- "movq (%1, %2), %%mm1 \n\t"
- "movq (%1, %2, 2), %%mm2 \n\t"
- "movq (%1, %3), %%mm3 \n\t"
- "movq %%mm0, (%0) \n\t"
- "movq %%mm1, (%0, %2) \n\t"
- "movq %%mm2, (%0, %2, 2) \n\t"
- "movq %%mm3, (%0, %3) \n\t"
- ::"r"(p), "r"(pix), "r"(line_size), "r"(line_size*3)
- :"memory");
- pix += line_size*4;
- p += line_size*4;
- hh--;
- }
-#endif
-}
-
-static void put_pixels_x2_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm4);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq 1%1, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm4, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size; p += line_size;
- } while (--h);
-}
-
-static void put_pixels_y2_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm4);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm4, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += line_size;
- } while (--h);
-}
-
-static void put_pixels_xy2_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels; // 1s
- MOVQ_ZERO(mm7);
- MOVQ_WTWO(mm6);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq 1%1, %%mm4\n\t"
- "movq 1%2, %%mm5\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "movq %%mm4, %%mm1\n\t"
- "movq %%mm5, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpcklbw %%mm7, %%mm5\n\t"
- "punpckhbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm5, %%mm4\n\t"
- "paddusw %%mm3, %%mm1\n\t"
- "paddusw %%mm6, %%mm4\n\t"
- "paddusw %%mm6, %%mm1\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm1, %%mm2\n\t"
- "psrlw $2, %%mm0\n\t"
- "psrlw $2, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += line_size;
- } while(--h);
-}
-
-static void put_no_rnd_pixels_x2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq 1%1, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += line_size;
- } while (--h);
-}
-
-static void put_no_rnd_pixels_y2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += line_size;
- } while(--h);
-}
-
-static void put_no_rnd_pixels_xy2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm6);
- JUMPALIGN();
- do {
__asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq 1%1, %%mm4\n\t"
- "movq 1%2, %%mm5\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "movq %%mm4, %%mm1\n\t"
- "movq %%mm5, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpcklbw %%mm7, %%mm5\n\t"
- "punpckhbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm5, %%mm4\n\t"
- "paddusw %%mm3, %%mm1\n\t"
- "paddusw %%mm6, %%mm4\n\t"
- "paddusw %%mm6, %%mm1\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm1, %%mm2\n\t"
- "psrlw $2, %%mm0\n\t"
- "psrlw $2, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += line_size;
- } while(--h);
-}
-
-static void avg_pixels_mmx(UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm6);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %0, %%mm0\n\t"
- "movq %1, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "paddusw %%mm6, %%mm0\n\t"
- "paddusw %%mm6, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += line_size;
- }
- while (--h);
-}
-
-static void avg_pixels_x2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm6);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %1, %%mm1\n\t"
- "movq %0, %%mm0\n\t"
- "movq 1%1, %%mm4\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "movq %%mm4, %%mm5\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpckhbw %%mm7, %%mm5\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "paddusw %%mm4, %%mm1\n\t"
- "paddusw %%mm5, %%mm3\n\t"
- "paddusw %%mm6, %%mm1\n\t"
- "paddusw %%mm6, %%mm3\n\t"
- "psrlw $1, %%mm1\n\t"
- "psrlw $1, %%mm3\n\t"
- "paddusw %%mm6, %%mm0\n\t"
- "paddusw %%mm6, %%mm2\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += line_size;
- } while (--h);
-}
-
-static void avg_pixels_y2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm6);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %1, %%mm1\n\t"
- "movq %0, %%mm0\n\t"
- "movq %2, %%mm4\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "movq %%mm4, %%mm5\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpckhbw %%mm7, %%mm5\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "paddusw %%mm4, %%mm1\n\t"
- "paddusw %%mm5, %%mm3\n\t"
- "paddusw %%mm6, %%mm1\n\t"
- "paddusw %%mm6, %%mm3\n\t"
- "psrlw $1, %%mm1\n\t"
- "psrlw $1, %%mm3\n\t"
- "paddusw %%mm6, %%mm0\n\t"
- "paddusw %%mm6, %%mm2\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix), "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += line_size ;
- } while(--h);
-}
-
-static void avg_pixels_xy2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- // this doesn't seem to be used offten - so
- // the inside usage of mm_wone is not optimized
- MOVQ_WTWO(mm6);
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq 1%1, %%mm4\n\t"
- "movq 1%2, %%mm5\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "movq %%mm4, %%mm1\n\t"
- "movq %%mm5, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpcklbw %%mm7, %%mm5\n\t"
- "punpckhbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm5, %%mm4\n\t"
- "paddusw %%mm3, %%mm1\n\t"
- "paddusw %%mm6, %%mm4\n\t"
- "paddusw %%mm6, %%mm1\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm1, %%mm2\n\t"
- "movq %3, %%mm5\n\t"
- "psrlw $2, %%mm0\n\t"
- "movq %0, %%mm1\n\t"
- "psrlw $2, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "paddusw %%mm5, %%mm0\n\t"
- "paddusw %%mm5, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size)), "m"(mm_wone)
- :"memory");
- pix += line_size;
- p += line_size ;
- } while(--h);
-}
-
-static void avg_no_rnd_pixels_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += line_size ;
- } while (--h);
-}
-
-static void avg_no_rnd_pixels_x2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq 1%1, %%mm1\n\t"
- "movq %0, %%mm4\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "movq %%mm4, %%mm5\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpckhbw %%mm7, %%mm5\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm5, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += line_size;
- } while (--h);
-}
-
-static void avg_no_rnd_pixels_y2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq %0, %%mm4\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "movq %%mm4, %%mm5\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpckhbw %%mm7, %%mm5\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm5, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix), "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += line_size ;
- } while(--h);
-}
-
-static void avg_no_rnd_pixels_xy2_mmx( UINT8 *block, const UINT8 *pixels, int line_size, int h)
-{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm6);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq 1%1, %%mm4\n\t"
- "movq 1%2, %%mm5\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "movq %%mm4, %%mm1\n\t"
- "movq %%mm5, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpcklbw %%mm7, %%mm5\n\t"
- "punpckhbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm5, %%mm4\n\t"
- "paddusw %%mm3, %%mm1\n\t"
- "paddusw %%mm6, %%mm4\n\t"
- "paddusw %%mm6, %%mm1\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm1, %%mm2\n\t"
- "movq %0, %%mm1\n\t"
- "psrlw $2, %%mm0\n\t"
- "movq %%mm1, %%mm3\n\t"
- "psrlw $2, %%mm2\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "psrlw $1, %%mm0\n\t"
- "psrlw $1, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += line_size;
- } while(--h);
-}
-
-static void sub_pixels_mmx( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
-{
- DCTELEM *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- do {
- __asm __volatile(
- "movq %0, %%mm0\n\t"
- "movq %1, %%mm2\n\t"
- "movq 8%0, %%mm1\n\t"
- "movq %%mm2, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "psubsw %%mm2, %%mm0\n\t"
- "psubsw %%mm3, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, 8%0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += 8;
- } while (--h);
-}
-
-static void sub_pixels_x2_mmx( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
-{
- DCTELEM *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm6);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %0, %%mm0\n\t"
- "movq %1, %%mm2\n\t"
- "movq 8%0, %%mm1\n\t"
- "movq 1%1, %%mm4\n\t"
- "movq %%mm2, %%mm3\n\t"
- "movq %%mm4, %%mm5\n\t"
- "punpcklbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpckhbw %%mm7, %%mm5\n\t"
- "paddusw %%mm4, %%mm2\n\t"
- "paddusw %%mm5, %%mm3\n\t"
- "paddusw %%mm6, %%mm2\n\t"
- "paddusw %%mm6, %%mm3\n\t"
- "psrlw $1, %%mm2\n\t"
- "psrlw $1, %%mm3\n\t"
- "psubsw %%mm2, %%mm0\n\t"
- "psubsw %%mm3, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, 8%0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += 8;
- } while (--h);
-}
-
-static void sub_pixels_y2_mmx( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
-{
- DCTELEM *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WONE(mm6);
- do {
- __asm __volatile(
- "movq %0, %%mm0\n\t"
- "movq %1, %%mm2\n\t"
- "movq 8%0, %%mm1\n\t"
- "movq %2, %%mm4\n\t"
- "movq %%mm2, %%mm3\n\t"
- "movq %%mm4, %%mm5\n\t"
- "punpcklbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpckhbw %%mm7, %%mm5\n\t"
- "paddusw %%mm4, %%mm2\n\t"
- "paddusw %%mm5, %%mm3\n\t"
- "paddusw %%mm6, %%mm2\n\t"
- "paddusw %%mm6, %%mm3\n\t"
- "psrlw $1, %%mm2\n\t"
- "psrlw $1, %%mm3\n\t"
- "psubsw %%mm2, %%mm0\n\t"
- "psubsw %%mm3, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, 8%0\n\t"
- :"+m"(*p)
- :"m"(*pix), "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += 8;
- } while (--h);
-}
-
-static void sub_pixels_xy2_mmx( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
-{
- DCTELEM *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- MOVQ_ZERO(mm7);
- MOVQ_WTWO(mm6);
- JUMPALIGN();
- do {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq 1%1, %%mm4\n\t"
- "movq 1%2, %%mm5\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "movq %%mm4, %%mm1\n\t"
- "movq %%mm5, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpcklbw %%mm7, %%mm5\n\t"
- "punpckhbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm5, %%mm4\n\t"
- "paddusw %%mm3, %%mm1\n\t"
- "paddusw %%mm6, %%mm4\n\t"
- "paddusw %%mm6, %%mm1\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm1, %%mm2\n\t"
- "movq %0, %%mm1\n\t"
- "movq 8%0, %%mm3\n\t"
- "psrlw $2, %%mm0\n\t"
- "psrlw $2, %%mm2\n\t"
- "psubsw %%mm0, %%mm1\n\t"
- "psubsw %%mm2, %%mm3\n\t"
- "movq %%mm1, %0\n\t"
- "movq %%mm3, 8%0\n\t"
- :"+m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += 8 ;
- } while(--h);
+ "lea (%3, %3), %%eax \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ : "+g"(h), "+r" (pixels), "+r" (block)
+ : "r"(line_size)
+ : "%eax", "memory"
+ );
}
static void clear_blocks_mmx(DCTELEM *blocks)
{
- asm volatile(
+ __asm __volatile(
"pxor %%mm7, %%mm7 \n\t"
"movl $-128*6, %%eax \n\t"
"1: \n\t"
@@ -1043,7 +386,9 @@ static void clear_blocks_mmx(DCTELEM *blocks)
);
}
+#if 0
static void just_return() { return; }
+#endif
void dsputil_init_mmx(void)
{
@@ -1065,10 +410,11 @@ void dsputil_init_mmx(void)
if (mm_flags & MM_MMX) {
get_pixels = get_pixels_mmx;
+ diff_pixels = diff_pixels_mmx;
put_pixels_clamped = put_pixels_clamped_mmx;
add_pixels_clamped = add_pixels_clamped_mmx;
clear_blocks= clear_blocks_mmx;
-
+
pix_abs16x16 = pix_abs16x16_mmx;
pix_abs16x16_x2 = pix_abs16x16_x2_mmx;
pix_abs16x16_y2 = pix_abs16x16_y2_mmx;
@@ -1088,7 +434,7 @@ void dsputil_init_mmx(void)
put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
put_no_rnd_pixels_tab[3] = put_no_rnd_pixels_xy2_mmx;
-
+
avg_pixels_tab[0] = avg_pixels_mmx;
avg_pixels_tab[1] = avg_pixels_x2_mmx;
avg_pixels_tab[2] = avg_pixels_y2_mmx;
@@ -1098,44 +444,37 @@ void dsputil_init_mmx(void)
avg_no_rnd_pixels_tab[1] = avg_no_rnd_pixels_x2_mmx;
avg_no_rnd_pixels_tab[2] = avg_no_rnd_pixels_y2_mmx;
avg_no_rnd_pixels_tab[3] = avg_no_rnd_pixels_xy2_mmx;
-
- sub_pixels_tab[0] = sub_pixels_mmx;
- sub_pixels_tab[1] = sub_pixels_x2_mmx;
- sub_pixels_tab[2] = sub_pixels_y2_mmx;
- sub_pixels_tab[3] = sub_pixels_xy2_mmx;
if (mm_flags & MM_MMXEXT) {
pix_abs16x16 = pix_abs16x16_mmx2;
pix_abs16x16_x2 = pix_abs16x16_x2_mmx2;
pix_abs16x16_y2 = pix_abs16x16_y2_mmx2;
pix_abs16x16_xy2= pix_abs16x16_xy2_mmx2;
-
+
pix_abs8x8 = pix_abs8x8_mmx2;
pix_abs8x8_x2 = pix_abs8x8_x2_mmx2;
pix_abs8x8_y2 = pix_abs8x8_y2_mmx2;
pix_abs8x8_xy2= pix_abs8x8_xy2_mmx2;
-
- put_pixels_tab[1] = put_pixels_x2_sse;
- put_pixels_tab[2] = put_pixels_y2_sse;
-
- avg_pixels_tab[0] = avg_pixels_sse;
- avg_pixels_tab[1] = avg_pixels_x2_sse;
- avg_pixels_tab[2] = avg_pixels_y2_sse;
- avg_pixels_tab[3] = avg_pixels_xy2_sse;
-
- sub_pixels_tab[1] = sub_pixels_x2_sse;
- sub_pixels_tab[2] = sub_pixels_y2_sse;
+
+ put_pixels_tab[1] = put_pixels_x2_mmx2;
+ put_pixels_tab[2] = put_pixels_y2_mmx2;
+ put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx2;
+ put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx2;
+
+ avg_pixels_tab[0] = avg_pixels_mmx2;
+ avg_pixels_tab[1] = avg_pixels_x2_mmx2;
+ avg_pixels_tab[2] = avg_pixels_y2_mmx2;
+ avg_pixels_tab[3] = avg_pixels_xy2_mmx2;
} else if (mm_flags & MM_3DNOW) {
put_pixels_tab[1] = put_pixels_x2_3dnow;
put_pixels_tab[2] = put_pixels_y2_3dnow;
-
+ put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_3dnow;
+ put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_3dnow;
+
avg_pixels_tab[0] = avg_pixels_3dnow;
avg_pixels_tab[1] = avg_pixels_x2_3dnow;
avg_pixels_tab[2] = avg_pixels_y2_3dnow;
avg_pixels_tab[3] = avg_pixels_xy2_3dnow;
-
- sub_pixels_tab[1] = sub_pixels_x2_3dnow;
- sub_pixels_tab[2] = sub_pixels_y2_3dnow;
}
/* idct */
@@ -1181,12 +520,25 @@ void dsputil_init_mmx(void)
avg_no_rnd_pixels_tab[2] = just_return;
avg_no_rnd_pixels_tab[3] = just_return;
- sub_pixels_tab[0] = just_return;
- sub_pixels_tab[1] = just_return;
- sub_pixels_tab[2] = just_return;
- sub_pixels_tab[3] = just_return;
-
//av_fdct = just_return;
//ff_idct = just_return;
#endif
}
+
+/* remove any non bit exact operation (testing purpose). NOTE that
+ this function should be kept as small as possible because it is
+ always difficult to test automatically non bit exact cases. */
+void dsputil_set_bit_exact_mmx(void)
+{
+ if (mm_flags & MM_MMX) {
+ if (mm_flags & MM_MMXEXT) {
+ put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
+ put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
+ avg_pixels_tab[3] = avg_pixels_xy2_mmx;
+ } else if (mm_flags & MM_3DNOW) {
+ put_no_rnd_pixels_tab[1] = put_no_rnd_pixels_x2_mmx;
+ put_no_rnd_pixels_tab[2] = put_no_rnd_pixels_y2_mmx;
+ avg_pixels_tab[3] = avg_pixels_xy2_mmx;
+ }
+ }
+}
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h
index 830fe9f3b..a16ccc88b 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h
+++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h
@@ -1,342 +1,296 @@
/*
* DSP utils : average functions are compiled twice for 3dnow/mmx2
- * Copyright (c) 2000, 2001 Gerard Lantau.
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ * Copyright (c) 2002 Michael Niedermayer
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
*
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
+ * and improved by Zdenek Kabelac <kabi@users.sf.net>
+ */
+
+/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm
+ clobber bug - now it will work with 2.95.2 and also with -fPIC
*/
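
In practice that means the pointer operands below are pinned to fixed registers with "+S" (%esi) and "+D" (%edi) rather than being left to the allocator; this sidesteps the gcc 2.95.2 clobber bug mentioned above and, with -fPIC, also keeps %ebx free for the GOT pointer.
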
-
static void DEF(put_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- int dh, hh;
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- hh=h>>2;
- dh=h&3;
- while(hh--) {
__asm __volatile(
- "movq (%1), %%mm0\n\t"
- "movq 1(%1), %%mm1\n\t"
- "movq (%1, %2), %%mm2\n\t"
- "movq 1(%1, %2), %%mm3\n\t"
- "movq (%1, %2, 2), %%mm4\n\t"
- "movq 1(%1, %2, 2), %%mm5\n\t"
- "movq (%1, %3), %%mm6\n\t"
- "movq 1(%1, %3), %%mm7\n\t"
- PAVGB" %%mm1, %%mm0\n\t"
- PAVGB" %%mm3, %%mm2\n\t"
- PAVGB" %%mm5, %%mm4\n\t"
- PAVGB" %%mm7, %%mm6\n\t"
- "movq %%mm0, (%0)\n\t"
- "movq %%mm2, (%0, %2)\n\t"
- "movq %%mm4, (%0, %2, 2)\n\t"
- "movq %%mm6, (%0, %3)\n\t"
- ::"r"(p), "r"(pix), "r" (line_size), "r" (line_size*3)
- :"memory");
- pix += line_size*4; p += line_size*4;
- }
- while(dh--) {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq 1%1, %%mm1\n\t"
- PAVGB" %%mm1, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size; p += line_size;
- }
+ "lea (%3, %3), %%eax \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ "addl %%eax, %1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" (line_size)
+ :"%eax", "memory");
}
-
-static void DEF(put_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+
+/* GL: this function does incorrect rounding if overflow */
+static void DEF(put_no_rnd_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- int dh, hh;
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
-
- hh=h>>1;
- dh=h&1;
- while(hh--) {
- __asm __volatile(
- "movq %2, %%mm0\n\t"
- "movq %3, %%mm1\n\t"
- "movq %4, %%mm2\n\t"
- PAVGB" %%mm1, %%mm0\n\t"
- PAVGB" %%mm2, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, %1\n\t"
- :"=m"(*p), "=m"(*(p+line_size))
- :"m"(*pix), "m"(*(pix+line_size)),
- "m"(*(pix+line_size*2))
- :"memory");
- pix += line_size*2;
- p += line_size*2;
- }
- if(dh) {
+ MOVQ_BONE(mm6);
__asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- PAVGB" %%mm1, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"=m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size))
- :"memory");
- }
+ "lea (%3, %3), %%eax \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ "addl %%eax, %1 \n\t"
+ "psubusb %%mm6, %%mm0 \n\t"
+ "psubusb %%mm6, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ "addl %%eax, %2 \n\t"
+ "addl %%eax, %1 \n\t"
+ "psubusb %%mm6, %%mm0 \n\t"
+ "psubusb %%mm6, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" (line_size)
+ :"%eax", "memory");
}
-static void DEF(avg_pixels)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(put_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- int dh, hh;
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- hh=h>>2;
- dh=h&3;
- while(hh--) {
__asm __volatile(
- "movq (%0), %%mm0\n\t"
- "movq (%1), %%mm1\n\t"
- "movq (%0, %2), %%mm2\n\t"
- "movq (%1, %2), %%mm3\n\t"
- "movq (%0, %2, 2), %%mm4\n\t"
- "movq (%1, %2, 2), %%mm5\n\t"
- "movq (%0, %3), %%mm6\n\t"
- "movq (%1, %3), %%mm7\n\t"
- PAVGB" %%mm1, %%mm0\n\t"
- PAVGB" %%mm3, %%mm2\n\t"
- PAVGB" %%mm5, %%mm4\n\t"
- PAVGB" %%mm7, %%mm6\n\t"
- "movq %%mm0, (%0)\n\t"
- "movq %%mm2, (%0, %2)\n\t"
- "movq %%mm4, (%0, %2, 2)\n\t"
- "movq %%mm6, (%0, %3)\n\t"
- ::"r"(p), "r"(pix), "r" (line_size), "r" (line_size*3)
- :"memory");
- pix += line_size*4; p += line_size*4;
- }
- while(dh--) {
- __asm __volatile(
- "movq %0, %%mm0\n\t"
- "movq %1, %%mm1\n\t"
- PAVGB" %%mm1, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size; p += line_size;
- }
+ "lea (%3, %3), %%eax \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "subl %3, %2 \n\t"
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm2 \n\t"
+ "addl %%eax, %1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm2, %%mm1 \n\t"
+ "movq %%mm0, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%eax) \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm0 \n\t"
+ "addl %%eax, %2 \n\t"
+ "addl %%eax, %1 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm0, %%mm1 \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%eax) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D" (block)
+ :"r" (line_size)
+ :"%eax", "memory");
}
-static void DEF(avg_pixels_x2)( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+/* GL: this function does incorrect rounding if overflow */
+static void DEF(put_no_rnd_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- int dh, hh;
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- hh=h>>1;
- dh=h&1;
- while(hh--) {
- __asm __volatile(
- "movq %2, %%mm2\n\t"
- "movq 1%2, %%mm3\n\t"
- "movq %3, %%mm4\n\t"
- "movq 1%3, %%mm5\n\t"
- "movq %0, %%mm0\n\t"
- "movq %1, %%mm1\n\t"
- PAVGB" %%mm3, %%mm2\n\t"
- PAVGB" %%mm2, %%mm0\n\t"
- PAVGB" %%mm5, %%mm4\n\t"
- PAVGB" %%mm4, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, %1\n\t"
- :"+m"(*p), "+m"(*(p+line_size))
- :"m"(*pix), "m"(*(pix+line_size))
- :"memory");
- pix += line_size*2;
- p += line_size*2;
- }
- if(dh) {
+ MOVQ_BONE(mm6);
__asm __volatile(
- "movq %1, %%mm1\n\t"
- "movq 1%1, %%mm2\n\t"
- "movq %0, %%mm0\n\t"
- PAVGB" %%mm2, %%mm1\n\t"
- PAVGB" %%mm1, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- }
+ "lea (%3, %3), %%eax \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "subl %3, %2 \n\t"
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm2 \n\t"
+ "addl %%eax, %1 \n\t"
+ "psubusb %%mm6, %%mm1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm2, %%mm1 \n\t"
+ "movq %%mm0, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%eax) \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm0 \n\t"
+ "addl %%eax, %2 \n\t"
+ "addl %%eax, %1 \n\t"
+ "psubusb %%mm6, %%mm1 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm0, %%mm1 \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%eax) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D" (block)
+ :"r" (line_size)
+ :"%eax", "memory");
}
-static void DEF(avg_pixels_y2)( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- int dh, hh;
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- hh=h>>1;
- dh=h&1;
- while(hh--) {
- __asm __volatile(
- "movq %2, %%mm2\n\t"
- "movq %3, %%mm3\n\t"
- "movq %3, %%mm4\n\t"
- "movq %4, %%mm5\n\t"
- "movq %0, %%mm0\n\t"
- "movq %1, %%mm1\n\t"
- PAVGB" %%mm3, %%mm2\n\t"
- PAVGB" %%mm2, %%mm0\n\t"
- PAVGB" %%mm5, %%mm4\n\t"
- PAVGB" %%mm4, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, %1\n\t"
- :"+m"(*p), "+m"(*(p+line_size))
- :"m"(*pix), "m"(*(pix+line_size)), "m"(*(pix+line_size*2))
- :"memory");
- pix += line_size*2;
- p += line_size*2;
- }
- if(dh) {
__asm __volatile(
- "movq %1, %%mm1\n\t"
- "movq %2, %%mm2\n\t"
- "movq %0, %%mm0\n\t"
- PAVGB" %%mm2, %%mm1\n\t"
- PAVGB" %%mm1, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix), "m"(*(pix+line_size))
- :"memory");
- }
+ "lea (%3, %3), %%eax \n\t"
+ "1: \n\t"
+ "movq (%2), %%mm0 \n\t"
+ "movq (%2, %3), %%mm1 \n\t"
+ PAVGB" (%1), %%mm0 \n\t"
+ PAVGB" (%1, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "movq (%2), %%mm0 \n\t"
+ "movq (%2, %3), %%mm1 \n\t"
+ PAVGB" (%1), %%mm0 \n\t"
+ PAVGB" (%1, %3), %%mm1 \n\t"
+ "addl %%eax, %1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" (line_size)
+ :"%eax", "memory");
}
-static void DEF(avg_pixels_xy2)( UINT8 *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- UINT8 *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- __asm __volatile(
- "pxor %%mm7, %%mm7\n\t"
- "movq %0, %%mm6\n\t"
- ::"m"(mm_wtwo));
- do {
__asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %2, %%mm1\n\t"
- "movq 1%1, %%mm4\n\t"
- "movq 1%2, %%mm5\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm0\n\t"
- "punpcklbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm1, %%mm0\n\t"
- "paddusw %%mm3, %%mm2\n\t"
- "movq %%mm4, %%mm1\n\t"
- "movq %%mm5, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm4\n\t"
- "punpcklbw %%mm7, %%mm5\n\t"
- "punpckhbw %%mm7, %%mm1\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "paddusw %%mm5, %%mm4\n\t"
- "paddusw %%mm3, %%mm1\n\t"
- "paddusw %%mm6, %%mm4\n\t"
- "paddusw %%mm6, %%mm1\n\t"
- "paddusw %%mm4, %%mm0\n\t"
- "paddusw %%mm1, %%mm2\n\t"
- "psrlw $2, %%mm0\n\t"
- "psrlw $2, %%mm2\n\t"
- "packuswb %%mm2, %%mm0\n\t"
- PAVGB" %0, %%mm0\n\t"
- "movq %%mm0, %0\n\t"
- :"+m"(*p)
- :"m"(*pix),
- "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += line_size ;
- } while(--h);
+ "lea (%3, %3), %%eax \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm2 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" (%2, %3), %%mm2 \n\t"
+ "addl %%eax, %1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm2 \n\t"
+ "addl %%eax, %2 \n\t"
+ "addl %%eax, %1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" (%2, %3), %%mm2 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" (line_size)
+ :"%eax", "memory");
}
-static void DEF(sub_pixels_x2)( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
+static void DEF(avg_pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- DCTELEM *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- __asm __volatile(
- "pxor %%mm7, %%mm7":);
- do {
__asm __volatile(
- "movq 1%1, %%mm2\n\t"
- "movq %0, %%mm0\n\t"
- PAVGB" %1, %%mm2\n\t"
- "movq 8%0, %%mm1\n\t"
- "movq %%mm2, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "psubsw %%mm2, %%mm0\n\t"
- "psubsw %%mm3, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, 8%0\n\t"
- :"+m"(*p)
- :"m"(*pix)
- :"memory");
- pix += line_size;
- p += 8;
- } while (--h);
+ "lea (%3, %3), %%eax \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "subl %3, %2 \n\t"
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm2 \n\t"
+ "addl %%eax, %1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm2, %%mm1 \n\t"
+ "movq (%2, %3), %%mm3 \n\t"
+ "movq (%2, %%eax), %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm0 \n\t"
+ PAVGB" %%mm4, %%mm1 \n\t"
+ "movq %%mm0, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%eax) \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm0 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm0, %%mm1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "addl %%eax, %1 \n\t"
+ "movq (%2, %3), %%mm3 \n\t"
+ "movq (%2, %%eax), %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ PAVGB" %%mm4, %%mm1 \n\t"
+ "movq %%mm2, (%2, %3) \n\t"
+ "movq %%mm1, (%2, %%eax) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" (line_size)
+ :"%eax", "memory");
}
-static void DEF(sub_pixels_y2)( DCTELEM *block, const UINT8 *pixels, int line_size, int h)
+// Note this is not correctly rounded, but this function is only used for B-frames so it doesn't matter
+static void DEF(avg_pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
- DCTELEM *p;
- const UINT8 *pix;
- p = block;
- pix = pixels;
- __asm __volatile(
- "pxor %%mm7, %%mm7":);
- do {
+ MOVQ_BONE(mm6);
__asm __volatile(
- "movq %2, %%mm2\n\t"
- "movq %0, %%mm0\n\t"
- PAVGB" %1, %%mm2\n\t"
- "movq 8%0, %%mm1\n\t"
- "movq %%mm2, %%mm3\n\t"
- "punpcklbw %%mm7, %%mm2\n\t"
- "punpckhbw %%mm7, %%mm3\n\t"
- "psubsw %%mm2, %%mm0\n\t"
- "psubsw %%mm3, %%mm1\n\t"
- "movq %%mm0, %0\n\t"
- "movq %%mm1, 8%0\n\t"
- :"+m"(*p)
- :"m"(*pix), "m"(*(pix+line_size))
- :"memory");
- pix += line_size;
- p += 8;
- } while (--h);
+ "lea (%3, %3), %%eax \n\t"
+ "movq (%1), %%mm0 \n\t"
+ PAVGB" 1(%1), %%mm0 \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1, %%eax), %%mm2 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "psubusb %%mm6, %%mm2 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ PAVGB" 1(%1, %%eax), %%mm2 \n\t"
+ "addl %%eax, %1 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm2, %%mm1 \n\t"
+ PAVGB" (%2), %%mm0 \n\t"
+ PAVGB" (%2, %3), %%mm1 \n\t"
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm0 \n\t"
+ PAVGB" 1(%1, %3), %%mm1 \n\t"
+ PAVGB" 1(%1, %%eax), %%mm0 \n\t"
+ "addl %%eax, %2 \n\t"
+ "addl %%eax, %1 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm0, %%mm1 \n\t"
+ PAVGB" (%2), %%mm2 \n\t"
+ PAVGB" (%2, %3), %%mm1 \n\t"
+ "movq %%mm2, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r" (line_size)
+ :"%eax", "memory");
}
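
As the comment before avg_pixels_xy2 notes, chaining pavgb/pavgusb (both of which round up) is not bit-exact against (a+b+c+d+2)>>2, and the psubusb of mm_bone only partially corrects the bias. A small worked case of the kind of off-by-one involved (illustration only): for source bytes 0, 0, 0, 1 the exact value is (0+0+0+1+2)>>2 = 0, while the naive chained form pavg(pavg(0,0), pavg(0,1)) = pavg(0,1) = 1. This is also why dsputil_set_bit_exact_mmx() in dsputil_mmx.c points avg_pixels_tab[3] back at the plain MMX version when bit-exact operation is requested.
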
-
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
new file mode 100644
index 000000000..dc70c9c8e
--- /dev/null
+++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
@@ -0,0 +1,305 @@
+/*
+ * DSP utils mmx functions are compiled twice for rnd/no_rnd
+ * Copyright (c) 2000, 2001 Fabrice Bellard.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ * mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
+ * and improved by Zdenek Kabelac <kabi@users.sf.net>
+ */
+
+// put_pixels
+static void DEF(put, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm __volatile(
+ "lea (%3, %3), %%eax \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm1 \n\t"
+ "movq (%1, %3), %%mm2 \n\t"
+ "movq 1(%1, %3), %%mm3 \n\t"
+ PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r"(line_size)
+ :"eax", "memory");
+}
+
+static void DEF(put, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm __volatile(
+ "lea (%3, %3), %%eax \n\t"
+ "movq (%1), %%mm0 \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax),%%mm2 \n\t"
+ PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax),%%mm0 \n\t"
+ PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)
+ "movq %%mm4, (%2) \n\t"
+ "movq %%mm5, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r"(line_size)
+ :"eax", "memory");
+}
+
+static void DEF(put, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_ZERO(mm7);
+ SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
+ __asm __volatile(
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm4 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddusw %%mm0, %%mm4 \n\t"
+ "paddusw %%mm1, %%mm5 \n\t"
+ "xorl %%eax, %%eax \n\t"
+ "addl %3, %1 \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1, %%eax), %%mm0 \n\t"
+ "movq 1(%1, %%eax), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "paddusw %%mm2, %%mm0 \n\t"
+ "paddusw %%mm3, %%mm1 \n\t"
+ "paddusw %%mm6, %%mm4 \n\t"
+ "paddusw %%mm6, %%mm5 \n\t"
+ "paddusw %%mm0, %%mm4 \n\t"
+ "paddusw %%mm1, %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm5 \n\t"
+ "packuswb %%mm5, %%mm4 \n\t"
+ "movq %%mm4, (%2, %%eax) \n\t"
+ "addl %3, %%eax \n\t"
+
+ "movq (%1, %%eax), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
+ "movq 1(%1, %%eax), %%mm4 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddusw %%mm2, %%mm4 \n\t"
+ "paddusw %%mm3, %%mm5 \n\t"
+ "paddusw %%mm6, %%mm0 \n\t"
+ "paddusw %%mm6, %%mm1 \n\t"
+ "paddusw %%mm4, %%mm0 \n\t"
+ "paddusw %%mm5, %%mm1 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "psrlw $2, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%eax) \n\t"
+ "addl %3, %%eax \n\t"
+
+ "subl $2, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels)
+ :"D"(block), "r"(line_size)
+ :"eax", "memory");
+}
+
+// avg_pixels
+// in case more speed is needed - unrolling would certainly help
+static void DEF(avg, pixels)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm __volatile(
+ "movq %0, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ "movq %%mm2, %0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ }
+ while (--h);
+}
+
+static void DEF(avg, pixels_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ JUMPALIGN();
+ do {
+ __asm __volatile(
+ "movq %1, %%mm0 \n\t"
+ "movq 1%1, %%mm1 \n\t"
+ "movq %0, %%mm3 \n\t"
+ PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
+ PAVGB(%%mm3, %%mm2, %%mm0, %%mm6)
+ "movq %%mm0, %0 \n\t"
+ :"+m"(*block)
+ :"m"(*pixels)
+ :"memory");
+ pixels += line_size;
+ block += line_size;
+ } while (--h);
+}
+
+static void DEF(avg, pixels_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_BFE(mm6);
+ __asm __volatile(
+ "lea (%3, %3), %%eax \n\t"
+ "movq (%1), %%mm0 \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm2 \n\t"
+ PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)
+ "movq (%2), %%mm3 \n\t"
+ PAVGB(%%mm3, %%mm4, %%mm0, %%mm6)
+ "movq (%2, %3), %%mm3 \n\t"
+ PAVGB(%%mm3, %%mm5, %%mm1, %%mm6)
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+
+ "movq (%1, %3), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm0 \n\t"
+ PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)
+ "movq (%2), %%mm3 \n\t"
+ PAVGB(%%mm3, %%mm4, %%mm0, %%mm6)
+ "movq (%2, %3), %%mm3 \n\t"
+ PAVGB(%%mm3, %%mm5, %%mm1, %%mm6)
+ "movq %%mm0, (%2) \n\t"
+ "movq %%mm1, (%2, %3) \n\t"
+ "addl %%eax, %1 \n\t"
+ "addl %%eax, %2 \n\t"
+
+ "subl $4, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels), "+D"(block)
+ :"r"(line_size)
+ :"eax", "memory");
+}
+
+// this routine is 'slightly' suboptimal but mostly unused
+static void DEF(avg, pixels_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+ MOVQ_ZERO(mm7);
+ SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
+ __asm __volatile(
+ "movq (%1), %%mm0 \n\t"
+ "movq 1(%1), %%mm4 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddusw %%mm0, %%mm4 \n\t"
+ "paddusw %%mm1, %%mm5 \n\t"
+ "xorl %%eax, %%eax \n\t"
+ "addl %3, %1 \n\t"
+ ".balign 8 \n\t"
+ "1: \n\t"
+ "movq (%1, %%eax), %%mm0 \n\t"
+ "movq 1(%1, %%eax), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpckhbw %%mm7, %%mm1 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "paddusw %%mm2, %%mm0 \n\t"
+ "paddusw %%mm3, %%mm1 \n\t"
+ "paddusw %%mm6, %%mm4 \n\t"
+ "paddusw %%mm6, %%mm5 \n\t"
+ "paddusw %%mm0, %%mm4 \n\t"
+ "paddusw %%mm1, %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm5 \n\t"
+ "movq (%2, %%eax), %%mm3 \n\t"
+ "packuswb %%mm5, %%mm4 \n\t"
+ "pcmpeqd %%mm2, %%mm2 \n\t"
+ "paddb %%mm2, %%mm2 \n\t"
+ PAVGB(%%mm3, %%mm4, %%mm5, %%mm2)
+ "movq %%mm5, (%2, %%eax) \n\t"
+ "addl %3, %%eax \n\t"
+
+ "movq (%1, %%eax), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
+ "movq 1(%1, %%eax), %%mm4 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpckhbw %%mm7, %%mm3 \n\t"
+ "punpckhbw %%mm7, %%mm5 \n\t"
+ "paddusw %%mm2, %%mm4 \n\t"
+ "paddusw %%mm3, %%mm5 \n\t"
+ "paddusw %%mm6, %%mm0 \n\t"
+ "paddusw %%mm6, %%mm1 \n\t"
+ "paddusw %%mm4, %%mm0 \n\t"
+ "paddusw %%mm5, %%mm1 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "psrlw $2, %%mm1 \n\t"
+ "movq (%2, %%eax), %%mm3 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "pcmpeqd %%mm2, %%mm2 \n\t"
+ "paddb %%mm2, %%mm2 \n\t"
+ PAVGB(%%mm3, %%mm0, %%mm1, %%mm2)
+ "movq %%mm1, (%2, %%eax) \n\t"
+ "addl %3, %%eax \n\t"
+
+ "subl $2, %0 \n\t"
+ "jnz 1b \n\t"
+ :"+g"(h), "+S"(pixels)
+ :"D"(block), "r"(line_size)
+ :"eax", "memory");
+}
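+
+/* Illustrative scalar sketch of the xy2 routine above (rnd rounder assumed,
+ * i.e. SET_RND loads 2; the function name is only for illustration): the
+ * source is bilinearly interpolated at a half-pel offset in x and y, then
+ * merged into the destination with a rounded average, as the final PAVGB
+ * does. */
+#if 0
+static void avg_pixels_xy2_ref(UINT8 *block, const UINT8 *pixels, int line_size, int h)
+{
+    int x, y;
+    for (y = 0; y < h; y++) {
+        for (x = 0; x < 8; x++) {
+            int p = (pixels[x]             + pixels[x + 1] +
+                     pixels[x + line_size] + pixels[x + line_size + 1] + 2) >> 2;
+            block[x] = (block[x] + p + 1) >> 1;
+        }
+        block  += line_size;
+        pixels += line_size;
+    }
+}
+#endif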
diff --git a/src/libffmpeg/libavcodec/i386/fdct_mmx.c b/src/libffmpeg/libavcodec/i386/fdct_mmx.c
index e9d48383d..7135beb21 100644
--- a/src/libffmpeg/libavcodec/i386/fdct_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/fdct_mmx.c
@@ -1,6 +1,6 @@
/*
* MMX optimized forward DCT
- * The gcc porting is Copyright (c) 2001 Gerard Lantau.
+ * The gcc porting is Copyright (c) 2001 Fabrice Bellard.
*
* from fdctam32.c - AP922 MMX(3D-Now) forward-DCT
*
@@ -10,7 +10,7 @@
#include "../common.h"
#include "mmx.h"
-//#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align)))
+#define ATTR_ALIGN(align) __attribute__ ((__aligned__ (align)))
//////////////////////////////////////////////////////////////////////
//
diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx.c b/src/libffmpeg/libavcodec/i386/idct_mmx.c
index 618c1cfde..298c8a8b0 100644
--- a/src/libffmpeg/libavcodec/i386/idct_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/idct_mmx.c
@@ -528,8 +528,12 @@ static inline void idct_col (int16_t * col, int offset)
movq_r2m (mm3, *(col+offset+4*8)); // save y4
movq_r2m (mm4, *(col+offset+3*8)); // save y3
-}
+#undef T1
+#undef T2
+#undef T3
+#undef C4
+}
static int32_t rounder0[] ATTR_ALIGN(8) =
rounder ((1 << (COL_SHIFT - 1)) - 0.5);
@@ -547,6 +551,8 @@ static int32_t rounder3[] ATTR_ALIGN(8) =
static int32_t rounder5[] ATTR_ALIGN(8) =
rounder (-0.441341716183); /* C3*(-C5/C4+C5-C3)/2 */
+#undef COL_SHIFT
+#undef ROW_SHIFT
#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \
void idct (int16_t * block) \
diff --git a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c b/src/libffmpeg/libavcodec/i386/motion_est_mmx.c
index e704c4219..9b76cdb07 100644
--- a/src/libffmpeg/libavcodec/i386/motion_est_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/motion_est_mmx.c
@@ -1,20 +1,20 @@
/*
* MMX optimized motion estimation
- * Copyright (c) 2001 Gerard Lantau.
+ * Copyright (c) 2001 Fabrice Bellard.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
*
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* mostly by Michael Niedermayer <michaelni@gmx.at>
*/
@@ -26,6 +26,8 @@ static const __attribute__ ((aligned(8))) UINT64 round_tab[3]={
0x0002000200020002,
};
+static __attribute__ ((aligned(8))) uint64_t bone= 0x0101010101010101LL;
+
static inline void sad8_mmx(UINT8 *blk1, UINT8 *blk2, int stride, int h)
{
int len= -(stride<<h);
@@ -115,6 +117,7 @@ static inline void sad8_4_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
int len= -(stride<<h);
asm volatile(
".balign 16 \n\t"
+ "movq "MANGLE(bone)", %%mm5 \n\t"
"1: \n\t"
"movq (%1, %%eax), %%mm0 \n\t"
"movq (%2, %%eax), %%mm2 \n\t"
@@ -122,6 +125,7 @@ static inline void sad8_4_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
"movq 1(%2, %%eax), %%mm3 \n\t"
"pavgb %%mm2, %%mm0 \n\t"
"pavgb %%mm1, %%mm3 \n\t"
+ "psubusb %%mm5, %%mm3 \n\t"
"pavgb %%mm3, %%mm0 \n\t"
"movq (%3, %%eax), %%mm2 \n\t"
"psadbw %%mm2, %%mm0 \n\t"
@@ -132,6 +136,7 @@ static inline void sad8_4_mmx2(UINT8 *blk1, UINT8 *blk2, int stride, int h)
"movq 1(%2, %%eax), %%mm4 \n\t"
"pavgb %%mm3, %%mm1 \n\t"
"pavgb %%mm4, %%mm2 \n\t"
+ "psubusb %%mm5, %%mm2 \n\t"
"pavgb %%mm1, %%mm2 \n\t"
"movq (%3, %%eax), %%mm1 \n\t"
"psadbw %%mm1, %%mm2 \n\t"
diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c
index b7a782f56..390aa554c 100644
--- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c
@@ -1,34 +1,30 @@
/*
* The simplest mpeg encoder (well, it was the simplest!)
- * Copyright (c) 2000,2001 Gerard Lantau.
+ * Copyright (c) 2000,2001 Fabrice Bellard.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
*
- * This program is distributed in the hope that it will be useful,
+ * This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Optimized for ia32 cpus by Nick Kurshev <nickols_k@mail.ru>
- * h263 dequantizer by Michael Niedermayer <michaelni@gmx.at>
+ * h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at>
*/
-#include "xine-utils/xineutils.h"
#include "../dsputil.h"
#include "../mpegvideo.h"
#include "../avcodec.h"
-#include "../mangle.h"
extern UINT8 zigzag_end[64];
-extern void (*draw_edges)(UINT8 *buf, int wrap, int width, int height, int w);
-extern int (*dct_quantize)(MpegEncContext *s, DCTELEM *block, int n, int qscale);
extern UINT8 zigzag_direct_noperm[64];
extern UINT16 inv_zigzag_direct16[64];
@@ -195,103 +191,86 @@ asm volatile(
static void dct_unquantize_mpeg1_mmx(MpegEncContext *s,
DCTELEM *block, int n, int qscale)
{
- int i, level, nCoeffs;
+ int nCoeffs;
const UINT16 *quant_matrix;
if(s->alternate_scan) nCoeffs= 64;
     else nCoeffs= zigzag_end[ s->block_last_index[n] ];
if (s->mb_intra) {
+ int block0;
if (n < 4)
- block[0] = block[0] * s->y_dc_scale;
+ block0 = block[0] * s->y_dc_scale;
else
- block[0] = block[0] * s->c_dc_scale;
- /* isnt used anymore (we have a h263 unquantizer since some time)
- if (s->out_format == FMT_H263) {
- i = 1;
- goto unquant_even;
- }*/
+ block0 = block[0] * s->c_dc_scale;
/* XXX: only mpeg1 */
quant_matrix = s->intra_matrix;
- i=1;
- /* Align on 4 elements boundary */
- while(i&3)
- {
- level = block[i];
- if (level) {
- if (level < 0) level = -level;
- level = (int)(level * qscale * quant_matrix[i]) >> 3;
- level = (level - 1) | 1;
- if (block[i] < 0) level = -level;
- block[i] = level;
- }
- i++;
- }
- __asm __volatile(
- "movd %0, %%mm6\n\t" /* mm6 = qscale | 0 */
- "punpckldq %%mm6, %%mm6\n\t" /* mm6 = qscale | qscale */
- "movq %2, %%mm4\n\t"
- "movq %%mm6, %%mm7\n\t"
- "movq %1, %%mm5\n\t"
- "packssdw %%mm6, %%mm7\n\t" /* mm7 = qscale | qscale | qscale | qscale */
- "pxor %%mm6, %%mm6\n\t"
- ::"g"(qscale),"m"(mm_wone),"m"(mm_wabs):"memory");
- for(;i<nCoeffs;i+=4) {
- __asm __volatile(
- "movq %1, %%mm0\n\t"
- "movq %%mm7, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm0, %%mm3\n\t"
- "pcmpgtw %%mm6, %%mm2\n\t"
- "pmullw %2, %%mm1\n\t"
- "pandn %%mm4, %%mm2\n\t"
- "por %%mm5, %%mm2\n\t"
- "pmullw %%mm2, %%mm0\n\t" /* mm0 = abs(block[i]). */
-
- "pcmpeqw %%mm6, %%mm3\n\t"
- "pmullw %%mm0, %%mm1\n\t"
- "psraw $3, %%mm1\n\t"
- "psubw %%mm5, %%mm1\n\t" /* block[i] --; */
- "pandn %%mm4, %%mm3\n\t" /* fake of pcmpneqw : mm0 != 0 then mm1 = -1 */
- "por %%mm5, %%mm1\n\t" /* block[i] |= 1 */
- "pmullw %%mm2, %%mm1\n\t" /* change signs again */
-
- "pand %%mm3, %%mm1\n\t" /* nullify if was zero */
- "movq %%mm1, %0"
- :"=m"(block[i])
- :"m"(block[i]), "m"(quant_matrix[i])
- :"memory");
- }
- } else {
- i = 0;
-// unquant_even:
- quant_matrix = s->non_intra_matrix;
- /* Align on 4 elements boundary */
- while(i&7)
- {
- level = block[i];
- if (level) {
- if (level < 0) level = -level;
- level = (((level << 1) + 1) * qscale *
- ((int) quant_matrix[i])) >> 4;
- level = (level - 1) | 1;
- if(block[i] < 0) level = -level;
- block[i] = level;
- }
- i++;
- }
asm volatile(
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $15, %%mm7 \n\t"
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
+ "movl %3, %%eax \n\t"
".balign 16\n\t"
"1: \n\t"
- "movq (%0, %3), %%mm0 \n\t"
- "movq 8(%0, %3), %%mm1 \n\t"
- "movq (%1, %3), %%mm4 \n\t"
- "movq 8(%1, %3), %%mm5 \n\t"
+ "movq (%0, %%eax), %%mm0 \n\t"
+ "movq 8(%0, %%eax), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm4 \n\t"
+ "movq 8(%1, %%eax), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
+ "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
+ "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
+ "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
+ "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
+ "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" // FIXME slow
+ "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
+ "psraw $3, %%mm0 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+ "psubw %%mm7, %%mm1 \n\t"
+ "por %%mm7, %%mm0 \n\t"
+ "por %%mm7, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "movq %%mm4, (%0, %%eax) \n\t"
+ "movq %%mm5, 8(%0, %%eax) \n\t"
+
+ "addl $16, %%eax \n\t"
+ "js 1b \n\t"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
+ : "%eax", "memory"
+ );
+ block[0]= block0;
+
+ } else {
+ quant_matrix = s->inter_matrix;
+asm volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $15, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "movl %3, %%eax \n\t"
+ ".balign 16\n\t"
+ "1: \n\t"
+ "movq (%0, %%eax), %%mm0 \n\t"
+ "movq 8(%0, %%eax), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm4 \n\t"
+ "movq 8(%1, %%eax), %%mm5 \n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
@@ -310,8 +289,8 @@ asm volatile(
"pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %3), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %3), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
"psraw $4, %%mm0 \n\t"
"psraw $4, %%mm1 \n\t"
"psubw %%mm7, %%mm0 \n\t"
@@ -324,13 +303,145 @@ asm volatile(
"psubw %%mm3, %%mm1 \n\t"
"pandn %%mm0, %%mm4 \n\t"
"pandn %%mm1, %%mm5 \n\t"
- "movq %%mm4, (%0, %3) \n\t"
- "movq %%mm5, 8(%0, %3) \n\t"
+ "movq %%mm4, (%0, %%eax) \n\t"
+ "movq %%mm5, 8(%0, %%eax) \n\t"
- "addl $16, %3 \n\t"
+ "addl $16, %%eax \n\t"
"js 1b \n\t"
- ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "r" (2*(i-nCoeffs))
- : "memory"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
+ : "%eax", "memory"
+ );
+ }
+}
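+
+/* Rough scalar equivalent of the two MMX loops above (illustrative only; it
+ * mirrors the scalar code this patch removes, with the "(level - 1) | 1"
+ * oddification done in the asm via the psubw/por of %mm7 == 1): */
+#if 0
+static void dct_unquantize_mpeg1_ref(DCTELEM *block, const UINT16 *quant_matrix,
+                                     int qscale, int nCoeffs, int intra)
+{
+    int i;
+    for (i = intra ? 1 : 0; i < nCoeffs; i++) {
+        int level = block[i];
+        if (level) {
+            int neg = level < 0;
+            if (neg) level = -level;
+            if (intra) level = (level * qscale * quant_matrix[i]) >> 3;
+            else       level = (((level << 1) + 1) * qscale * quant_matrix[i]) >> 4;
+            level = (level - 1) | 1;
+            block[i] = neg ? -level : level;
+        }
+    }
+}
+#endif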
+
+static void dct_unquantize_mpeg2_mmx(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+ int nCoeffs;
+ const UINT16 *quant_matrix;
+
+ if(s->alternate_scan) nCoeffs= 64;
+    else nCoeffs= zigzag_end[ s->block_last_index[n] ];
+
+ if (s->mb_intra) {
+ int block0;
+ if (n < 4)
+ block0 = block[0] * s->y_dc_scale;
+ else
+ block0 = block[0] * s->c_dc_scale;
+ quant_matrix = s->intra_matrix;
+asm volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $15, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "movl %3, %%eax \n\t"
+ ".balign 16\n\t"
+ "1: \n\t"
+ "movq (%0, %%eax), %%mm0 \n\t"
+ "movq 8(%0, %%eax), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm4 \n\t"
+ "movq 8(%1, %%eax), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
+ "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
+ "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
+ "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
+ "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
+ "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" // FIXME slow
+ "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
+ "psraw $3, %%mm0 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "movq %%mm4, (%0, %%eax) \n\t"
+ "movq %%mm5, 8(%0, %%eax) \n\t"
+
+ "addl $16, %%eax \n\t"
+ "js 1b \n\t"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
+ : "%eax", "memory"
+ );
+ block[0]= block0;
+    //Note: we don't do mismatch control for intra as errors cannot accumulate
+
+ } else {
+ quant_matrix = s->inter_matrix;
+asm volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlq $48, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "movl %3, %%eax \n\t"
+ ".balign 16\n\t"
+ "1: \n\t"
+ "movq (%0, %%eax), %%mm0 \n\t"
+ "movq 8(%0, %%eax), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm4 \n\t"
+ "movq 8(%1, %%eax), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
+ "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
+ "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
+ "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
+ "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
+ "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
+ "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*2*q
+ "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*2*q
+ "paddw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
+ "paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" // FIXME slow
+ "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
+ "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
+ "psrlw $4, %%mm0 \n\t"
+ "psrlw $4, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "pxor %%mm4, %%mm7 \n\t"
+ "pxor %%mm5, %%mm7 \n\t"
+ "movq %%mm4, (%0, %%eax) \n\t"
+ "movq %%mm5, 8(%0, %%eax) \n\t"
+
+ "addl $16, %%eax \n\t"
+ "js 1b \n\t"
+ "movd 124(%0, %3), %%mm0 \n\t"
+ "movq %%mm7, %%mm6 \n\t"
+ "psrlq $32, %%mm7 \n\t"
+ "pxor %%mm6, %%mm7 \n\t"
+ "movq %%mm7, %%mm6 \n\t"
+ "psrlq $16, %%mm7 \n\t"
+ "pxor %%mm6, %%mm7 \n\t"
+ "pslld $31, %%mm7 \n\t"
+ "psrlq $15, %%mm7 \n\t"
+ "pxor %%mm7, %%mm0 \n\t"
+ "movd %%mm0, 124(%0, %3) \n\t"
+
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "r" (-2*nCoeffs)
+ : "%eax", "memory"
);
}
}
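+
+/* Illustrative sketch of the extra work in the inter path above: the running
+ * pxor into %mm7 tracks the parity of the sum of all output coefficients,
+ * and the tail after the loop appears to implement mpeg2 mismatch control,
+ * roughly equivalent to: */
+#if 0
+static void mpeg2_mismatch_ref(DCTELEM *block)
+{
+    int i, sum = 0;
+    for (i = 0; i < 64; i++)
+        sum += block[i];
+    if ((sum & 1) == 0)
+        block[63] ^= 1;      /* toggle the LSB of the last coefficient */
+}
+#endif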
@@ -441,18 +552,16 @@ void unused_var_warning_killer(){
void MPV_common_init_mmx(MpegEncContext *s)
{
if (mm_flags & MM_MMX) {
- if (s->out_format == FMT_H263)
- s->dct_unquantize = dct_unquantize_h263_mmx;
- else
- s->dct_unquantize = dct_unquantize_mpeg1_mmx;
-
- draw_edges = draw_edges_mmx;
-
- if(mm_flags & MM_MMXEXT){
- dct_quantize= dct_quantize_MMX2;
- }else{
- dct_quantize= dct_quantize_MMX;
- }
+ s->dct_unquantize_h263 = dct_unquantize_h263_mmx;
+ s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_mmx;
+ s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_mmx;
+
+ draw_edges = draw_edges_mmx;
+
+ if(mm_flags & MM_MMXEXT){
+ dct_quantize= dct_quantize_MMX2;
+ } else {
+ dct_quantize= dct_quantize_MMX;
+ }
}
}
-
diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
index 2b3322915..aed537a23 100644
--- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
+++ b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
@@ -1,21 +1,22 @@
/*
- Copyright (C) 2002 Michael Niedermayer <michaelni@gmx.at>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
+ * MPEG video MMX templates
+ *
+ * Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
#undef SPREADW
#undef PMAXW
#ifdef HAVE_MMX2
@@ -33,149 +34,165 @@
static int RENAME(dct_quantize)(MpegEncContext *s,
DCTELEM *block, int n,
- int qscale)
+ int qscale, int *overflow)
{
- int i, level, last_non_zero_p1, q;
- const UINT16 *qmat;
+    int level=0, last_non_zero_p1, q; //=0 because gcc warns it may be used uninitialized ...
+ const UINT16 *qmat, *bias;
static __align8 INT16 temp_block[64];
- int minLevel, maxLevel;
-
- if(s->avctx!=NULL && s->avctx->codec->id==CODEC_ID_MPEG4){
- /* mpeg4 */
- minLevel= -2048;
- maxLevel= 2047;
- }else if(s->out_format==FMT_MPEG1){
- /* mpeg1 */
- minLevel= -255;
- maxLevel= 255;
- }else if(s->out_format==FMT_MJPEG){
- /* (m)jpeg */
- minLevel= -1023;
- maxLevel= 1023;
- }else{
- /* h263 / msmpeg4 */
- minLevel= -128;
- maxLevel= 127;
- }
av_fdct (block);
-
+
if (s->mb_intra) {
int dummy;
if (n < 4)
q = s->y_dc_scale;
else
q = s->c_dc_scale;
-
/* note: block[0] is assumed to be positive */
+ if (!s->h263_aic) {
#if 1
- asm volatile (
- "xorl %%edx, %%edx \n\t"
- "mul %%ecx \n\t"
- : "=d" (temp_block[0]), "=a"(dummy)
- : "a" (block[0] + (q >> 1)), "c" (inverse[q])
- );
+ asm volatile (
+ "xorl %%edx, %%edx \n\t"
+ "mul %%ecx \n\t"
+ : "=d" (level), "=a"(dummy)
+ : "a" (block[0] + (q >> 1)), "c" (inverse[q])
+ );
#else
- asm volatile (
- "xorl %%edx, %%edx \n\t"
- "divw %%cx \n\t"
- "movzwl %%ax, %%eax \n\t"
- : "=a" (temp_block[0])
- : "a" (block[0] + (q >> 1)), "c" (q)
- : "%edx"
- );
+ asm volatile (
+ "xorl %%edx, %%edx \n\t"
+ "divw %%cx \n\t"
+ "movzwl %%ax, %%eax \n\t"
+ : "=a" (level)
+ : "a" (block[0] + (q >> 1)), "c" (q)
+ : "%edx"
+ );
#endif
+ } else
+ /* For AIC we skip quant/dequant of INTRADC */
+ level = block[0];
+
+ block[0]=0; //avoid fake overflow
// temp_block[0] = (block[0] + (q >> 1)) / q;
- i = 1;
last_non_zero_p1 = 1;
- if (s->out_format == FMT_H263) {
- qmat = s->q_non_intra_matrix16;
- } else {
- qmat = s->q_intra_matrix16;
- }
- for(i=1;i<4;i++) {
- level = block[i] * qmat[i];
- level = level / (1 << (QMAT_SHIFT_MMX - 3));
- /* XXX: currently, this code is not optimal. the range should be:
- mpeg1: -255..255
- mpeg2: -2048..2047
- h263: -128..127
- mpeg4: -2048..2047
- */
- if (level > maxLevel)
- level = maxLevel;
- else if (level < minLevel)
- level = minLevel;
- temp_block[i] = level;
-
- if(level)
- if(last_non_zero_p1 < inv_zigzag_direct16[i]) last_non_zero_p1= inv_zigzag_direct16[i];
- block[i]=0;
- }
+ bias = s->q_intra_matrix16_bias[qscale];
+ qmat = s->q_intra_matrix16[qscale];
} else {
- i = 0;
last_non_zero_p1 = 0;
- qmat = s->q_non_intra_matrix16;
+ bias = s->q_inter_matrix16_bias[qscale];
+ qmat = s->q_inter_matrix16[qscale];
}
- asm volatile( /* XXX: small rounding bug, but it shouldnt matter */
- "movd %3, %%mm3 \n\t"
- SPREADW(%%mm3)
- "movd %4, %%mm4 \n\t"
- SPREADW(%%mm4)
-#ifndef HAVE_MMX2
- "movd %5, %%mm5 \n\t"
- SPREADW(%%mm5)
-#endif
- "pxor %%mm7, %%mm7 \n\t"
- "movd %%eax, %%mm2 \n\t"
- SPREADW(%%mm2)
- "movl %6, %%eax \n\t"
- ".balign 16 \n\t"
- "1: \n\t"
- "movq (%1, %%eax), %%mm0 \n\t"
- "movq (%2, %%eax), %%mm1 \n\t"
- "movq %%mm0, %%mm6 \n\t"
- "psraw $15, %%mm6 \n\t"
- "pmulhw %%mm0, %%mm1 \n\t"
- "psubsw %%mm6, %%mm1 \n\t"
-#ifdef HAVE_MMX2
- "pminsw %%mm3, %%mm1 \n\t"
- "pmaxsw %%mm4, %%mm1 \n\t"
-#else
- "paddsw %%mm3, %%mm1 \n\t"
- "psubusw %%mm4, %%mm1 \n\t"
- "paddsw %%mm5, %%mm1 \n\t"
-#endif
- "movq %%mm1, (%8, %%eax) \n\t"
- "pcmpeqw %%mm7, %%mm1 \n\t"
- "movq (%7, %%eax), %%mm0 \n\t"
- "movq %%mm7, (%1, %%eax) \n\t"
- "pandn %%mm0, %%mm1 \n\t"
- PMAXW(%%mm1, %%mm2)
- "addl $8, %%eax \n\t"
- " js 1b \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "psrlq $32, %%mm2 \n\t"
- PMAXW(%%mm0, %%mm2)
- "movq %%mm2, %%mm0 \n\t"
- "psrlq $16, %%mm2 \n\t"
- PMAXW(%%mm0, %%mm2)
- "movd %%mm2, %%eax \n\t"
- "movzbl %%al, %%eax \n\t"
- : "+a" (last_non_zero_p1)
- : "r" (block+64), "r" (qmat+64),
-#ifdef HAVE_MMX2
- "m" (maxLevel), "m" (minLevel), "m" (minLevel /* dummy */), "g" (2*i - 128),
-#else
- "m" (0x7FFF - maxLevel), "m" (0x7FFF -maxLevel + minLevel), "m" (minLevel), "g" (2*i - 128),
-#endif
- "r" (inv_zigzag_direct16+64), "r" (temp_block+64)
- );
+ if(s->out_format == FMT_H263){
+
+ asm volatile(
+ "movd %%eax, %%mm3 \n\t" // last_non_zero_p1
+ SPREADW(%%mm3)
+ "pxor %%mm7, %%mm7 \n\t" // 0
+ "pxor %%mm4, %%mm4 \n\t" // 0
+ "movq (%2), %%mm5 \n\t" // qmat[0]
+ "pxor %%mm6, %%mm6 \n\t"
+ "psubw (%3), %%mm6 \n\t" // -bias[0]
+ "movl $-128, %%eax \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ "pxor %%mm1, %%mm1 \n\t" // 0
+ "movq (%1, %%eax), %%mm0 \n\t" // block[i]
+ "pcmpgtw %%mm0, %%mm1 \n\t" // block[i] <= 0 ? 0xFF : 0x00
+ "pxor %%mm1, %%mm0 \n\t"
+ "psubw %%mm1, %%mm0 \n\t" // ABS(block[i])
+ "psubusw %%mm6, %%mm0 \n\t" // ABS(block[i]) + bias[0]
+ "pmulhw %%mm5, %%mm0 \n\t" // (ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16
+ "por %%mm0, %%mm4 \n\t"
+ "pxor %%mm1, %%mm0 \n\t"
+ "psubw %%mm1, %%mm0 \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
+ "movq %%mm0, (%5, %%eax) \n\t"
+ "pcmpeqw %%mm7, %%mm0 \n\t" // out==0 ? 0xFF : 0x00
+ "movq (%4, %%eax), %%mm1 \n\t"
+ "movq %%mm7, (%1, %%eax) \n\t" // 0
+ "pandn %%mm1, %%mm0 \n\t"
+ PMAXW(%%mm0, %%mm3)
+ "addl $8, %%eax \n\t"
+ " js 1b \n\t"
+ "movq %%mm3, %%mm0 \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ PMAXW(%%mm0, %%mm3)
+ "movq %%mm3, %%mm0 \n\t"
+ "psrlq $16, %%mm3 \n\t"
+ PMAXW(%%mm0, %%mm3)
+ "movd %%mm3, %%eax \n\t"
+ "movzbl %%al, %%eax \n\t" // last_non_zero_p1
+ : "+a" (last_non_zero_p1)
+ : "r" (block+64), "r" (qmat), "r" (bias),
+ "r" (inv_zigzag_direct16+64), "r" (temp_block+64)
+ );
+        // note: the asm is split because gcc doesn't like that many operands ...
+ asm volatile(
+ "movd %1, %%mm1 \n\t" // max_qcoeff
+ SPREADW(%%mm1)
+ "psubusw %%mm1, %%mm4 \n\t"
+ "packuswb %%mm4, %%mm4 \n\t"
+ "movd %%mm4, %0 \n\t" // *overflow
+ : "=g" (*overflow)
+ : "g" (s->max_qcoeff)
+ );
+ }else{ // FMT_H263
+ asm volatile(
+ "movd %%eax, %%mm3 \n\t" // last_non_zero_p1
+ SPREADW(%%mm3)
+ "pxor %%mm7, %%mm7 \n\t" // 0
+ "pxor %%mm4, %%mm4 \n\t" // 0
+ "movl $-128, %%eax \n\t"
+ ".balign 16 \n\t"
+ "1: \n\t"
+ "pxor %%mm1, %%mm1 \n\t" // 0
+ "movq (%1, %%eax), %%mm0 \n\t" // block[i]
+ "pcmpgtw %%mm0, %%mm1 \n\t" // block[i] <= 0 ? 0xFF : 0x00
+ "pxor %%mm1, %%mm0 \n\t"
+ "psubw %%mm1, %%mm0 \n\t" // ABS(block[i])
+ "movq (%3, %%eax), %%mm6 \n\t" // bias[0]
+ "paddusw %%mm6, %%mm0 \n\t" // ABS(block[i]) + bias[0]
+ "movq (%2, %%eax), %%mm5 \n\t" // qmat[i]
+ "pmulhw %%mm5, %%mm0 \n\t" // (ABS(block[i])*qmat[0] + bias[0]*qmat[0])>>16
+ "por %%mm0, %%mm4 \n\t"
+ "pxor %%mm1, %%mm0 \n\t"
+ "psubw %%mm1, %%mm0 \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
+ "movq %%mm0, (%5, %%eax) \n\t"
+ "pcmpeqw %%mm7, %%mm0 \n\t" // out==0 ? 0xFF : 0x00
+ "movq (%4, %%eax), %%mm1 \n\t"
+ "movq %%mm7, (%1, %%eax) \n\t" // 0
+ "pandn %%mm1, %%mm0 \n\t"
+ PMAXW(%%mm0, %%mm3)
+ "addl $8, %%eax \n\t"
+ " js 1b \n\t"
+ "movq %%mm3, %%mm0 \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ PMAXW(%%mm0, %%mm3)
+ "movq %%mm3, %%mm0 \n\t"
+ "psrlq $16, %%mm3 \n\t"
+ PMAXW(%%mm0, %%mm3)
+ "movd %%mm3, %%eax \n\t"
+ "movzbl %%al, %%eax \n\t" // last_non_zero_p1
+ : "+a" (last_non_zero_p1)
+ : "r" (block+64), "r" (qmat+64), "r" (bias+64),
+ "r" (inv_zigzag_direct16+64), "r" (temp_block+64)
+ );
+        // note: the asm is split because gcc doesn't like that many operands ...
+ asm volatile(
+ "movd %1, %%mm1 \n\t" // max_qcoeff
+ SPREADW(%%mm1)
+ "psubusw %%mm1, %%mm4 \n\t"
+ "packuswb %%mm4, %%mm4 \n\t"
+ "movd %%mm4, %0 \n\t" // *overflow
+ : "=g" (*overflow)
+ : "g" (s->max_qcoeff)
+ );
+ }
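+
+/* Rough scalar view of the two asm pairs above, following their own comments
+ * (names illustrative): each coefficient becomes
+ *   sign(block[i]) * ((ABS(block[i]), with the bias folded in, times qmat[i]) >> 16)
+ * while %mm4 ORs all the magnitudes together, so the small second asm only
+ * has to compare that OR against s->max_qcoeff to set *overflow, e.g.: */
+#if 0
+static int quantize_overflow_ref(const INT16 *out, int count, int max_qcoeff)
+{
+    int i, acc = 0;
+    for (i = 0; i < count; i++)
+        acc |= out[i] < 0 ? -out[i] : out[i];
+    return acc > max_qcoeff;   /* conservative: the OR can over-, never under-report */
+}
+#endif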
+
+    if(s->mb_intra) temp_block[0]= level; //FIXME move after permute
// last_non_zero_p1=64;
/* permute for IDCT */
asm volatile(
- "movl %0, %%eax \n\t"
+ "movl %0, %%eax \n\t"
"pushl %%ebp \n\t"
"movl %%esp, " MANGLE(esp_temp) "\n\t"
"1: \n\t"
@@ -203,5 +220,6 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
}
*/
//block_permute(block);
+
return last_non_zero_p1 - 1;
}
diff --git a/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c b/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c
index 297f23724..4f19cc20a 100644
--- a/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/simple_idct_mmx.c
@@ -1,29 +1,43 @@
/*
- Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <inttypes.h>
+ * Simple IDCT MMX
+ *
+ * Copyright (c) 2001, 2002 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
#include "../dsputil.h"
+/*
+23170.475006
+22725.260826
+21406.727617
+19265.545870
+16384.000000
+12872.826198
+8866.956905
+4520.335430
+*/
#define C0 23170 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define C1 22725 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define C2 21407 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define C3 19266 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#if 0
#define C4 16384 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+#else
+#define C4 16383 //cos(i*M_PI/16)*sqrt(2)*(1<<14) - 0.5
+#endif
#define C5 12873 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define C6 8867 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
#define C7 4520 //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
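+
+/* Worked example of the formula in the comments above, using the float values
+ * listed at the top of the file:
+ *   C1 = round(cos(1*M_PI/16) * sqrt(2) * (1<<14)) = round(22725.26) = 22725
+ *   C2 = round(cos(2*M_PI/16) * sqrt(2) * (1<<14)) = round(21406.73) = 21407
+ * C4 is the only one taken as 16383 instead of the exact 16384. */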
@@ -31,8 +45,8 @@
#define ROW_SHIFT 11
#define COL_SHIFT 20 // 6
-static uint64_t __attribute__((aligned(8))) wm1010= 0xFFFF0000FFFF0000ULL;
-static uint64_t __attribute__((aligned(8))) d40000= 0x0000000000040000ULL;
+static const uint64_t __attribute__((aligned(8))) wm1010= 0xFFFF0000FFFF0000ULL;
+static const uint64_t __attribute__((aligned(8))) d40000= 0x0000000000040000ULL;
static int16_t __attribute__((aligned(8))) temp[64];
static int16_t __attribute__((aligned(8))) coeffs[]= {
1<<(ROW_SHIFT-1), 0, 1<<(ROW_SHIFT-1), 0,
@@ -43,27 +57,31 @@ static int16_t __attribute__((aligned(8))) coeffs[]= {
// 0, 0, 0, 0,
// 0, 0, 0, 0,
- C4, C2, C4, C2,
- C4, C6, C4, C6,
- C1, C3, C1, C3,
- C5, C7, C5, C7,
+ C4, C4, C4, C4,
+ C4, -C4, C4, -C4,
+
+ C2, C6, C2, C6,
+ C6, -C2, C6, -C2,
+
+ C1, C3, C1, C3,
+ C5, C7, C5, C7,
+
+ C3, -C7, C3, -C7,
+-C1, -C5, -C1, -C5,
+
+ C5, -C1, C5, -C1,
+ C7, C3, C7, C3,
+
+ C7, -C5, C7, -C5,
+ C3, -C1, C3, -C1
+};
- C4, C6, C4, C6,
- -C4, -C2, -C4, -C2,
- C3, -C7, C3, -C7,
- -C1, -C5, -C1, -C5,
-
- C4, -C6, C4, -C6,
- -C4, C2, -C4, C2,
- C5, -C1, C5, -C1,
- C7, C3, C7, C3,
-
- C4, -C2, C4, -C2,
- C4, -C6, C4, -C6,
- C7, -C5, C7, -C5,
- C3, -C1, C3, -C1
- };
#if 0
+static void unused_var_killer(){
+ int a= wm1010 + d40000;
+ temp[0]=a;
+}
+
static void inline idctCol (int16_t * col, int16_t *input)
{
#undef C0
@@ -79,7 +97,7 @@ static void inline idctCol (int16_t * col, int16_t *input)
const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
- const int C4 = 16384; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+	const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) - 0.5
const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
@@ -128,7 +146,7 @@ static void inline idctRow (int16_t * output, int16_t * input)
const int C1 = 22725; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
const int C2 = 21407; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
const int C3 = 19266; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
- const int C4 = 16384; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
+	const int C4 = 16383; //cos(i*M_PI/16)*sqrt(2)*(1<<14) - 0.5
const int C5 = 12873; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
const int C6 = 8867; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
const int C7 = 4520; //cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5
@@ -188,110 +206,160 @@ row[7] = input[13];
static inline void idct(int16_t *block)
{
- int i;
-//for(i=0; i<64; i++) temp[i]= block[ block_permute_op(i) ];
-//for(i=0; i<64; i++) temp[block_permute_op(i)]= block[ i ];
-//for(i=0; i<64; i++) block[i]= temp[i];
-//block_permute(block);
-/*
-idctRow(temp, block);
-idctRow(temp+16, block+16);
-idctRow(temp+1, block+2);
-idctRow(temp+17, block+18);
-idctRow(temp+32, block+32);
-idctRow(temp+48, block+48);
-idctRow(temp+33, block+34);
-idctRow(temp+49, block+50);
-*/
-
asm volatile(
-// "lea 64(%0), %%eax \n\t"
-//r0,r2,R0,R2 r4,r6,R4,R6 r1,r3,R1,R3 r5,r7,R5,R7
-//src0 src4 src1 src5
-//r0,R0,r7,R7 r1,R1,r6,R6 r2,R2,r5,R5 r3,R3,r4,R4
-//dst0 dst1 dst2 dst3
#if 0 //Alternative, simpler variant
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
+
+#define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
#rounder ", %%mm4 \n\t"\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B0 b0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
"paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
"psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE0(%%mm6, %%mm4, dst) \
-\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
- "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A1 a1 */\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
+
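+/* Scalar sketch of the structure ROW_IDCT above and COL_IDCT below share,
+ * per their comments (names illustrative; the real code processes two rows
+ * per pass and permutes the stores): the even inputs form A0..A3, the odd
+ * inputs form B0..B3, and the outputs come in (Ak+Bk)/(Ak-Bk) pairs. */
+#if 0
+static void idct_1d_ref(int16_t *v, int rounder, int shift)
+{
+    int a0 = C4*(v[0] + v[4]) + C2*v[2] + C6*v[6] + rounder;
+    int a1 = C4*(v[0] - v[4]) + C6*v[2] - C2*v[6] + rounder;
+    int a2 = C4*(v[0] - v[4]) - C6*v[2] + C2*v[6] + rounder;
+    int a3 = C4*(v[0] + v[4]) - C2*v[2] - C6*v[6] + rounder;
+    int b0 = C1*v[1] + C3*v[3] + C5*v[5] + C7*v[7];
+    int b1 = C3*v[1] - C7*v[3] - C1*v[5] - C5*v[7];
+    int b2 = C5*v[1] - C1*v[3] + C7*v[5] + C3*v[7];
+    int b3 = C7*v[1] - C5*v[3] + C3*v[5] - C1*v[7];
+    v[0] = (a0 + b0) >> shift;  v[7] = (a0 - b0) >> shift;
+    v[1] = (a1 + b1) >> shift;  v[6] = (a1 - b1) >> shift;
+    v[2] = (a2 + b2) >> shift;  v[5] = (a2 - b2) >> shift;
+    v[3] = (a3 + b3) >> shift;  v[4] = (a3 - b3) >> shift;
+}
+#endif
+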
+#define COL_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
#rounder ", %%mm4 \n\t"\
-\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ #rounder ", %%mm0 \n\t"\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE1(%%mm6, %%mm4, dst, %%mm7) \
-\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
- #rounder ", %%mm4 \n\t"\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
"psrad $" #shift ", %%mm6 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
- #rounder ", %%mm0 \n\t"\
- "paddd %%mm3, %%mm2 \n\t" /* B3 b3 */\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
- "psrad $" #shift ", %%mm2 \n\t"\
- "psrad $" #shift ", %%mm0 \n\t"\
- WRITE2(%%mm6, %%mm4, %%mm2, %%mm0, dst)
-
-#define DC_COND_IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"\
+
+
+#define DC_COND_ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
- "movq wm1010, %%mm4 \n\t"\
+ "movq "MANGLE(wm1010)", %%mm4 \n\t"\
"pand %%mm0, %%mm4 \n\t"\
"por %%mm1, %%mm4 \n\t"\
"por %%mm2, %%mm4 \n\t"\
@@ -300,234 +368,106 @@ idctRow(temp+49, block+50);
"movd %%mm4, %%eax \n\t"\
"orl %%eax, %%eax \n\t"\
"jz 1f \n\t"\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
#rounder ", %%mm4 \n\t"\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B0 b0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
"paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE0(%%mm6, %%mm4, dst) \
-\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
- "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A1 a1 */\
- #rounder ", %%mm4 \n\t"\
-\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE1(%%mm6, %%mm4, dst, %%mm7) \
-\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
- #rounder ", %%mm4 \n\t"\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
- #rounder ", %%mm0 \n\t"\
- "paddd %%mm3, %%mm2 \n\t" /* B3 b3 */\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
"psrad $" #shift ", %%mm2 \n\t"\
"psrad $" #shift ", %%mm0 \n\t"\
- WRITE2(%%mm6, %%mm4, %%mm2, %%mm0, dst)\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
"jmp 2f \n\t"\
"1: \n\t"\
- WRITE3(%%mm0, dst)\
- "2: \n\t"\
-
-
-#define WRITE0(s0, s7, dst)\
- "movq " #s0 ", " #dst " \n\t" /* R0 r0 */\
- "movq " #s7 ", 24+" #dst " \n\t" /* R7 r7 */
-
-#define WRITE1(s1, s6, dst, tmp)\
- "movq " #dst ", " #tmp " \n\t" /* R0 r0 */\
- "packssdw " #s1 ", " #tmp " \n\t" /* R1 r1 R0 r0*/\
- "movq " #tmp ", " #dst " \n\t"\
- "movq 24+" #dst ", " #tmp " \n\t" /* R7 r7 */\
- "packssdw " #tmp ", " #s6 " \n\t" /* R7 r7 R6 r6*/\
- "movq " #s6 ", 24+" #dst " \n\t"
-
-#define WRITE2(s2, s5, s3, s4, dst)\
- "packssdw " #s3 ", " #s2 " \n\t" /* R3 r3 R2 r2*/\
- "packssdw " #s5 ", " #s4 " \n\t" /* R5 r5 R4 r4*/\
- "movq " #s2 ", 8+" #dst " \n\t"\
- "movq " #s4 ", 16+" #dst " \n\t"
-
-#define WRITE3(a, dst)\
- "pslld $16, " #a " \n\t"\
- "psrad $13, " #a " \n\t"\
- "packssdw " #a ", " #a " \n\t"\
- "movq " #a ", " #dst " \n\t"\
- "movq " #a ", 8+" #dst " \n\t"\
- "movq " #a ", 16+" #dst " \n\t"\
- "movq " #a ", 24+" #dst " \n\t"\
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( (%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11)
-/*
-DC_COND_IDCT_CORE( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11)
-DC_COND_IDCT_CORE( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11)
-DC_COND_IDCT_CORE( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11)
-*/
-IDCT_CORE( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11)
-IDCT_CORE( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11)
-IDCT_CORE( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11)
+ "pslld $16, %%mm0 \n\t"\
+ "#paddd "MANGLE(d40000)", %%mm0 \n\t"\
+ "psrad $13, %%mm0 \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t"\
+ "movq %%mm0, " #dst " \n\t"\
+ "movq %%mm0, 8+" #dst " \n\t"\
+ "movq %%mm0, 16+" #dst " \n\t"\
+ "movq %%mm0, 24+" #dst " \n\t"\
+ "2: \n\t"
-#undef WRITE0
-#undef WRITE1
-#undef WRITE2
-#define WRITE0(s0, s7, dst)\
- "packssdw " #s0 ", " #s0 " \n\t" /* C0, c0, C0, c0 */\
- "packssdw " #s7 ", " #s7 " \n\t" /* C7, c7, C7, c7 */\
- "movd " #s0 ", " #dst " \n\t" /* C0, c0 */\
- "movd " #s7 ", 112+" #dst " \n\t" /* C7, c7 */
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+ROW_IDCT( (%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11)
+/*ROW_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1), paddd (%2), 11)
+ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1), paddd (%2), 11)
+ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1), paddd (%2), 11)*/
-#define WRITE1(s1, s6, dst, tmp)\
- "packssdw " #s1 ", " #s1 " \n\t" /* C1, c1, C1, c1 */\
- "packssdw " #s6 ", " #s6 " \n\t" /* C6, c6, C6, c6 */\
- "movd " #s1 ", 16+" #dst " \n\t" /* C1, c1 */\
- "movd " #s6 ", 96+" #dst " \n\t" /* C6, c6 */
+DC_COND_ROW_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11)
+DC_COND_ROW_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11)
+DC_COND_ROW_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11)
-#define WRITE2(s2, s5, s3, s4, dst)\
- "packssdw " #s2 ", " #s2 " \n\t" /* C2, c2, C2, c2 */\
- "packssdw " #s3 ", " #s3 " \n\t" /* C3, c3, C3, c3 */\
- "movd " #s2 ", 32+" #dst " \n\t" /* C2, c2 */\
- "movd " #s3 ", 48+" #dst " \n\t" /* C3, c3 */\
- "packssdw " #s4 ", " #s4 " \n\t" /* C4, c4, C4, c4 */\
- "packssdw " #s5 ", " #s5 " \n\t" /* C5, c5, C5, c5 */\
- "movd " #s4 ", 64+" #dst " \n\t" /* C4, c4 */\
- "movd " #s5 ", 80+" #dst " \n\t" /* C5, c5 */\
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+COL_IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+COL_IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+COL_IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+COL_IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
#else
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
+#define DC_COND_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
- #rounder ", %%mm4 \n\t"\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B0 b0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE0(%%mm6, %%mm4, dst) \
-\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
- "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A1 a1 */\
- #rounder ", %%mm4 \n\t"\
-\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE1(%%mm6, %%mm4, dst, %%mm7) \
-\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
- #rounder ", %%mm4 \n\t"\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
- #rounder ", %%mm0 \n\t"\
- "paddd %%mm3, %%mm2 \n\t" /* B3 b3 */\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
- "psrad $" #shift ", %%mm2 \n\t"\
- "psrad $" #shift ", %%mm0 \n\t"\
- WRITE2(%%mm6, %%mm4, %%mm2, %%mm0, dst)
-
-#define DC_COND_IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
- "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
- "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
- "movq wm1010, %%mm4 \n\t"\
+ "movq "MANGLE(wm1010)", %%mm4 \n\t"\
"pand %%mm0, %%mm4 \n\t"\
"por %%mm1, %%mm4 \n\t"\
"por %%mm2, %%mm4 \n\t"\
@@ -536,920 +476,822 @@ IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"movd %%mm4, %%eax \n\t"\
"orl %%eax, %%eax \n\t"\
"jz 1f \n\t"\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
#rounder ", %%mm4 \n\t"\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B0 b0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
"paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE0(%%mm6, %%mm4, dst) \
-\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
- "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A1 a1 */\
- #rounder ", %%mm4 \n\t"\
-\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE1(%%mm6, %%mm4, dst, %%mm7) \
-\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
- #rounder ", %%mm4 \n\t"\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
- #rounder ", %%mm0 \n\t"\
- "paddd %%mm3, %%mm2 \n\t" /* B3 b3 */\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
"psrad $" #shift ", %%mm2 \n\t"\
"psrad $" #shift ", %%mm0 \n\t"\
- WRITE2(%%mm6, %%mm4, %%mm2, %%mm0, dst)\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
"jmp 2f \n\t"\
- "#.balign 16 \n\t"\
"1: \n\t"\
- WRITE3(%%mm0, dst)\
- "2: \n\t"\
+ "pslld $16, %%mm0 \n\t"\
+ "paddd "MANGLE(d40000)", %%mm0 \n\t"\
+ "psrad $13, %%mm0 \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t"\
+ "movq %%mm0, " #dst " \n\t"\
+ "movq %%mm0, 8+" #dst " \n\t"\
+ "movq %%mm0, 16+" #dst " \n\t"\
+ "movq %%mm0, 24+" #dst " \n\t"\
+ "2: \n\t"
-#define Z_COND_IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift, bt) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
+#define Z_COND_IDCT(src0, src4, src1, src5, dst, rounder, shift, bt) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
"movq %%mm0, %%mm4 \n\t"\
"por %%mm1, %%mm4 \n\t"\
"por %%mm2, %%mm4 \n\t"\
"por %%mm3, %%mm4 \n\t"\
- "packssdw %%mm4, %%mm4 \n\t"\
+ "packssdw %%mm4,%%mm4 \n\t"\
"movd %%mm4, %%eax \n\t"\
"orl %%eax, %%eax \n\t"\
"jz " #bt " \n\t"\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
#rounder ", %%mm4 \n\t"\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B0 b0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
"paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE0(%%mm6, %%mm4, dst) \
-\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
- "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A1 a1 */\
- #rounder ", %%mm4 \n\t"\
-\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE1(%%mm6, %%mm4, dst, %%mm7) \
-\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
- #rounder ", %%mm4 \n\t"\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
- #rounder ", %%mm0 \n\t"\
- "paddd %%mm3, %%mm2 \n\t" /* B3 b3 */\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
"psrad $" #shift ", %%mm2 \n\t"\
"psrad $" #shift ", %%mm0 \n\t"\
- WRITE2(%%mm6, %%mm4, %%mm2, %%mm0, dst)\
-
-
-#define WRITE0(s0, s7, dst)\
- "movq " #s0 ", " #dst " \n\t" /* R0 r0 */\
- "movq " #s7 ", 24+" #dst " \n\t" /* R7 r7 */
-
-#define WRITE1(s1, s6, dst, tmp)\
- "movq " #dst ", " #tmp " \n\t" /* R0 r0 */\
- "packssdw " #s1 ", " #tmp " \n\t" /* R1 r1 R0 r0*/\
- "movq " #tmp ", " #dst " \n\t"\
- "movq 24+" #dst ", " #tmp " \n\t" /* R7 r7 */\
- "packssdw " #tmp ", " #s6 " \n\t" /* R7 r7 R6 r6*/\
- "movq " #s6 ", 24+" #dst " \n\t"
-
-#define WRITE2(s2, s5, s3, s4, dst)\
- "packssdw " #s3 ", " #s2 " \n\t" /* R3 r3 R2 r2*/\
- "packssdw " #s5 ", " #s4 " \n\t" /* R5 r5 R4 r4*/\
- "movq " #s2 ", 8+" #dst " \n\t"\
- "movq " #s4 ", 16+" #dst " \n\t"
-
-#define WRITE3(a, dst)\
- "pslld $16, " #a " \n\t"\
- "paddd d40000, " #a " \n\t"\
- "psrad $13, " #a " \n\t"\
- "packssdw " #a ", " #a " \n\t"\
- "movq " #a ", " #dst " \n\t"\
- "movq " #a ", 8+" #dst " \n\t"\
- "movq " #a ", 16+" #dst " \n\t"\
- "movq " #a ", 24+" #dst " \n\t"\
-
-#define WRITE0b(s0, s7, dst)\
- "packssdw " #s0 ", " #s0 " \n\t" /* C0, c0, C0, c0 */\
- "packssdw " #s7 ", " #s7 " \n\t" /* C7, c7, C7, c7 */\
- "movd " #s0 ", " #dst " \n\t" /* C0, c0 */\
- "movd " #s7 ", 112+" #dst " \n\t" /* C7, c7 */
-
-#define WRITE1b(s1, s6, dst, tmp)\
- "packssdw " #s1 ", " #s1 " \n\t" /* C1, c1, C1, c1 */\
- "packssdw " #s6 ", " #s6 " \n\t" /* C6, c6, C6, c6 */\
- "movd " #s1 ", 16+" #dst " \n\t" /* C1, c1 */\
- "movd " #s6 ", 96+" #dst " \n\t" /* C6, c6 */
-
-#define WRITE2b(s2, s5, s3, s4, dst)\
- "packssdw " #s2 ", " #s2 " \n\t" /* C2, c2, C2, c2 */\
- "packssdw " #s3 ", " #s3 " \n\t" /* C3, c3, C3, c3 */\
- "movd " #s2 ", 32+" #dst " \n\t" /* C2, c2 */\
- "movd " #s3 ", 48+" #dst " \n\t" /* C3, c3 */\
- "packssdw " #s4 ", " #s4 " \n\t" /* C4, c4, C4, c4 */\
- "packssdw " #s5 ", " #s5 " \n\t" /* C5, c5, C5, c5 */\
- "movd " #s4 ", 64+" #dst " \n\t" /* C4, c4 */\
- "movd " #s5 ", 80+" #dst " \n\t" /* C5, c5 */\
-
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-DC_COND_IDCT_CORE( 0(%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11)
-Z_COND_IDCT_CORE( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11, 4f)
-Z_COND_IDCT_CORE( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 2f)
-Z_COND_IDCT_CORE( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 1f)
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
-#undef IDCT_CORE
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
+#define ROW_IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B0 b0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq 56(%2), %%mm5 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ #rounder ", %%mm0 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm0, %%mm0 \n\t" \
+ "psubd %%mm1, %%mm0 \n\t" /* A2 a2 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm5, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm5 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm5 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
"paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm5 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE0b(%%mm6, %%mm4, dst) \
-\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
- "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A1 a1 */\
-\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
+ "movq %%mm1, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm5, %%mm1 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm5, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm7 \n\t" /* A1+B1 a1+b1 A0+B0 a0+b0 */\
+ "packssdw %%mm4, %%mm2 \n\t" /* A0-B0 a0-b0 A1-B1 a1-b1 */\
+ "movq %%mm7, " #dst " \n\t"\
+ "movq " #src1 ", %%mm1 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "movq %%mm2, 24+" #dst " \n\t"\
+ "pmaddwd %%mm1, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm1 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm0, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm0 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm1, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
"psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm6, %%mm2 \n\t" /* A3+B3 a3+b3 A2+B2 a2+b2 */\
+ "movq %%mm2, 8+" #dst " \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE1b(%%mm6, %%mm4, dst, %%mm7) \
-\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "packssdw %%mm0, %%mm4 \n\t" /* A2-B2 a2-b2 A3-B3 a3-b3 */\
+ "movq %%mm4, 16+" #dst " \n\t"\
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+DC_COND_IDCT( 0(%0), 8(%0), 16(%0), 24(%0), 0(%1),paddd 8(%2), 11)
+Z_COND_IDCT( 32(%0), 40(%0), 48(%0), 56(%0), 32(%1),paddd (%2), 11, 4f)
+Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 2f)
+Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 1f)
+
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
+ "movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ #rounder ", %%mm0 \n\t"\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
+ "psrad $" #shift ", %%mm4 \n\t"\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
"psrad $" #shift ", %%mm6 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
- "paddd %%mm3, %%mm2 \n\t" /* B3 b3 */\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
- "psrad $" #shift ", %%mm2 \n\t"\
- "psrad $" #shift ", %%mm0 \n\t"\
- WRITE2b(%%mm6, %%mm4, %%mm2, %%mm0, dst)
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"jmp 9f \n\t"
"#.balign 16 \n\t"\
"4: \n\t"
-Z_COND_IDCT_CORE( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 6f)
-Z_COND_IDCT_CORE( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f)
+Z_COND_IDCT( 64(%0), 72(%0), 80(%0), 88(%0), 64(%1),paddd (%2), 11, 6f)
+Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 5f)
-#undef IDCT_CORE
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ #rounder ", %%mm0 \n\t"\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm7 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE0b(%%mm7, %%mm4, dst) \
-\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
"movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A1 a1 */\
-\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm4, %%mm7 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm7, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm7 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE1b(%%mm7, %%mm4, dst, %%mm6) \
-\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm4, %%mm7 \n\t" /* A1+B1 a1+b1 */\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm7, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
- "psrad $" #shift ", %%mm7 \n\t"\
+ "paddd %%mm4, %%mm1 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm1, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
- "paddd %%mm0, %%mm3 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm3, %%mm0 \n\t" /* A3-B3 a3-b3 */\
- "psrad $" #shift ", %%mm3 \n\t"\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm7, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm7, %%mm2 \n\t" /* A1-B1 a1-b1 */\
"psrad $" #shift ", %%mm0 \n\t"\
- WRITE2b(%%mm7, %%mm4, %%mm3, %%mm0, dst)
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm1, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq 88(%2), %%mm1 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm1, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm1, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm1 \n\t" /* A3 a3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm1 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm1, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"jmp 9f \n\t"
"#.balign 16 \n\t"\
"6: \n\t"
-Z_COND_IDCT_CORE( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f)
+Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 7f)
-#undef IDCT_CORE
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm7 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE0b(%%mm7, %%mm4, dst) \
-\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ #rounder ", %%mm0 \n\t"\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
"movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
-\
- "movq 80(%2), %%mm4 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm5, %%mm7 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm5, %%mm5 \n\t" /* 2A1 2a1 */\
- "psubd %%mm7, %%mm5 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm7 \n\t"\
- "psrad $" #shift ", %%mm5 \n\t"\
- WRITE1b(%%mm7, %%mm5, dst, %%mm6) \
-\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm4, %%mm7 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm7, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
- "psrad $" #shift ", %%mm7 \n\t"\
+ "paddd %%mm4, %%mm1 \n\t" /* A0+B0 a0+b0 */\
+ "paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
+ "psubd %%mm1, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm0, %%mm3 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm3, %%mm0 \n\t" /* A3-B3 a3-b3 */\
- "psrad $" #shift ", %%mm3 \n\t"\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm7, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm7, %%mm2 \n\t" /* A1-B1 a1-b1 */\
"psrad $" #shift ", %%mm0 \n\t"\
- WRITE2b(%%mm7, %%mm4, %%mm3, %%mm0, dst)
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm1, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq 88(%2), %%mm1 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm1, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm1, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm1 \n\t" /* A3 a3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm1 \n\t" /* a3-B3 a3-b3 */\
+ "psrad $" #shift ", %%mm6 \n\t"\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm1, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"jmp 9f \n\t"
"#.balign 16 \n\t"\
"2: \n\t"
-Z_COND_IDCT_CORE( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f)
+Z_COND_IDCT( 96(%0),104(%0),112(%0),120(%0), 96(%1),paddd (%2), 11, 3f)
-#undef IDCT_CORE
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
"movq " #src5 ", %%mm3 \n\t" /* R7 R5 r7 r5 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
- "movq 40(%2), %%mm7 \n\t" /* C7 C5 C7 C5 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B0 b0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ #rounder ", %%mm0 \n\t"\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 56(%2), %%mm1 \n\t" /* C7 C5 C7 C5 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* C7R7+C5R5 C7r7+C5r5 */\
+ "pmaddwd 64(%2), %%mm2 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm1, %%mm7 \n\t" /* B0 b0 */\
+ "movq 72(%2), %%mm1 \n\t" /* -C5 -C1 -C5 -C1 */\
+ "pmaddwd %%mm3, %%mm1 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
"paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "paddd %%mm2, %%mm1 \n\t" /* B1 b1 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE0b(%%mm6, %%mm4, dst) \
-\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
- "movq 72(%2), %%mm7 \n\t" /* -C5 -C1 -C5 -C1 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /* -C5R7-C1R5 -C5r7-C1r5 */\
-\
- "movq 80(%2), %%mm4 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm5, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm5, %%mm5 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm5 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm5 \n\t"\
- WRITE1b(%%mm6, %%mm5, dst, %%mm7) \
-\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
- "movq 104(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "movq %%mm0, %%mm2 \n\t" /* A1 a1 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm1, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm2, %%mm2 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm2, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq " #src1 ", %%mm0 \n\t" /* R3 R1 r3 r1 */\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "movq 88(%2), %%mm7 \n\t" /* C3 C7 C3 C7 */\
+ "pmaddwd 96(%2), %%mm0 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
"pmaddwd %%mm3, %%mm7 \n\t" /* C3R7+C7R5 C3r7+C7r5 */\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm7, %%mm6 \n\t" /* B1 b1 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
- "pmaddwd 136(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "movq %%mm5, %%mm2 \n\t" /* A2 a2 */\
+ "pmaddwd 104(%2), %%mm3 \n\t" /* -C1R7+C3R5 -C1r7+C3r5 */\
+ "paddd %%mm7, %%mm4 \n\t" /* B2 b2 */\
+ "paddd %%mm4, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm0, %%mm3 \n\t" /* B3 b3 */\
+ "paddd %%mm3, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm3, %%mm4 \n\t" /* a3-B3 a3-b3 */\
"psrad $" #shift ", %%mm6 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm3, %%mm2 \n\t" /* B3 b3 */\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
- "psrad $" #shift ", %%mm2 \n\t"\
- "psrad $" #shift ", %%mm0 \n\t"\
- WRITE2b(%%mm6, %%mm4, %%mm2, %%mm0, dst)
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+ "packssdw %%mm2, %%mm2 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm2, 32+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"jmp 9f \n\t"
"#.balign 16 \n\t"\
"3: \n\t"
-#undef IDCT_CORE
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ #rounder ", %%mm0 \n\t"\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 64(%2), %%mm3 \n\t"\
+ "pmaddwd %%mm2, %%mm3 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
"paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE0b(%%mm6, %%mm4, dst) \
-\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
-\
- "movq 80(%2), %%mm4 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm5, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm5, %%mm5 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm5 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "movq %%mm0, %%mm1 \n\t" /* A1 a1 */\
+ "paddd %%mm3, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm3, %%mm1 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm1, %%mm1 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm1, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm2, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "pmaddwd 96(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "movq %%mm5, %%mm1 \n\t" /* A2 a2 */\
+ "paddd %%mm4, %%mm1 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm1 \n\t"\
"psrad $" #shift ", %%mm5 \n\t"\
- WRITE1b(%%mm6, %%mm5, dst, %%mm7) \
-\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm2, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm2, %%mm4 \n\t" /* a3-B3 a3-b3 */\
"psrad $" #shift ", %%mm6 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
- "psrad $" #shift ", %%mm2 \n\t"\
- "psrad $" #shift ", %%mm0 \n\t"\
- WRITE2b(%%mm6, %%mm4, %%mm2, %%mm0, dst)
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+ "packssdw %%mm1, %%mm1 \n\t" /* A2+B2 a2+b2 */\
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm1, 32+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"jmp 9f \n\t"
"#.balign 16 \n\t"\
"5: \n\t"
-#undef IDCT_CORE
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "movq %%mm4, %%mm6\n\t"\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "movq %%mm5, %%mm7\n\t"\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 8+" #src0 ", %%mm2 \n\t" /*2R2 R0 r2 r0 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /*2C2R2+C4R0 C2r2+C4r0 */\
- "movq 8+" #src4 ", %%mm3 \n\t" /*2R6 R4 r6 r4 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /*2C6R6+C4R4 C6r6+C4r4 */\
-\
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
+ #rounder ", %%mm0 \n\t"\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\
+ "movq 8+" #src4 ", %%mm3 \n\t" /* R6 R2 r6 r2 */\
+ "movq 16(%2), %%mm1 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm2, %%mm1 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm7, %%mm2 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm3, %%mm7 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "pmaddwd 40(%2), %%mm3 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ #rounder ", %%mm1 \n\t"\
+ "paddd %%mm1, %%mm7 \n\t" /* A0 a0 */\
+ "paddd %%mm1, %%mm1 \n\t" /* 2C0 2c0 */\
+ #rounder ", %%mm2 \n\t"\
+ "psubd %%mm7, %%mm1 \n\t" /* A3 a3 */\
+ "paddd %%mm2, %%mm3 \n\t" /* A1 a1 */\
+ "paddd %%mm2, %%mm2 \n\t" /* 2C1 2c1 */\
+ "psubd %%mm3, %%mm2 \n\t" /* A2 a2 */\
"psrad $" #shift ", %%mm4 \n\t"\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
-\
- "paddd %%mm7, %%mm6 \n\t" /*2A0 a0 */\
- "movq 56(%2), %%mm7 \n\t" /* -C2 -C4 -C2 -C4 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "pmaddwd %%mm1, %%mm7 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
-\
- "packssdw %%mm6, %%mm4 \n\t" /* C0, c0, C0, c0 */\
- "movq 48(%2), %%mm6 \n\t" /* C6 C4 C6 C4 */\
- "movq %%mm4, " #dst " \n\t" /* C0, c0 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /*2C6R2+C4R0 C6r2+C4r0 */\
-\
- "movq %%mm4, 112+" #dst " \n\t" /* C0, c0 */\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm3, %%mm4 \n\t" /*2-C2R6-C4R4 -C2r6-C4r4 */\
-\
- "paddd %%mm5, %%mm7 \n\t" /* A1 a1 */\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
"psrad $" #shift ", %%mm7 \n\t"\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
-\
- "paddd %%mm4, %%mm6 \n\t" /*2A1 a1 */\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
-\
- "psrad $" #shift ", %%mm6 \n\t"\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
-\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "packssdw %%mm6, %%mm7 \n\t" /* C1, c1, C1, c1 */\
-\
- "movq 80(%2), %%mm6 \n\t" /* -C6 C4 -C6 C4 */\
- "movq %%mm7, 16+" #dst " \n\t" /* C1, c1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /*2-C6R2+C4R0 -C6r2+C4r0 */\
-\
- "movq %%mm7, 96+" #dst " \n\t" /* C1, c1 */\
- "movq 88(%2), %%mm7 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm3, %%mm7 \n\t" /*2C2R6-C4R4 C2r6-C4r4 */\
-\
- "pmaddwd 112(%2), %%mm2 \n\t" /*2-C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
-\
- "pmaddwd 120(%2), %%mm3 \n\t" /*2-C6R6+C4R4 -C6r6+C4r4 */\
- "psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm7, %%mm6 \n\t" /*2A2 a2 */\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
-\
- "psrad $" #shift ", %%mm6 \n\t"\
-\
- "packssdw %%mm6, %%mm4 \n\t" /* C2, c2, C2, c2 */\
- "movq %%mm4, 32+" #dst " \n\t" /* C2, c2 */\
+ "psrad $" #shift ", %%mm3 \n\t"\
+ "packssdw %%mm7, %%mm4 \n\t" /* A0 a0 */\
+ "movq %%mm4, " #dst " \n\t"\
"psrad $" #shift ", %%mm0 \n\t"\
- "paddd %%mm3, %%mm2 \n\t" /*2A3 a3 */\
-\
- "movq %%mm4, 80+" #dst " \n\t" /* C2, c2 */\
+ "packssdw %%mm3, %%mm0 \n\t" /* A1 a1 */\
+ "movq %%mm0, 16+" #dst " \n\t"\
+ "movq %%mm0, 96+" #dst " \n\t"\
+ "movq %%mm4, 112+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "psrad $" #shift ", %%mm6 \n\t"\
"psrad $" #shift ", %%mm2 \n\t"\
-\
- "packssdw %%mm2, %%mm0 \n\t" /* C3, c3, C3, c3 */\
- "movq %%mm0, 48+" #dst " \n\t" /* C3, c3 */\
- "movq %%mm0, 64+" #dst " \n\t" /* C3, c3 */\
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-//IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-//IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+ "packssdw %%mm2, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movq %%mm5, 32+" #dst " \n\t"\
+ "psrad $" #shift ", %%mm1 \n\t"\
+ "packssdw %%mm1, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movq %%mm6, 48+" #dst " \n\t"\
+ "movq %%mm6, 64+" #dst " \n\t"\
+ "movq %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"jmp 9f \n\t"
"#.balign 16 \n\t"\
"1: \n\t"
-#undef IDCT_CORE
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq " #src4 ", %%mm1 \n\t" /* R6 R4 r6 r4 */\
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq " #src4 ", %%mm1 \n\t" /* R6 R2 r6 r2 */\
"movq " #src1 ", %%mm2 \n\t" /* R3 R1 r3 r1 */\
- "movq 16(%2), %%mm4 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 24(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C4R4 C6r6+C4r4 */\
- "movq 32(%2), %%mm6 \n\t" /* C3 C1 C3 C1 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm5 \n\t" /* C6 C2 C6 C2 */\
+ "pmaddwd %%mm1, %%mm5 \n\t" /* C6R6+C2R2 C6r6+C2r2 */\
+ "movq 40(%2), %%mm6 \n\t" /* -C2 C6 -C2 C6 */\
+ "pmaddwd %%mm6, %%mm1 \n\t" /* -C2R6+C6R2 -C2r6+C6r2 */\
+ #rounder ", %%mm4 \n\t"\
+ "movq %%mm4, %%mm6 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 48(%2), %%mm7 \n\t" /* C3 C1 C3 C1 */\
+ #rounder ", %%mm0 \n\t"\
+ "pmaddwd %%mm2, %%mm7 \n\t" /* C3R3+C1R1 C3r3+C1r1 */\
"paddd %%mm5, %%mm4 \n\t" /* A0 a0 */\
-\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A0+B0 a0+b0 */\
+ "psubd %%mm5, %%mm6 \n\t" /* A3 a3 */\
+ "movq %%mm0, %%mm5 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1 a1 */\
+ "psubd %%mm1, %%mm5 \n\t" /* A2 a2 */\
+ "movq 64(%2), %%mm1 \n\t"\
+ "pmaddwd %%mm2, %%mm1 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
+ "paddd %%mm4, %%mm7 \n\t" /* A0+B0 a0+b0 */\
"paddd %%mm4, %%mm4 \n\t" /* 2A0 2a0 */\
- "psubd %%mm6, %%mm4 \n\t" /* A0-B0 a0-b0 */\
- "psrad $" #shift ", %%mm6 \n\t"\
- "psrad $" #shift ", %%mm4 \n\t"\
- WRITE0b(%%mm6, %%mm4, dst) \
-\
- "movq 56(%2), %%mm4 \n\t" /* -C2 -C4 -C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* -C2R6-C4R4 -C2r6-C4r4 */\
- "movq 64(%2), %%mm6 \n\t" /* -C7 C3 -C7 C3 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C7R3+C3R1 -C7r3+C3r1 */\
- "paddd %%mm5, %%mm4 \n\t" /* A1 a1 */\
-\
- "movq 80(%2), %%mm5 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm5 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "psrad $" #shift ", %%mm6 \n\t"\
+ "psubd %%mm7, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "psrad $" #shift ", %%mm7 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- WRITE1b(%%mm6, %%mm4, dst, %%mm7) \
-\
- "movq 88(%2), %%mm4 \n\t" /* C2 -C4 C2 -C4 */\
- "pmaddwd %%mm1, %%mm4 \n\t" /* C2R6-C4R4 C2r6-C4r4 */\
- "movq 96(%2), %%mm6 \n\t" /* -C1 C5 -C1 C5 */\
- "pmaddwd %%mm2, %%mm6 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
- "paddd %%mm5, %%mm4 \n\t" /* A2 a2 */\
-\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "paddd %%mm4, %%mm6 \n\t" /* A1+B1 a1+b1 */\
- "pmaddwd 120(%2), %%mm1 \n\t" /* -C6R6+C4R4 -C6r6+C4r4 */\
- "paddd %%mm4, %%mm4 \n\t" /* 2A1 2a1 */\
- "psubd %%mm6, %%mm4 \n\t" /* A1-B1 a1-b1 */\
- "pmaddwd 128(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "movq %%mm0, %%mm3 \n\t" /* A1 a1 */\
+ "paddd %%mm1, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "psubd %%mm1, %%mm3 \n\t" /* A1-B1 a1-b1 */\
+ "psrad $" #shift ", %%mm0 \n\t"\
+ "psrad $" #shift ", %%mm3 \n\t"\
+ "packssdw %%mm7, %%mm7 \n\t" /* A0+B0 a0+b0 */\
+ "movd %%mm7, " #dst " \n\t"\
+ "packssdw %%mm0, %%mm0 \n\t" /* A1+B1 a1+b1 */\
+ "movd %%mm0, 16+" #dst " \n\t"\
+ "packssdw %%mm3, %%mm3 \n\t" /* A1-B1 a1-b1 */\
+ "movd %%mm3, 96+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A0-B0 a0-b0 */\
+ "movd %%mm4, 112+" #dst " \n\t"\
+ "movq 80(%2), %%mm4 \n\t" /* -C1 C5 -C1 C5 */\
+ "pmaddwd %%mm2, %%mm4 \n\t" /* -C1R3+C5R1 -C1r3+C5r1 */\
+ "pmaddwd 96(%2), %%mm2 \n\t" /* -C5R3+C7R1 -C5r3+C7r1 */\
+ "movq %%mm5, %%mm3 \n\t" /* A2 a2 */\
+ "paddd %%mm4, %%mm3 \n\t" /* A2+B2 a2+b2 */\
+ "psubd %%mm4, %%mm5 \n\t" /* a2-B2 a2-b2 */\
+ "psrad $" #shift ", %%mm3 \n\t"\
+ "psrad $" #shift ", %%mm5 \n\t"\
+ "movq %%mm6, %%mm4 \n\t" /* A3 a3 */\
+ "paddd %%mm2, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "psubd %%mm2, %%mm4 \n\t" /* a3-B3 a3-b3 */\
"psrad $" #shift ", %%mm6 \n\t"\
+ "packssdw %%mm3, %%mm3 \n\t" /* A2+B2 a2+b2 */\
+ "movd %%mm3, 32+" #dst " \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
-\
- "paddd %%mm1, %%mm0 \n\t" /* A3 a3 */\
- "paddd %%mm0, %%mm2 \n\t" /* A3+B3 a3+b3 */\
- "paddd %%mm0, %%mm0 \n\t" /* 2A3 2a3 */\
- "psubd %%mm2, %%mm0 \n\t" /* A3-B3 a3-b3 */\
- "psrad $" #shift ", %%mm2 \n\t"\
- "psrad $" #shift ", %%mm0 \n\t"\
- WRITE2b(%%mm6, %%mm4, %%mm2, %%mm0, dst)
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+ "packssdw %%mm6, %%mm6 \n\t" /* A3+B3 a3+b3 */\
+ "movd %%mm6, 48+" #dst " \n\t"\
+ "packssdw %%mm4, %%mm4 \n\t" /* A3-B3 a3-b3 */\
+ "packssdw %%mm5, %%mm5 \n\t" /* A2-B2 a2-b2 */\
+ "movd %%mm4, 64+" #dst " \n\t"\
+ "movd %%mm5, 80+" #dst " \n\t"
+
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+IDCT( (%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
"jmp 9f \n\t"
"#.balign 16 \n\t"
"7: \n\t"
-#undef IDCT_CORE
-#define IDCT_CORE(src0, src4, src1, src5, dst, rounder, shift) \
- "movq " #src0 ", %%mm0 \n\t" /* R2 R0 r2 r0 */\
- "movq 16(%2), %%mm2 \n\t" /* C2 C4 C2 C4 */\
- "movq 8+" #src0 ", %%mm1 \n\t" /* R2 R0 r2 r0 */\
- "pmaddwd %%mm0, %%mm2 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
- "movq 16(%2), %%mm3 \n\t" /* C2 C4 C2 C4 */\
- "pmaddwd %%mm1, %%mm3 \n\t" /* C2R2+C4R0 C2r2+C4r0 */\
-\
- "movq 48(%2), %%mm4 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm0, %%mm4 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "movq 48(%2), %%mm5 \n\t" /* C6 C4 C6 C4 */\
- "pmaddwd %%mm1, %%mm5 \n\t" /* C6R2+C4R0 C6r2+C4r0 */\
- "movq 80(%2), %%mm6 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm0, %%mm6 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "movq 80(%2), %%mm7 \n\t" /* -C6 C4 -C6 C4 */\
- "pmaddwd %%mm1, %%mm7 \n\t" /* -C6R2+C4R0 -C6r2+C4r0 */\
- "pmaddwd 112(%2), %%mm0 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "psrad $" #shift ", %%mm2 \n\t"\
- "psrad $" #shift ", %%mm3 \n\t"\
- "pmaddwd 112(%2), %%mm1 \n\t" /* -C2R2+C4R0 -C2r2+C4r0 */\
- "packssdw %%mm3, %%mm2 \n\t" /* C0, c0, C0, c0 */\
- "movq %%mm2, " #dst " \n\t" /* C0, c0 */\
+#undef IDCT
+#define IDCT(src0, src4, src1, src5, dst, rounder, shift) \
+ "movq " #src0 ", %%mm0 \n\t" /* R4 R0 r4 r0 */\
+ "movq 16(%2), %%mm4 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm0, %%mm4 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm5 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm5, %%mm0 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ #rounder ", %%mm4 \n\t"\
+ #rounder ", %%mm0 \n\t"\
"psrad $" #shift ", %%mm4 \n\t"\
- "psrad $" #shift ", %%mm5 \n\t"\
- "movq %%mm2, 112+" #dst " \n\t" /* C0, c0 */\
- "packssdw %%mm5, %%mm4 \n\t" /* C1, c1, C1, c1 */\
- "movq %%mm4, 16+" #dst " \n\t" /* C0, c0 */\
- "psrad $" #shift ", %%mm7 \n\t"\
- "psrad $" #shift ", %%mm6 \n\t"\
- "movq %%mm4, 96+" #dst " \n\t" /* C0, c0 */\
- "packssdw %%mm7, %%mm6 \n\t" /* C2, c2, C2, c2 */\
- "movq %%mm6, 32+" #dst " \n\t" /* C0, c0 */\
"psrad $" #shift ", %%mm0 \n\t"\
- "movq %%mm6, 80+" #dst " \n\t" /* C0, c0 */\
+ "movq 8+" #src0 ", %%mm2 \n\t" /* R4 R0 r4 r0 */\
+ "movq 16(%2), %%mm1 \n\t" /* C4 C4 C4 C4 */\
+ "pmaddwd %%mm2, %%mm1 \n\t" /* C4R4+C4R0 C4r4+C4r0 */\
+ "movq 24(%2), %%mm7 \n\t" /* -C4 C4 -C4 C4 */\
+ "pmaddwd %%mm7, %%mm2 \n\t" /* -C4R4+C4R0 -C4r4+C4r0 */\
+ "movq 32(%2), %%mm7 \n\t" /* C6 C2 C6 C2 */\
+ #rounder ", %%mm1 \n\t"\
+ #rounder ", %%mm2 \n\t"\
"psrad $" #shift ", %%mm1 \n\t"\
- "packssdw %%mm1, %%mm0 \n\t" /* C3, c3, C3, c3 */\
- "movq %%mm0, 48+" #dst " \n\t" /* C0, c0 */\
- "movq %%mm0, 64+" #dst " \n\t" /* C0, c0 */\
-
-//IDCT_CORE( src0, src4, src1, src5, dst, rounder, shift)
-IDCT_CORE( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
-//IDCT_CORE( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
-IDCT_CORE( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
-//IDCT_CORE( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
+ "packssdw %%mm1, %%mm4 \n\t" /* A0 a0 */\
+ "movq %%mm4, " #dst " \n\t"\
+ "psrad $" #shift ", %%mm2 \n\t"\
+ "packssdw %%mm2, %%mm0 \n\t" /* A1 a1 */\
+ "movq %%mm0, 16+" #dst " \n\t"\
+ "movq %%mm0, 96+" #dst " \n\t"\
+ "movq %%mm4, 112+" #dst " \n\t"\
+ "movq %%mm0, 32+" #dst " \n\t"\
+ "movq %%mm4, 48+" #dst " \n\t"\
+ "movq %%mm4, 64+" #dst " \n\t"\
+ "movq %%mm0, 80+" #dst " \n\t"
+
+//IDCT( src0, src4, src1, src5, dst, rounder, shift)
+IDCT( 0(%1), 64(%1), 32(%1), 96(%1), 0(%0),/nop, 20)
+//IDCT( 8(%1), 72(%1), 40(%1), 104(%1), 4(%0),/nop, 20)
+IDCT( 16(%1), 80(%1), 48(%1), 112(%1), 8(%0),/nop, 20)
+//IDCT( 24(%1), 88(%1), 56(%1), 120(%1), 12(%0),/nop, 20)
#endif
/*
Input
- 00 20 02 22 40 60 42 62
- 10 30 12 32 50 70 52 72
- 01 21 03 23 41 61 43 63
+ 00 40 04 44 20 60 24 64
+ 10 30 14 34 50 70 54 74
+ 01 41 03 43 21 61 23 63
11 31 13 33 51 71 53 73
- 04 24 06 26 44 64 46 66
- 14 34 16 36 54 74 56 76
-...
-*/
-/*
+ 02 42 06 46 22 62 26 66
+ 12 32 16 36 52 72 56 76
+ 05 45 07 47 25 65 27 67
+ 15 35 17 37 55 75 57 77
+
Temp
- 00 02 10 12 20 22 30 32
- 40 42 50 52 60 62 70 72
+ 00 04 10 14 20 24 30 34
+ 40 44 50 54 60 64 70 74
01 03 11 13 21 23 31 33
41 43 51 53 61 63 71 73
- 04 06 14 16 24 26 34 36
- 44 46 54 56 64 66 74 76
+ 02 06 12 16 22 26 32 36
+ 42 46 52 56 62 66 72 76
05 07 15 17 25 27 35 37
45 47 55 57 65 67 75 77
*/
-/*
-Output
- 00 10 20 30 40 50 60 70
- 01 11 21 31 41 51 61 71
-...
-*/
-
"9: \n\t"
:: "r" (block), "r" (temp), "r" (coeffs)
: "%eax"
);
-/*
-idctCol(block, temp);
-idctCol(block+1, temp+2);
-idctCol(block+2, temp+4);
-idctCol(block+3, temp+6);
-idctCol(block+4, temp+8);
-idctCol(block+5, temp+10);
-idctCol(block+6, temp+12);
-idctCol(block+7, temp+14);
-*/
}
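The layout comments above describe how the MMX row pass leaves the coefficients partially transposed in temp before the column pass writes the final values back into block. As a point of reference only, a plain scalar separable 8x8 IDCT with the same row-then-column structure might look like the sketch below; it is a floating-point illustration, not the fixed-point MMX algorithm in this file, and the names idct_1d_ref and simple_idct_ref are hypothetical.

/* Illustrative scalar reference only -- NOT the MMX code above.
 * One 1-D pass over rows into a temp buffer, one 1-D pass over
 * columns back into the block, mirroring the row/column split that
 * the layout comments describe. */
#include <math.h>
#include <stdint.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static void idct_1d_ref(const double *in, int stride, double *out)
{
    /* 8-point 1-D inverse DCT: f(x) = 0.5 * sum_u C(u) F(u) cos((2x+1)u*pi/16) */
    for (int x = 0; x < 8; x++) {
        double s = 0.0;
        for (int u = 0; u < 8; u++) {
            double cu = (u == 0) ? sqrt(0.5) : 1.0;
            s += cu * in[u * stride] * cos((2 * x + 1) * u * M_PI / 16.0);
        }
        out[x] = 0.5 * s;
    }
}

static void simple_idct_ref(int16_t *block)
{
    double tmp[64], in[8], out[8];

    for (int row = 0; row < 8; row++) {          /* row pass -> tmp */
        for (int i = 0; i < 8; i++)
            in[i] = block[row * 8 + i];
        idct_1d_ref(in, 1, out);
        for (int i = 0; i < 8; i++)
            tmp[row * 8 + i] = out[i];
    }
    for (int c = 0; c < 8; c++) {                /* column pass -> block */
        idct_1d_ref(tmp + c, 8, out);
        for (int r = 0; r < 8; r++)
            block[r * 8 + c] = (int16_t)lrint(out[r]);
    }
}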
void simple_idct_mmx(int16_t *block)
{
- static int imax=0, imin=0;
- static int omax=0, omin=0;
- int i, j;
-/*
- for(i=0; i<64; i++)
- {
- if(block[i] > imax)
- {
- imax= block[i];
- printf("Input-Max: %d\n", imax);
- printf("Input-Min: %d\n", imin);
- printf("Output-Max: %d\n", omax);
- printf("Output-Min: %d\n", omin);
- }
- if(block[i] < imin)
- {
- imin= block[i];
- printf("Input-Max: %d\n", imax);
- printf("Input-Min: %d\n", imin);
- printf("Output-Max: %d\n", omax);
- printf("Output-Min: %d\n", omin);
- }
- }*/
-/* static int stat[64];
- for(j=0; j<4; j++)
- {
- static int line[8]={0,2,1,3,4,6,5,7};
- for(i=0; i<16; i++)
- {
- if(block[j*16+i])
- {
- stat[j*16+1]++;
- break;
- }
- }
- for(i=0; i<16; i++)
- {
- if(block[j*16+i] && i!=0 && i!=2)
- {
- stat[j*16+2]++;
- break;
- }
- }
- }
- stat[0]++;*/
-/* for(i=1; i<8; i++)
- {
- if(block[i] != 0)
- {
- stat[1]++;
- break;
- }
- }
- for(i=32; i<64; i++)
- {
- if(block[i] != 0)
- {
- stat[2]++;
- break;
- }
- }
- stat[0]++;
-*/
-// return;
idct(block);
-// memset(block, 0, 128);
-/*
- if(stat[0] > 100000)
- for(i=0; i<64; i++)
- {
- if((i&7) == 0) printf("\n");
- printf("%06d ", stat[i]);
- }
-*/
-/*
- for(i=0; i<4; i++) printf("%d", stat[1+i*16]);
- printf(" ");
- for(i=0; i<4; i++) printf("%d", stat[2+i*16]);
- printf("\n");
-*/
-// printf("%d", stat[2]);
-
-// memset(stat, 0, 256);
-
-/*
- for(i=0; i<64; i++)
- {
- if(block[i] > omax)
- {
- omax= block[i];
- printf("Input-Max: %d\n", imax);
- printf("Input-Min: %d\n", imin);
- printf("Output-Max: %d\n", omax);
- printf("Output-Min: %d\n", omin);
- }
- if(block[i] < omin)
- {
- omin= block[i];
- printf("Input-Max: %d\n", imax);
- printf("Input-Min: %d\n", imin);
- printf("Output-Max: %d\n", omax);
- printf("Output-Min: %d\n", omin);
- }
- }*/
}
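With the instrumentation removed, the wrapper simply forwards to the static idct() above. One way to exercise it is a small harness that runs the MMX routine and a scalar reference (such as the simple_idct_ref sketch earlier) on the same coefficients and compares the results; the 12-bit input range, the random test data, and the tolerance-free comparison below are assumptions for illustration, not values taken from this file.

/* Hypothetical smoke test, not part of the diff: compares the MMX
 * path against the scalar reference sketched above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    int16_t a[64], b[64];

    srand(1);
    for (int i = 0; i < 64; i++)
        a[i] = (int16_t)((rand() % 4097) - 2048);   /* assumed 12-bit coefficient range */
    memcpy(b, a, sizeof(a));

    simple_idct_mmx(a);      /* MMX path from this file */
    simple_idct_ref(b);      /* scalar reference sketch */

    int maxdiff = 0;
    for (int i = 0; i < 64; i++) {
        int d = abs(a[i] - b[i]);
        if (d > maxdiff)
            maxdiff = d;
    }
    printf("max abs difference: %d\n", maxdiff);
    return 0;
}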