author     Stephen Torri <storri@users.sourceforge.net>   2002-10-22 05:51:04 +0000
committer  Stephen Torri <storri@users.sourceforge.net>   2002-10-22 05:51:04 +0000
commit     a3c9d3e5b444435463f3c1f2986b45c38b9df09b (patch)
tree       0db7ac623e31de71fbe547574a95f602a3e01660 /src
parent     5709b8665722d3cc4a4a4005b14baac9beaa12e6 (diff)
download   xine-lib-a3c9d3e5b444435463f3c1f2986b45c38b9df09b.tar.gz
           xine-lib-a3c9d3e5b444435463f3c1f2986b45c38b9df09b.tar.bz2
Changed comments to standard /* ... */
Changed 'asm' to __asm__

CVS patchset: 2929
CVS date: 2002/10/22 05:51:04
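For context: //-style comments and the bare 'asm' keyword are GNU/C99 spellings that fail when the tree is built in strict ISO C90 mode (e.g. gcc -ansi -pedantic), while /* ... */ comments and the reserved '__asm__' spelling are accepted in every mode. A minimal before/after sketch of the two substitutions this patch applies (hypothetical snippet, not taken from the file):

    /* before: GNU/C99-only spellings */
    // clear the accumulator
    asm volatile ("pxor %%mm0, %%mm0" ::: "memory");

    /* after: accepted even with gcc -ansi -pedantic */
    /* clear the accumulator */
    __asm__ volatile ("pxor %%mm0, %%mm0" ::: "memory");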
Diffstat (limited to 'src')
-rw-r--r--  src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c  614
1 files changed, 307 insertions, 307 deletions
diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c
index 10efc173f..80cf233c8 100644
--- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx.c
@@ -38,7 +38,7 @@ extern UINT32 inverse[256];
void MPV_frame_start(MpegEncContext *s)
{
if (s->pict_type == B_TYPE) {
- __asm __volatile(
+ __asm__ __volatile__(
"movl (%1), %%eax\n\t"
"movl 4(%1), %%edx\n\t"
"movl 8(%1), %%ecx\n\t"
@@ -50,7 +50,7 @@ void MPV_frame_start(MpegEncContext *s)
:"eax","edx","ecx","memory");
} else {
/* swap next and last */
- __asm __volatile(
+ __asm__ __volatile__(
"movl (%1), %%eax\n\t"
"movl 4(%1), %%edx\n\t"
"movl 8(%1), %%ecx\n\t"
@@ -78,7 +78,7 @@ static void dct_unquantize_h263_mmx(MpegEncContext *s,
DCTELEM *block, int n, int qscale)
{
int i, level, qmul, qadd, nCoeffs;
-
+
qmul = s->qscale << 1;
if (s->h263_aic && s->mb_intra)
qadd = 0;
@@ -95,7 +95,7 @@ static void dct_unquantize_h263_mmx(MpegEncContext *s,
for(i=1; i<8; i++) {
level = block[i];
if (level) {
- if (level < 0) {
+ if (level < 0) {
level = level * qmul - qadd;
} else {
level = level * qmul + qadd;
@@ -108,53 +108,53 @@ static void dct_unquantize_h263_mmx(MpegEncContext *s,
i = 0;
nCoeffs= zigzag_end[ s->block_last_index[n] ];
}
-//printf("%d %d ", qmul, qadd);
-asm volatile(
- "movd %1, %%mm6 \n\t" //qmul
- "packssdw %%mm6, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
- "movd %2, %%mm5 \n\t" //qadd
- "pxor %%mm7, %%mm7 \n\t"
- "packssdw %%mm5, %%mm5 \n\t"
- "packssdw %%mm5, %%mm5 \n\t"
- "psubw %%mm5, %%mm7 \n\t"
- "pxor %%mm4, %%mm4 \n\t"
- ".balign 16\n\t"
- "1: \n\t"
- "movq (%0, %3), %%mm0 \n\t"
- "movq 8(%0, %3), %%mm1 \n\t"
-
- "pmullw %%mm6, %%mm0 \n\t"
- "pmullw %%mm6, %%mm1 \n\t"
-
- "movq (%0, %3), %%mm2 \n\t"
- "movq 8(%0, %3), %%mm3 \n\t"
-
- "pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
- "pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
-
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
-
- "paddw %%mm7, %%mm0 \n\t"
- "paddw %%mm7, %%mm1 \n\t"
-
- "pxor %%mm0, %%mm2 \n\t"
- "pxor %%mm1, %%mm3 \n\t"
-
- "pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0
-
- "pandn %%mm2, %%mm0 \n\t"
- "pandn %%mm3, %%mm1 \n\t"
-
- "movq %%mm0, (%0, %3) \n\t"
- "movq %%mm1, 8(%0, %3) \n\t"
-
- "addl $16, %3 \n\t"
- "js 1b \n\t"
- ::"r" (block+nCoeffs), "g"(qmul), "g" (qadd), "r" (2*(i-nCoeffs))
- : "memory"
+/* printf("%d %d ", qmul, qadd); */
+__asm__ volatile(
+ "movd %1, %%mm6 \n\t" /* qmul */
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "movd %2, %%mm5 \n\t" /* qadd */
+ "pxor %%mm7, %%mm7 \n\t"
+ "packssdw %%mm5, %%mm5 \n\t"
+ "packssdw %%mm5, %%mm5 \n\t"
+ "psubw %%mm5, %%mm7 \n\t"
+ "pxor %%mm4, %%mm4 \n\t"
+ ".balign 16\n\t"
+ "1: \n\t"
+ "movq (%0, %3), %%mm0 \n\t"
+ "movq 8(%0, %3), %%mm1 \n\t"
+
+ "pmullw %%mm6, %%mm0 \n\t"
+ "pmullw %%mm6, %%mm1 \n\t"
+
+ "movq (%0, %3), %%mm2 \n\t"
+ "movq 8(%0, %3), %%mm3 \n\t"
+
+ "pcmpgtw %%mm4, %%mm2 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pcmpgtw %%mm4, %%mm3 \n\t" /* block[i] < 0 ? -1 : 0 */
+
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+
+ "paddw %%mm7, %%mm0 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+
+ "pxor %%mm0, %%mm2 \n\t"
+ "pxor %%mm1, %%mm3 \n\t"
+
+ "pcmpeqw %%mm7, %%mm0 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "pcmpeqw %%mm7, %%mm1 \n\t" /* block[i] == 0 ? -1 : 0 */
+
+ "pandn %%mm2, %%mm0 \n\t"
+ "pandn %%mm3, %%mm1 \n\t"
+
+ "movq %%mm0, (%0, %3) \n\t"
+ "movq %%mm1, 8(%0, %3) \n\t"
+
+ "addl $16, %3 \n\t"
+ "js 1b \n\t"
+ ::"r" (block+nCoeffs), "g"(qmul), "g" (qadd), "r" (2*(i-nCoeffs))
+ : "memory"
);
}
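The hunk above converts the MMX loop that dequantises the remaining H.263 coefficients; the pcmpgtw/pcmpeqw masking is a branchless form of the scalar loop visible at the top of the same function. A plain-C sketch of the per-coefficient operation (helper name and loop bounds are illustrative, not part of the patch):

    /* scalar equivalent of the MMX dequantisation loop (sketch) */
    static void dct_unquantize_h263_scalar(short *block, int i, int nCoeffs,
                                           int qmul, int qadd)
    {
        for (; i < nCoeffs; i++) {
            int level = block[i];
            if (level == 0)
                continue;                    /* zero coefficients stay zero */
            if (level < 0)
                level = level * qmul - qadd; /* negative: scale, then subtract qadd */
            else
                level = level * qmul + qadd; /* positive: scale, then add qadd */
            block[i] = level;
        }
    }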
@@ -193,123 +193,123 @@ static void dct_unquantize_mpeg1_mmx(MpegEncContext *s,
{
int nCoeffs;
const UINT16 *quant_matrix;
-
+
if(s->alternate_scan) nCoeffs= 64;
else nCoeffs= zigzag_end[ s->block_last_index[n] ];
if (s->mb_intra) {
int block0;
- if (n < 4)
+ if (n < 4)
block0 = block[0] * s->y_dc_scale;
else
block0 = block[0] * s->c_dc_scale;
/* XXX: only mpeg1 */
quant_matrix = s->intra_matrix;
-asm volatile(
- "pcmpeqw %%mm7, %%mm7 \n\t"
- "psrlw $15, %%mm7 \n\t"
- "movd %2, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
+__asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $15, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
"movl %3, %%eax \n\t"
- ".balign 16\n\t"
- "1: \n\t"
- "movq (%0, %%eax), %%mm0 \n\t"
- "movq 8(%0, %%eax), %%mm1 \n\t"
- "movq (%1, %%eax), %%mm4 \n\t"
- "movq 8(%1, %%eax), %%mm5 \n\t"
- "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
- "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
- "pxor %%mm2, %%mm2 \n\t"
- "pxor %%mm3, %%mm3 \n\t"
- "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
- "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
- "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
- "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
- "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
- "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
- "psraw $3, %%mm0 \n\t"
- "psraw $3, %%mm1 \n\t"
- "psubw %%mm7, %%mm0 \n\t"
- "psubw %%mm7, %%mm1 \n\t"
- "por %%mm7, %%mm0 \n\t"
- "por %%mm7, %%mm1 \n\t"
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
- "psubw %%mm2, %%mm0 \n\t"
- "psubw %%mm3, %%mm1 \n\t"
- "pandn %%mm0, %%mm4 \n\t"
- "pandn %%mm1, %%mm5 \n\t"
- "movq %%mm4, (%0, %%eax) \n\t"
- "movq %%mm5, 8(%0, %%eax) \n\t"
-
- "addl $16, %%eax \n\t"
- "js 1b \n\t"
- ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
- : "%eax", "memory"
- );
+ ".balign 16\n\t"
+ "1: \n\t"
+ "movq (%0, %%eax), %%mm0 \n\t"
+ "movq 8(%0, %%eax), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm4 \n\t"
+ "movq 8(%1, %%eax), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" /* q=qscale*quant_matrix[i] */
+ "pmullw %%mm6, %%mm5 \n\t" /* q=qscale*quant_matrix[i] */
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pcmpgtw %%mm1, %%mm3 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" /* abs(block[i]) */
+ "psubw %%mm3, %%mm1 \n\t" /* abs(block[i]) */
+ "pmullw %%mm4, %%mm0 \n\t" /* abs(block[i])*q */
+ "pmullw %%mm5, %%mm1 \n\t" /* abs(block[i])*q */
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" /* FIXME slow */
+ "pcmpeqw (%0, %%eax), %%mm4 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "psraw $3, %%mm0 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+ "psubw %%mm7, %%mm1 \n\t"
+ "por %%mm7, %%mm0 \n\t"
+ "por %%mm7, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "movq %%mm4, (%0, %%eax) \n\t"
+ "movq %%mm5, 8(%0, %%eax) \n\t"
+
+ "addl $16, %%eax \n\t"
+ "js 1b \n\t"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
+ : "%eax", "memory"
+ );
block[0]= block0;
} else {
quant_matrix = s->inter_matrix;
-asm volatile(
- "pcmpeqw %%mm7, %%mm7 \n\t"
- "psrlw $15, %%mm7 \n\t"
- "movd %2, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
+__asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $15, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
"movl %3, %%eax \n\t"
- ".balign 16\n\t"
- "1: \n\t"
- "movq (%0, %%eax), %%mm0 \n\t"
- "movq 8(%0, %%eax), %%mm1 \n\t"
- "movq (%1, %%eax), %%mm4 \n\t"
- "movq 8(%1, %%eax), %%mm5 \n\t"
- "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
- "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
- "pxor %%mm2, %%mm2 \n\t"
- "pxor %%mm3, %%mm3 \n\t"
- "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
- "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
- "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
- "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
- "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
- "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
- "paddw %%mm7, %%mm0 \n\t" // abs(block[i])*2 + 1
- "paddw %%mm7, %%mm1 \n\t" // abs(block[i])*2 + 1
- "pmullw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
- "pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
- "psraw $4, %%mm0 \n\t"
- "psraw $4, %%mm1 \n\t"
- "psubw %%mm7, %%mm0 \n\t"
- "psubw %%mm7, %%mm1 \n\t"
- "por %%mm7, %%mm0 \n\t"
- "por %%mm7, %%mm1 \n\t"
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
- "psubw %%mm2, %%mm0 \n\t"
- "psubw %%mm3, %%mm1 \n\t"
- "pandn %%mm0, %%mm4 \n\t"
- "pandn %%mm1, %%mm5 \n\t"
- "movq %%mm4, (%0, %%eax) \n\t"
- "movq %%mm5, 8(%0, %%eax) \n\t"
-
- "addl $16, %%eax \n\t"
- "js 1b \n\t"
- ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
- : "%eax", "memory"
+ ".balign 16\n\t"
+ "1: \n\t"
+ "movq (%0, %%eax), %%mm0 \n\t"
+ "movq 8(%0, %%eax), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm4 \n\t"
+ "movq 8(%1, %%eax), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" /* q=qscale*quant_matrix[i] */
+ "pmullw %%mm6, %%mm5 \n\t" /* q=qscale*quant_matrix[i] */
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pcmpgtw %%mm1, %%mm3 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" /* abs(block[i]) */
+ "psubw %%mm3, %%mm1 \n\t" /* abs(block[i]) */
+ "paddw %%mm0, %%mm0 \n\t" /* abs(block[i])*2 */
+ "paddw %%mm1, %%mm1 \n\t" /* abs(block[i])*2 */
+ "paddw %%mm7, %%mm0 \n\t" /* abs(block[i])*2 + 1 */
+ "paddw %%mm7, %%mm1 \n\t" /* abs(block[i])*2 + 1 */
+ "pmullw %%mm4, %%mm0 \n\t" /* (abs(block[i])*2 + 1)*q */
+ "pmullw %%mm5, %%mm1 \n\t" /* (abs(block[i])*2 + 1)*q */
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" /* FIXME slow */
+ "pcmpeqw (%0, %%eax), %%mm4 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "psraw $4, %%mm0 \n\t"
+ "psraw $4, %%mm1 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+ "psubw %%mm7, %%mm1 \n\t"
+ "por %%mm7, %%mm0 \n\t"
+ "por %%mm7, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "movq %%mm4, (%0, %%eax) \n\t"
+ "movq %%mm5, 8(%0, %%eax) \n\t"
+
+ "addl $16, %%eax \n\t"
+ "js 1b \n\t"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
+ : "%eax", "memory"
);
}
}
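The two loops converted above perform the usual MPEG-1 reconstruction, including the "(level - 1) | 1" oddification done with psubw/por. A rough scalar rendering of the inter branch (hypothetical helper, not part of the patch):

    /* scalar sketch of the MPEG-1 inter dequantisation */
    static void dct_unquantize_mpeg1_inter_scalar(short *block, int nCoeffs,
                                                  int qscale,
                                                  const unsigned short *quant_matrix)
    {
        int i, level, sign;
        for (i = 0; i < nCoeffs; i++) {
            level = block[i];
            if (level == 0)
                continue;
            sign  = (level < 0) ? -1 : 1;
            level = sign * level;                                      /* abs(block[i]) */
            level = ((2 * level + 1) * qscale * quant_matrix[i]) >> 4;
            level = (level - 1) | 1;                                   /* force an odd result */
            block[i] = sign * level;
        }
    }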
@@ -319,115 +319,115 @@ static void dct_unquantize_mpeg2_mmx(MpegEncContext *s,
{
int nCoeffs;
const UINT16 *quant_matrix;
-
+
if(s->alternate_scan) nCoeffs= 64;
else nCoeffs= zigzag_end[ s->block_last_index[n] ];
if (s->mb_intra) {
int block0;
- if (n < 4)
+ if (n < 4)
block0 = block[0] * s->y_dc_scale;
else
block0 = block[0] * s->c_dc_scale;
quant_matrix = s->intra_matrix;
-asm volatile(
- "pcmpeqw %%mm7, %%mm7 \n\t"
- "psrlw $15, %%mm7 \n\t"
- "movd %2, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
+__asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $15, %%mm7 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
"movl %3, %%eax \n\t"
- ".balign 16\n\t"
- "1: \n\t"
- "movq (%0, %%eax), %%mm0 \n\t"
- "movq 8(%0, %%eax), %%mm1 \n\t"
- "movq (%1, %%eax), %%mm4 \n\t"
- "movq 8(%1, %%eax), %%mm5 \n\t"
- "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
- "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
- "pxor %%mm2, %%mm2 \n\t"
- "pxor %%mm3, %%mm3 \n\t"
- "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
- "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
- "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
- "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
- "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
- "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
- "psraw $3, %%mm0 \n\t"
- "psraw $3, %%mm1 \n\t"
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
- "psubw %%mm2, %%mm0 \n\t"
- "psubw %%mm3, %%mm1 \n\t"
- "pandn %%mm0, %%mm4 \n\t"
- "pandn %%mm1, %%mm5 \n\t"
- "movq %%mm4, (%0, %%eax) \n\t"
- "movq %%mm5, 8(%0, %%eax) \n\t"
-
- "addl $16, %%eax \n\t"
- "js 1b \n\t"
- ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
- : "%eax", "memory"
- );
+ ".balign 16\n\t"
+ "1: \n\t"
+ "movq (%0, %%eax), %%mm0 \n\t"
+ "movq 8(%0, %%eax), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm4 \n\t"
+ "movq 8(%1, %%eax), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" /* q=qscale*quant_matrix[i] */
+ "pmullw %%mm6, %%mm5 \n\t" /* q=qscale*quant_matrix[i] */
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pcmpgtw %%mm1, %%mm3 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" /* abs(block[i]) */
+ "psubw %%mm3, %%mm1 \n\t" /* abs(block[i]) */
+ "pmullw %%mm4, %%mm0 \n\t" /* abs(block[i])*q */
+ "pmullw %%mm5, %%mm1 \n\t" /* abs(block[i])*q */
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" /* FIXME slow */
+ "pcmpeqw (%0, %%eax), %%mm4 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "psraw $3, %%mm0 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
+ "movq %%mm4, (%0, %%eax) \n\t"
+ "movq %%mm5, 8(%0, %%eax) \n\t"
+
+ "addl $16, %%eax \n\t"
+ "js 1b \n\t"
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "g" (-2*nCoeffs)
+ : "%eax", "memory"
+ );
block[0]= block0;
- //Note, we dont do mismatch control for intra as errors cannot accumulate
+ /* Note: we don't do mismatch control for intra, as errors cannot accumulate */
} else {
quant_matrix = s->inter_matrix;
-asm volatile(
- "pcmpeqw %%mm7, %%mm7 \n\t"
+__asm__ volatile(
+ "pcmpeqw %%mm7, %%mm7 \n\t"
"psrlq $48, %%mm7 \n\t"
- "movd %2, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
- "packssdw %%mm6, %%mm6 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
+ "packssdw %%mm6, %%mm6 \n\t"
"movl %3, %%eax \n\t"
- ".balign 16\n\t"
- "1: \n\t"
- "movq (%0, %%eax), %%mm0 \n\t"
- "movq 8(%0, %%eax), %%mm1 \n\t"
- "movq (%1, %%eax), %%mm4 \n\t"
- "movq 8(%1, %%eax), %%mm5 \n\t"
- "pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
- "pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
- "pxor %%mm2, %%mm2 \n\t"
- "pxor %%mm3, %%mm3 \n\t"
- "pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
- "pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
- "psubw %%mm2, %%mm0 \n\t" // abs(block[i])
- "psubw %%mm3, %%mm1 \n\t" // abs(block[i])
- "paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
- "paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
- "pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*2*q
- "pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*2*q
- "paddw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
- "paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm5, %%mm5 \n\t" // FIXME slow
- "pcmpeqw (%0, %%eax), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
- "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" // block[i] == 0 ? -1 : 0
- "psrlw $4, %%mm0 \n\t"
- "psrlw $4, %%mm1 \n\t"
- "pxor %%mm2, %%mm0 \n\t"
- "pxor %%mm3, %%mm1 \n\t"
- "psubw %%mm2, %%mm0 \n\t"
- "psubw %%mm3, %%mm1 \n\t"
- "pandn %%mm0, %%mm4 \n\t"
- "pandn %%mm1, %%mm5 \n\t"
+ ".balign 16\n\t"
+ "1: \n\t"
+ "movq (%0, %%eax), %%mm0 \n\t"
+ "movq 8(%0, %%eax), %%mm1 \n\t"
+ "movq (%1, %%eax), %%mm4 \n\t"
+ "movq 8(%1, %%eax), %%mm5 \n\t"
+ "pmullw %%mm6, %%mm4 \n\t" /* q=qscale*quant_matrix[i] */
+ "pmullw %%mm6, %%mm5 \n\t" /* q=qscale*quant_matrix[i] */
+ "pxor %%mm2, %%mm2 \n\t"
+ "pxor %%mm3, %%mm3 \n\t"
+ "pcmpgtw %%mm0, %%mm2 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pcmpgtw %%mm1, %%mm3 \n\t" /* block[i] < 0 ? -1 : 0 */
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t" /* abs(block[i]) */
+ "psubw %%mm3, %%mm1 \n\t" /* abs(block[i]) */
+ "paddw %%mm0, %%mm0 \n\t" /* abs(block[i])*2 */
+ "paddw %%mm1, %%mm1 \n\t" /* abs(block[i])*2 */
+ "pmullw %%mm4, %%mm0 \n\t" /* abs(block[i])*2*q */
+ "pmullw %%mm5, %%mm1 \n\t" /* abs(block[i])*2*q */
+ "paddw %%mm4, %%mm0 \n\t" /* (abs(block[i])*2 + 1)*q */
+ "paddw %%mm5, %%mm1 \n\t" /* (abs(block[i])*2 + 1)*q */
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t" /* FIXME slow */
+ "pcmpeqw (%0, %%eax), %%mm4 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "pcmpeqw 8(%0, %%eax), %%mm5 \n\t" /* block[i] == 0 ? -1 : 0 */
+ "psrlw $4, %%mm0 \n\t"
+ "psrlw $4, %%mm1 \n\t"
+ "pxor %%mm2, %%mm0 \n\t"
+ "pxor %%mm3, %%mm1 \n\t"
+ "psubw %%mm2, %%mm0 \n\t"
+ "psubw %%mm3, %%mm1 \n\t"
+ "pandn %%mm0, %%mm4 \n\t"
+ "pandn %%mm1, %%mm5 \n\t"
"pxor %%mm4, %%mm7 \n\t"
"pxor %%mm5, %%mm7 \n\t"
- "movq %%mm4, (%0, %%eax) \n\t"
- "movq %%mm5, 8(%0, %%eax) \n\t"
+ "movq %%mm4, (%0, %%eax) \n\t"
+ "movq %%mm5, 8(%0, %%eax) \n\t"
- "addl $16, %%eax \n\t"
- "js 1b \n\t"
+ "addl $16, %%eax \n\t"
+ "js 1b \n\t"
"movd 124(%0, %3), %%mm0 \n\t"
"movq %%mm7, %%mm6 \n\t"
"psrlq $32, %%mm7 \n\t"
@@ -439,14 +439,14 @@ asm volatile(
"psrlq $15, %%mm7 \n\t"
"pxor %%mm7, %%mm0 \n\t"
"movd %%mm0, 124(%0, %3) \n\t"
-
- ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "r" (-2*nCoeffs)
- : "%eax", "memory"
+
+ ::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "g" (qscale), "r" (-2*nCoeffs)
+ : "%eax", "memory"
);
}
}
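The extra tail in the inter branch above (the running pxor into %%mm7 and the final movd at offset 124) is MPEG-2 mismatch control: the sum of the 64 reconstructed coefficients must be odd, otherwise the LSB of the last coefficient is toggled. A scalar sketch of that step (illustrative only):

    /* MPEG-2 mismatch control, scalar form */
    {
        int i, sum = 0;
        for (i = 0; i < 64; i++)
            sum += block[i];
        if ((sum & 1) == 0)     /* even sum: flip the LSB of the last coefficient */
            block[63] ^= 1;
    }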
-/* draw the edges of width 'w' of an image of size width, height
+/* draw the edges of width 'w' of an image of size width, height
this mmx version can only handle w==8 || w==16 */
static void draw_edges_mmx(UINT8 *buf, int wrap, int width, int height, int w)
{
@@ -458,78 +458,78 @@ static void draw_edges_mmx(UINT8 *buf, int wrap, int width, int height, int w)
ptr = buf;
if(w==8)
{
- asm volatile(
- "1: \n\t"
- "movd (%0), %%mm0 \n\t"
- "punpcklbw %%mm0, %%mm0 \n\t"
- "punpcklwd %%mm0, %%mm0 \n\t"
- "punpckldq %%mm0, %%mm0 \n\t"
- "movq %%mm0, -8(%0) \n\t"
- "movq -8(%0, %2), %%mm1 \n\t"
- "punpckhbw %%mm1, %%mm1 \n\t"
- "punpckhwd %%mm1, %%mm1 \n\t"
- "punpckhdq %%mm1, %%mm1 \n\t"
- "movq %%mm1, (%0, %2) \n\t"
- "addl %1, %0 \n\t"
- "cmpl %3, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (ptr)
- : "r" (wrap), "r" (width), "r" (ptr + wrap*height)
+ __asm__ volatile(
+ "1: \n\t"
+ "movd (%0), %%mm0 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpcklwd %%mm0, %%mm0 \n\t"
+ "punpckldq %%mm0, %%mm0 \n\t"
+ "movq %%mm0, -8(%0) \n\t"
+ "movq -8(%0, %2), %%mm1 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpckhwd %%mm1, %%mm1 \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movq %%mm1, (%0, %2) \n\t"
+ "addl %1, %0 \n\t"
+ "cmpl %3, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (ptr)
+ : "r" (wrap), "r" (width), "r" (ptr + wrap*height)
);
}
else
{
- asm volatile(
- "1: \n\t"
- "movd (%0), %%mm0 \n\t"
- "punpcklbw %%mm0, %%mm0 \n\t"
- "punpcklwd %%mm0, %%mm0 \n\t"
- "punpckldq %%mm0, %%mm0 \n\t"
- "movq %%mm0, -8(%0) \n\t"
- "movq %%mm0, -16(%0) \n\t"
- "movq -8(%0, %2), %%mm1 \n\t"
- "punpckhbw %%mm1, %%mm1 \n\t"
- "punpckhwd %%mm1, %%mm1 \n\t"
- "punpckhdq %%mm1, %%mm1 \n\t"
- "movq %%mm1, (%0, %2) \n\t"
- "movq %%mm1, 8(%0, %2) \n\t"
- "addl %1, %0 \n\t"
- "cmpl %3, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (ptr)
- : "r" (wrap), "r" (width), "r" (ptr + wrap*height)
+ __asm__ volatile(
+ "1: \n\t"
+ "movd (%0), %%mm0 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpcklwd %%mm0, %%mm0 \n\t"
+ "punpckldq %%mm0, %%mm0 \n\t"
+ "movq %%mm0, -8(%0) \n\t"
+ "movq %%mm0, -16(%0) \n\t"
+ "movq -8(%0, %2), %%mm1 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpckhwd %%mm1, %%mm1 \n\t"
+ "punpckhdq %%mm1, %%mm1 \n\t"
+ "movq %%mm1, (%0, %2) \n\t"
+ "movq %%mm1, 8(%0, %2) \n\t"
+ "addl %1, %0 \n\t"
+ "cmpl %3, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (ptr)
+ : "r" (wrap), "r" (width), "r" (ptr + wrap*height)
);
}
-
+
for(i=0;i<w;i+=4) {
/* top and bottom (and hopefully also the corners) */
ptr= buf - (i + 1) * wrap - w;
- asm volatile(
- "1: \n\t"
- "movq (%1, %0), %%mm0 \n\t"
- "movq %%mm0, (%0) \n\t"
- "movq %%mm0, (%0, %2) \n\t"
- "movq %%mm0, (%0, %2, 2) \n\t"
- "movq %%mm0, (%0, %3) \n\t"
- "addl $8, %0 \n\t"
- "cmpl %4, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (ptr)
- : "r" ((int)buf - (int)ptr - w), "r" (-wrap), "r" (-wrap*3), "r" (ptr+width+2*w)
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm0, (%0, %2) \n\t"
+ "movq %%mm0, (%0, %2, 2) \n\t"
+ "movq %%mm0, (%0, %3) \n\t"
+ "addl $8, %0 \n\t"
+ "cmpl %4, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (ptr)
+ : "r" ((int)buf - (int)ptr - w), "r" (-wrap), "r" (-wrap*3), "r" (ptr+width+2*w)
);
ptr= last_line + (i + 1) * wrap - w;
- asm volatile(
- "1: \n\t"
- "movq (%1, %0), %%mm0 \n\t"
- "movq %%mm0, (%0) \n\t"
- "movq %%mm0, (%0, %2) \n\t"
- "movq %%mm0, (%0, %2, 2) \n\t"
- "movq %%mm0, (%0, %3) \n\t"
- "addl $8, %0 \n\t"
- "cmpl %4, %0 \n\t"
- " jb 1b \n\t"
- : "+r" (ptr)
- : "r" ((int)last_line - (int)ptr - w), "r" (wrap), "r" (wrap*3), "r" (ptr+width+2*w)
+ __asm__ volatile(
+ "1: \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq %%mm0, (%0) \n\t"
+ "movq %%mm0, (%0, %2) \n\t"
+ "movq %%mm0, (%0, %2, 2) \n\t"
+ "movq %%mm0, (%0, %3) \n\t"
+ "addl $8, %0 \n\t"
+ "cmpl %4, %0 \n\t"
+ " jb 1b \n\t"
+ : "+r" (ptr)
+ : "r" ((int)last_line - (int)ptr - w), "r" (wrap), "r" (wrap*3), "r" (ptr+width+2*w)
);
}
}
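draw_edges_mmx pads the picture by replicating its border pixels so that motion compensation may read outside the visible area; as the comment above notes, the MMX version only handles w == 8 or w == 16. A generic scalar sketch of the same padding (hypothetical helper, assumes <string.h>):

    #include <string.h>

    static void draw_edges_scalar(unsigned char *buf, int wrap,
                                  int width, int height, int w)
    {
        unsigned char *ptr, *last_line;
        int i;

        /* left and right edges: broadcast the first/last pixel of every row */
        for (i = 0; i < height; i++) {
            ptr = buf + i * wrap;
            memset(ptr - w, ptr[0], w);
            memset(ptr + width, ptr[width - 1], w);
        }

        /* top and bottom edges (and the corners): copy the already padded
           first/last row outwards w times */
        last_line = buf + (height - 1) * wrap;
        for (i = 0; i < w; i++) {
            memcpy(buf - (i + 1) * wrap - w, buf - w, width + 2 * w);
            memcpy(last_line + (i + 1) * wrap - w, last_line - w, width + 2 * w);
        }
    }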