Diffstat (limited to 'src/xine-utils')
-rw-r--r--  src/xine-utils/color.c  |  6 +++---
-rw-r--r--  src/xine-utils/memcpy.c | 16 ++++++++--------
2 files changed, 11 insertions, 11 deletions
diff --git a/src/xine-utils/color.c b/src/xine-utils/color.c
index d01e8b649..4e1e7b9ef 100644
--- a/src/xine-utils/color.c
+++ b/src/xine-utils/color.c
@@ -61,7 +61,7 @@
* instructions), these macros will automatically map to those special
* instructions.
*
- * $Id: color.c,v 1.25 2005/01/23 23:01:13 jstembridge Exp $
+ * $Id: color.c,v 1.26 2005/05/07 09:11:40 valtri Exp $
*/
#include "xine_internal.h"
@@ -495,8 +495,8 @@ static void vscale_chroma_line (unsigned char *dst, int pitch,
/* process blocks of 4 pixels */
for (x=0; x < (width / 4); x++) {
- n1 = *(((unsigned int *) src1)++);
- n2 = *(((unsigned int *) src2)++);
+ n1 = *(((unsigned int *) src1)); src1 = ((unsigned int *) src1) + 1;
+ n2 = *(((unsigned int *) src2)); src2 = ((unsigned int *) src2) + 1;
n3 = (n1 & 0xFF00FF00) >> 8;
n4 = (n2 & 0xFF00FF00) >> 8;
n1 &= 0x00FF00FF;
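
The color.c hunk works around a C extension that GCC 4 dropped: using a cast
as an lvalue. `*(((unsigned int *) src1)++)` asks the compiler to increment
the result of a cast, which is not standard C; the replacement reads through
the cast and then advances the pointer with an explicit assignment. A minimal
sketch of the compliant pattern (hypothetical helper, not the xine code
itself, assuming a byte buffer walked in 32-bit steps as in
vscale_chroma_line):

    #include <stdint.h>

    /* Read one 32-bit word and step the byte pointer past it.
     * The cast appears only in rvalue position, so this is valid
     * standard C with no lvalue-cast extension needed. */
    static uint32_t read_u32_and_advance(const unsigned char **src)
    {
        uint32_t word = *(const uint32_t *)*src; /* read through the cast */
        *src += sizeof(uint32_t);                /* advance the real pointer */
        return word;
    }
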
diff --git a/src/xine-utils/memcpy.c b/src/xine-utils/memcpy.c
index ee86b3759..d448f4902 100644
--- a/src/xine-utils/memcpy.c
+++ b/src/xine-utils/memcpy.c
@@ -218,8 +218,8 @@ static void * sse_memcpy(void * to, const void * from, size_t len)
"movntps %%xmm2, 32(%1)\n"
"movntps %%xmm3, 48(%1)\n"
:: "r" (from), "r" (to) : "memory");
- ((const unsigned char *)from)+=64;
- ((unsigned char *)to)+=64;
+ from = ((const unsigned char *)from) + 64;
+ to = ((unsigned char *)to) + 64;
}
else
/*
@@ -241,8 +241,8 @@ static void * sse_memcpy(void * to, const void * from, size_t len)
"movntps %%xmm2, 32(%1)\n"
"movntps %%xmm3, 48(%1)\n"
:: "r" (from), "r" (to) : "memory");
- ((const unsigned char *)from)+=64;
- ((unsigned char *)to)+=64;
+ from = ((const unsigned char *)from) + 64;
+ to = ((unsigned char *)to) + 64;
}
/* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */
@@ -296,8 +296,8 @@ static void * mmx_memcpy(void * to, const void * from, size_t len)
"movq %%mm6, 48(%1)\n"
"movq %%mm7, 56(%1)\n"
:: "r" (from), "r" (to) : "memory");
- ((const unsigned char *)from)+=64;
- ((unsigned char *)to)+=64;
+ from = ((const unsigned char *)from) + 64;
+ to = ((unsigned char *)to) + 64;
}
__asm__ __volatile__ ("emms":::"memory");
}
@@ -363,8 +363,8 @@ static void * mmx2_memcpy(void * to, const void * from, size_t len)
"movntq %%mm6, 48(%1)\n"
"movntq %%mm7, 56(%1)\n"
:: "r" (from), "r" (to) : "memory");
- ((const unsigned char *)from)+=64;
- ((unsigned char *)to)+=64;
+ from = ((const unsigned char *)from) + 64;
+ to = ((unsigned char *)to) + 64;
}
/* since movntq is weakly-ordered, a "sfence"
* is needed to become ordered again. */
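
The memcpy.c hunks fix the same removed extension:
`((const unsigned char *)from) += 64;` increments through a cast. Because
`from` and `to` are declared `void *`, the compliant form converts to a byte
pointer, adds 64 (one asm-copied block), and assigns the result back. A
standalone sketch of the loop shape, with a hypothetical copy_block_64
standing in for the inline SSE/MMX assembly:

    #include <stddef.h>

    /* Hypothetical stand-in for the 64-byte block copy that the real
     * functions perform with inline SSE/MMX assembly. */
    static void copy_block_64(void *to, const void *from)
    {
        const unsigned char *s = from;
        unsigned char *d = to;
        for (int i = 0; i < 64; i++)
            d[i] = s[i];
    }

    static void *block_memcpy(void *to, const void *from, size_t len)
    {
        void *retval = to;
        size_t i = len >> 6;   /* number of 64-byte blocks */

        while (i-- > 0) {
            copy_block_64(to, from);
            /* standards-compliant advance: convert, add, assign back */
            from = (const unsigned char *)from + 64;
            to   = (unsigned char *)to + 64;
        }
        /* a full implementation would also copy the trailing len & 63
         * bytes and, after non-temporal stores (movntq/movntps), issue
         * an sfence so the weakly-ordered writes become ordered again */
        return retval;
    }
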