diff options
author:    František Dvořák <valtri@users.sourceforge.net>  2005-05-07 09:11:37 +0000
committer: František Dvořák <valtri@users.sourceforge.net>  2005-05-07 09:11:37 +0000
commit:    f2f91f2d7d1de1f0d9f4249d90e4edfd01ed0bb4 (patch)
tree:      f77e37e2963b45a14ed2447b62f7817e80a32d34 /src/xine-utils
parent:    eed0d8c57fb0c0a7917a820dfc009baf81f223c0 (diff)
download:  xine-lib-f2f91f2d7d1de1f0d9f4249d90e4edfd01ed0bb4.tar.gz xine-lib-f2f91f2d7d1de1f0d9f4249d90e4edfd01ed0bb4.tar.bz2
*BUGFIX*
gcc4 patches from Dams Nadé (livna.org) and Keenan Pepper.
CVS patchset: 7527
CVS date: 2005/05/07 09:11:37
Diffstat (limited to 'src/xine-utils')
-rw-r--r--  src/xine-utils/color.c   |  6
-rw-r--r--  src/xine-utils/memcpy.c  | 16
2 files changed, 11 insertions, 11 deletions
diff --git a/src/xine-utils/color.c b/src/xine-utils/color.c index d01e8b649..4e1e7b9ef 100644 --- a/src/xine-utils/color.c +++ b/src/xine-utils/color.c @@ -61,7 +61,7 @@ * instructions), these macros will automatically map to those special * instructions. * - * $Id: color.c,v 1.25 2005/01/23 23:01:13 jstembridge Exp $ + * $Id: color.c,v 1.26 2005/05/07 09:11:40 valtri Exp $ */ #include "xine_internal.h" @@ -495,8 +495,8 @@ static void vscale_chroma_line (unsigned char *dst, int pitch, /* process blocks of 4 pixels */ for (x=0; x < (width / 4); x++) { - n1 = *(((unsigned int *) src1)++); - n2 = *(((unsigned int *) src2)++); + n1 = *(((unsigned int *) src1)); src1 = ((unsigned int *) src1) + 1; + n2 = *(((unsigned int *) src2)); src2 = ((unsigned int *) src2) + 1; n3 = (n1 & 0xFF00FF00) >> 8; n4 = (n2 & 0xFF00FF00) >> 8; n1 &= 0x00FF00FF; diff --git a/src/xine-utils/memcpy.c b/src/xine-utils/memcpy.c index ee86b3759..d448f4902 100644 --- a/src/xine-utils/memcpy.c +++ b/src/xine-utils/memcpy.c @@ -218,8 +218,8 @@ static void * sse_memcpy(void * to, const void * from, size_t len) "movntps %%xmm2, 32(%1)\n" "movntps %%xmm3, 48(%1)\n" :: "r" (from), "r" (to) : "memory"); - ((const unsigned char *)from)+=64; - ((unsigned char *)to)+=64; + from = ((const unsigned char *)from) + 64; + to = ((unsigned char *)to) + 64; } else /* @@ -241,8 +241,8 @@ static void * sse_memcpy(void * to, const void * from, size_t len) "movntps %%xmm2, 32(%1)\n" "movntps %%xmm3, 48(%1)\n" :: "r" (from), "r" (to) : "memory"); - ((const unsigned char *)from)+=64; - ((unsigned char *)to)+=64; + from = ((const unsigned char *)from) + 64; + to = ((unsigned char *)to) + 64; } /* since movntq is weakly-ordered, a "sfence" * is needed to become ordered again. 
*/ @@ -296,8 +296,8 @@ static void * mmx_memcpy(void * to, const void * from, size_t len) "movq %%mm6, 48(%1)\n" "movq %%mm7, 56(%1)\n" :: "r" (from), "r" (to) : "memory"); - ((const unsigned char *)from)+=64; - ((unsigned char *)to)+=64; + from = ((const unsigned char *)from) + 64; + to = ((unsigned char *)to) + 64; } __asm__ __volatile__ ("emms":::"memory"); } @@ -363,8 +363,8 @@ static void * mmx2_memcpy(void * to, const void * from, size_t len) "movntq %%mm6, 48(%1)\n" "movntq %%mm7, 56(%1)\n" :: "r" (from), "r" (to) : "memory"); - ((const unsigned char *)from)+=64; - ((unsigned char *)to)+=64; + from = ((const unsigned char *)from) + 64; + to = ((unsigned char *)to) + 64; } /* since movntq is weakly-ordered, a "sfence" * is needed to become ordered again. */