author | austriancoder <austriancoder> | 2004-09-18 23:47:42 +0000
---|---|---
committer | austriancoder <austriancoder> | 2004-09-18 23:47:42 +0000
commit | 08926328125cb17f0e20957a5569d6afb0d6c6c3 (patch) |
tree | cf0436e7c2a52b4dd8e55a27106c3921492bea2c /dxr3memcpy.c |
parent | 2a5e712dcd046c1a7d136201d8d7ac814bc23f95 (diff) |
download | vdr-plugin-dxr3-08926328125cb17f0e20957a5569d6afb0d6c6c3.tar.gz | vdr-plugin-dxr3-08926328125cb17f0e20957a5569d6afb0d6c6c3.tar.bz2
again fixes for gcc-2.95
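The fix is purely syntactic. GCC extended asm separates the instruction template, output operands, input operands, and clobbers with colons; when the output list is empty, two colons end up adjacent, and gcc-2.95's C++ front end lexes `::` as a single scope-resolution token and rejects the statement. Writing `: :` keeps the colons as separate tokens. A minimal sketch of the pattern, modeled on the prefetch hunk below (the function name is illustrative, not from the plugin):

/* GCC extended asm layout: template : outputs : inputs : clobbers.
 * With no outputs, the first two colons become adjacent; gcc-2.95
 * in C++ mode lexes "::" as one scope-resolution token, so the
 * compatible spelling is ": :" with a separating space. */
static void prefetch_block(const void *from)
{
    __asm__ __volatile__ (
        "  prefetchnta (%0)\n"
        "  prefetchnta 32(%0)\n"
        : /* no outputs: this is where "::" would otherwise arise */
        : "r" (from));   /* one input, no clobbers */
}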
Diffstat (limited to 'dxr3memcpy.c')
-rw-r--r-- | dxr3memcpy.c | 18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/dxr3memcpy.c b/dxr3memcpy.c
index 427856a..ec675a0 100644
--- a/dxr3memcpy.c
+++ b/dxr3memcpy.c
@@ -141,7 +141,7 @@ static void * sse_memcpy(void * to, const void * from, size_t len)
     "movntps %%xmm1, 16(%1)\n"
     "movntps %%xmm2, 32(%1)\n"
     "movntps %%xmm3, 48(%1)\n"
-    :: "r" (from), "r" (to) : "memory");
+    : : "r" (from), "r" (to) : "memory");
     ((const unsigned char *)from)+=64;
     ((unsigned char *)to)+=64;
   }
@@ -164,15 +164,15 @@ static void * sse_memcpy(void * to, const void * from, size_t len)
     "movntps %%xmm1, 16(%1)\n"
     "movntps %%xmm2, 32(%1)\n"
     "movntps %%xmm3, 48(%1)\n"
-    :: "r" (from), "r" (to) : "memory");
+    : : "r" (from), "r" (to) : "memory");
     ((const unsigned char *)from)+=64;
     ((unsigned char *)to)+=64;
   }
   /* since movntq is weakly-ordered, a "sfence"
    * is needed to become ordered again. */
-  __asm__ __volatile__ ("sfence":::"memory");
+  __asm__ __volatile__ ("sfence": : :"memory");
   /* enables to use FPU */
-  __asm__ __volatile__ ("emms":::"memory");
+  __asm__ __volatile__ ("emms": : :"memory");
 }
 /*
  * Now do the tail of the block
@@ -220,11 +220,11 @@ static void * mmx_memcpy(void * to, const void * from, size_t len)
     "movq %%mm5, 40(%1)\n"
     "movq %%mm6, 48(%1)\n"
     "movq %%mm7, 56(%1)\n"
-    :: "r" (from), "r" (to) : "memory");
+    : : "r" (from), "r" (to) : "memory");
     ((const unsigned char *)from)+=64;
     ((unsigned char *)to)+=64;
   }
-  __asm__ __volatile__ ("emms":::"memory");
+  __asm__ __volatile__ ("emms": : :"memory");
 }
 /*
  * Now do the tail of the block
@@ -252,7 +252,7 @@ static void * mmx2_memcpy(void * to, const void * from, size_t len)
   "  prefetchnta 224(%0)\n"
   "  prefetchnta 256(%0)\n"
   "  prefetchnta 288(%0)\n"
-  :: "r" (from) );
+  : : "r" (from) );

   if(len >= MIN_LEN)
   {
@@ -288,14 +288,14 @@ static void * mmx2_memcpy(void * to, const void * from, size_t len)
     "movntq %%mm5, 40(%1)\n"
     "movntq %%mm6, 48(%1)\n"
     "movntq %%mm7, 56(%1)\n"
-    :: "r" (from), "r" (to) : "memory");
+    : : "r" (from), "r" (to) : "memory");
     ((const unsigned char *)from)+=64;
     ((unsigned char *)to)+=64;
   }
   /* since movntq is weakly-ordered, a "sfence"
    * is needed to become ordered again. */
   __asm__ __volatile__ ("sfence":::"memory");
-  __asm__ __volatile__ ("emms":::"memory");
+  __asm__ __volatile__ ("emms": : :"memory");
 }
 /*
  * Now do the tail of the block
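For orientation, the functions being touched all follow the same streaming-copy shape: copy the bulk of the block with non-temporal stores, then run sfence (those stores are weakly ordered) and emms (so x87/FPU code works again) before copying the tail. A condensed, hedged sketch of that shape, using one movq/movntq pair per 8-byte chunk instead of the plugin's 64-byte unrolled loop, with a plain memcpy standing in for the tail copy (the function name is illustrative):

#include <stddef.h>
#include <string.h>

/* Sketch of the streaming-copy shape visible in the diff (x86 only):
 * bulk copy via MMX non-temporal stores, then sfence to order the
 * weakly-ordered movntq stores, emms to make the FPU usable again,
 * and an ordinary copy for the remaining tail bytes. */
static void *mmx2_memcpy_sketch(void *to, const void *from, size_t len)
{
    void *const ret = to;
    size_t n = len >> 3;             /* 8-byte chunks */

    while (n--) {
        __asm__ __volatile__ (
            "movq   (%0), %%mm0\n"
            "movntq %%mm0, (%1)\n"   /* non-temporal: bypasses the cache */
            : : "r" (from), "r" (to) : "memory");
        /* advance via temporaries; the diff instead uses GCC's old
         * cast-as-lvalue extension, e.g. ((unsigned char *)to)+=64 */
        from = (const unsigned char *)from + 8;
        to   = (unsigned char *)to + 8;
    }
    __asm__ __volatile__ ("sfence" : : : "memory"); /* order movntq stores */
    __asm__ __volatile__ ("emms"   : : : "memory"); /* re-enable x87 FPU */

    memcpy(to, from, len & 7);       /* tail bytes: assumption, plain copy */
    return ret;
}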