author     Miguel Freitas <miguelfreitas@users.sourceforge.net>   2001-10-20 22:18:59 +0000
committer  Miguel Freitas <miguelfreitas@users.sourceforge.net>   2001-10-20 22:18:59 +0000
commit     4c9f9727a0a48d2672d7915a11a2e5da99458b49 (patch)
tree       331140bb80f402f00a4e472ab5cef2de540e3358
parent     058e3bd311b75c2a380780dac16abd712c92e2a6 (diff)
download   xine-lib-4c9f9727a0a48d2672d7915a11a2e5da99458b49.tar.gz
           xine-lib-4c9f9727a0a48d2672d7915a11a2e5da99458b49.tar.bz2
optimized memcpy (mmx,sse,etc...)
CVS patchset: 844
CVS date: 2001/10/20 22:18:59
-rw-r--r--  src/libdivx4/xine_decoder.c    17
-rw-r--r--  src/video_out/deinterlace.c    40
-rw-r--r--  src/xine-engine/Makefile.am     4
-rw-r--r--  src/xine-engine/memcpy.c      439
-rw-r--r--  src/xine-engine/memcpy.h       31
-rw-r--r--  src/xine-engine/xine.c          6
6 files changed, 506 insertions, 31 deletions
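
The patch routes bulk image copies through a function pointer that is bound once at startup to whichever implementation benchmarks fastest on the host CPU. A minimal standalone sketch of that dispatch pattern (hypothetical names, not xine's actual probing code):

    #include <stdio.h>
    #include <string.h>

    /* callers use this pointer instead of calling memcpy directly */
    static void *(*copy_fn)(void *to, const void *from, size_t len) = memcpy;

    /* stand-in for an optimized routine; the real patch installs
     * mmx_memcpy/mmx2_memcpy/sse_memcpy here after benchmarking */
    static void *fast_copy_stub(void *to, const void *from, size_t len) {
      return memcpy(to, from, len);
    }

    int main(void) {
      char src[32] = "frame data", dst[32];

      copy_fn = fast_copy_stub;       /* chosen once at init time */
      copy_fn(dst, src, sizeof(src));
      printf("%s\n", dst);
      return 0;
    }
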
diff --git a/src/libdivx4/xine_decoder.c b/src/libdivx4/xine_decoder.c
index 6e8b5f365..305ac5a5c 100644
--- a/src/libdivx4/xine_decoder.c
+++ b/src/libdivx4/xine_decoder.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: xine_decoder.c,v 1.4 2001/10/17 21:29:21 guenter Exp $
+ * $Id: xine_decoder.c,v 1.5 2001/10/20 22:18:59 miguelfreitas Exp $
*
* xine decoder plugin using divx4
*
@@ -48,6 +48,7 @@
#include "video_out.h"
#include "buffer.h"
#include "metronom.h"
+#include "memcpy.h"
#include "decore-if.h"
@@ -206,7 +207,7 @@ static void divx4_decode_data (video_decoder_t *this_gen, buf_element_t *buf) {
} else if (this->decoder_ok) {
- memcpy (&this->buf[this->size], buf->content, buf->size);
+ fast_memcpy (&this->buf[this->size], buf->content, buf->size);
this->size += buf->size;
@@ -259,12 +260,12 @@ static void divx4_decode_data (video_decoder_t *this_gen, buf_element_t *buf) {
int src_offset,dst_offset;
/* shortcut if stride_y equals width */
if (pict.stride_y == img->width) {
- memcpy(img->base[0], pict.y, img->width*img->height);
+ fast_memcpy(img->base[0], pict.y, img->width*img->height);
}
else { /* copy line by line */
src_offset=dst_offset = 0;
for (i=0; i<img->height; i++) {
- memcpy(img->base[0]+dst_offset, pict.y+src_offset, img->width);
+ fast_memcpy(img->base[0]+dst_offset, pict.y+src_offset, img->width);
src_offset += pict.stride_y;
dst_offset += img->width;
}
@@ -273,14 +274,14 @@ static void divx4_decode_data (video_decoder_t *this_gen, buf_element_t *buf) {
FIXME: Weird... I thought YV12 means order y-v-u, yet base[1]
seems to be u and base[2] is v. */
if (pict.stride_uv == img->width>>1) {
- memcpy(img->base[1], pict.u, (img->width*img->height)>>2);
- memcpy(img->base[2], pict.v, (img->width*img->height)>>2);
+ fast_memcpy(img->base[1], pict.u, (img->width*img->height)>>2);
+ fast_memcpy(img->base[2], pict.v, (img->width*img->height)>>2);
}
else {
src_offset=dst_offset = 0;
for (i=0; i<img->height>>1; i++) {
- memcpy(img->base[1]+dst_offset, pict.u+src_offset, img->width>>1);
- memcpy(img->base[2]+dst_offset, pict.v+src_offset, img->width>>1);
+ fast_memcpy(img->base[1]+dst_offset, pict.u+src_offset, img->width>>1);
+ fast_memcpy(img->base[2]+dst_offset, pict.v+src_offset, img->width>>1);
src_offset += pict.stride_uv;
dst_offset += img->width>>1;
}
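
When the decoder stride differs from the image width, the hunk above copies each plane line by line, skipping the padding at the end of every source row. A self-contained sketch of that strided plane copy (illustrative only, using plain memcpy):

    #include <string.h>

    /* copy a width x height plane from a source with row stride 'stride'
     * into a tightly packed destination buffer */
    static void copy_plane(unsigned char *dst, const unsigned char *src,
                           int width, int height, int stride) {
      int y;

      if (stride == width) {                     /* contiguous: one big copy */
        memcpy(dst, src, (size_t)width * height);
        return;
      }
      for (y = 0; y < height; y++) {             /* otherwise skip per-row padding */
        memcpy(dst, src, (size_t)width);
        src += stride;
        dst += width;
      }
    }
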
diff --git a/src/video_out/deinterlace.c b/src/video_out/deinterlace.c
index 0c2a10b91..afdc320bb 100644
--- a/src/video_out/deinterlace.c
+++ b/src/video_out/deinterlace.c
@@ -29,6 +29,7 @@
#include "xine_internal.h"
#include "cpu_accel.h"
#include "deinterlace.h"
+#include "memcpy.h"
/*
@@ -36,7 +37,6 @@
Based on Virtual Dub plugin by Gunnar Thalin
MMX asm version from dscaler project (deinterlace.sourceforge.net)
Linux version for Xine player by Miguel Freitas
- Todo: use a MMX optimized memcpy
*/
static void deinterlace_bob_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
int width, int height )
@@ -71,9 +71,9 @@ static void deinterlace_bob_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// copy first even line no matter what, and the first odd line if we're
// processing an odd field.
- memcpy(pdst, pEvenLines, LineLength);
+ fast_memcpy(pdst, pEvenLines, LineLength);
if (IsOdd)
- memcpy(pdst + LineLength, pOddLines, LineLength);
+ fast_memcpy(pdst + LineLength, pOddLines, LineLength);
height = height / 2;
for (Line = 0; Line < height - 1; ++Line)
@@ -102,7 +102,7 @@ static void deinterlace_bob_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// half the time this function is called, those words' meanings will invert.
// Copy the odd line to the overlay verbatim.
- memcpy((char *)Dest + LineLength, YVal3, LineLength);
+ fast_memcpy((char *)Dest + LineLength, YVal3, LineLength);
n = LineLength >> 3;
while( n-- )
@@ -166,7 +166,7 @@ static void deinterlace_bob_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// Copy last odd line if we're processing an even field.
if (! IsOdd)
{
- memcpy(pdst + (height * 2 - 1) * LineLength,
+ fast_memcpy(pdst + (height * 2 - 1) * LineLength,
pOddLines + (height - 1) * SourcePitch,
LineLength);
}
@@ -234,9 +234,9 @@ static int deinterlace_weave_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// copy first even line no matter what, and the first odd line if we're
// processing an even field.
- memcpy(pdst, pEvenLines, LineLength);
+ fast_memcpy(pdst, pEvenLines, LineLength);
if (!IsOdd)
- memcpy(pdst + LineLength, pOddLines, LineLength);
+ fast_memcpy(pdst + LineLength, pOddLines, LineLength);
height = height / 2;
for (Line = 0; Line < height - 1; ++Line)
@@ -269,7 +269,7 @@ static int deinterlace_weave_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// Copy the even scanline below this one to the overlay buffer, since we'll be
// adapting the current scanline to the even lines surrounding it. The scanline
// above has already been copied by the previous pass through the loop.
- memcpy((char *)Dest + LineLength, YVal3, LineLength);
+ fast_memcpy((char *)Dest + LineLength, YVal3, LineLength);
n = LineLength >> 3;
while( n-- )
@@ -352,7 +352,7 @@ static int deinterlace_weave_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// Copy last odd line if we're processing an odd field.
if (IsOdd)
{
- memcpy(pdst + (height * 2 - 1) * LineLength,
+ fast_memcpy(pdst + (height * 2 - 1) * LineLength,
pOddLines + (height - 1) * SourcePitch,
LineLength);
}
@@ -417,9 +417,9 @@ static int deinterlace_greedy_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// copy first even line no matter what, and the first odd line if we're
// processing an EVEN field. (note diff from other deint rtns.)
- memcpy(pdst, pEvenLines, LineLength); //DL0
+ fast_memcpy(pdst, pEvenLines, LineLength); //DL0
if (!IsOdd)
- memcpy(pdst + LineLength, pOddLines, LineLength); //DL1
+ fast_memcpy(pdst + LineLength, pOddLines, LineLength); //DL1
height = height / 2;
for (Line = 0; Line < height - 1; ++Line)
@@ -443,7 +443,7 @@ static int deinterlace_greedy_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
Dest = (uint64_t *)(pdst + (Line * 2 + 2) * LineLength);
}
- memcpy((char *)Dest + LineLength, L3, LineLength);
+ fast_memcpy((char *)Dest + LineLength, L3, LineLength);
// For ease of reading, the comments below assume that we're operating on an odd
// field (i.e., that info->IsOdd is true). Assume the obvious for even lines..
@@ -524,7 +524,7 @@ static int deinterlace_greedy_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// Copy last odd line if we're processing an Odd field.
if (IsOdd)
{
- memcpy(pdst + (height * 2 - 1) * LineLength,
+ fast_memcpy(pdst + (height * 2 - 1) * LineLength,
pOddLines + (height - 1) * SourcePitch,
LineLength);
}
@@ -561,9 +561,9 @@ static void deinterlace_onefield_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// copy first even line no matter what, and the first odd line if we're
// processing an odd field.
- memcpy(pdst, pEvenLines, LineLength);
+ fast_memcpy(pdst, pEvenLines, LineLength);
if (IsOdd)
- memcpy(pdst + LineLength, pOddLines, LineLength);
+ fast_memcpy(pdst + LineLength, pOddLines, LineLength);
height = height / 2;
for (Line = 0; Line < height - 1; ++Line)
@@ -582,7 +582,7 @@ static void deinterlace_onefield_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
}
// Copy the odd line to the overlay verbatim.
- memcpy((char *)Dest + LineLength, YVal3, LineLength);
+ fast_memcpy((char *)Dest + LineLength, YVal3, LineLength);
n = LineLength >> 3;
while( n-- )
@@ -604,7 +604,7 @@ static void deinterlace_onefield_yuv_mmx( uint8_t *pdst, uint8_t *psrc[],
// Copy last odd line if we're processing an even field.
if (! IsOdd)
{
- memcpy(pdst + (height * 2 - 1) * LineLength,
+ fast_memcpy(pdst + (height * 2 - 1) * LineLength,
pOddLines + (height - 1) * SourcePitch,
LineLength);
}
@@ -650,7 +650,7 @@ void deinterlace_yuv( uint8_t *pdst, uint8_t *psrc[],
{
switch( method ) {
case DEINTERLACE_NONE:
- memcpy(pdst,psrc[0],width*height);
+ fast_memcpy(pdst,psrc[0],width*height);
break;
case DEINTERLACE_BOB:
if( check_for_mmx() )
@@ -662,7 +662,7 @@ void deinterlace_yuv( uint8_t *pdst, uint8_t *psrc[],
if( check_for_mmx() )
{
if( !deinterlace_weave_yuv_mmx(pdst,psrc,width,height) )
- memcpy(pdst,psrc[0],width*height);
+ fast_memcpy(pdst,psrc[0],width*height);
}
else /* FIXME: provide an alternative? */
abort_mmx_missing();
@@ -671,7 +671,7 @@ void deinterlace_yuv( uint8_t *pdst, uint8_t *psrc[],
if( check_for_mmx() )
{
if( !deinterlace_greedy_yuv_mmx(pdst,psrc,width,height) )
- memcpy(pdst,psrc[0],width*height);
+ fast_memcpy(pdst,psrc[0],width*height);
}
else /* FIXME: provide an alternative? */
abort_mmx_missing();
diff --git a/src/xine-engine/Makefile.am b/src/xine-engine/Makefile.am
index 8bff05047..a9dd27d7e 100644
--- a/src/xine-engine/Makefile.am
+++ b/src/xine-engine/Makefile.am
@@ -10,7 +10,7 @@ EXTRA_DIST = cpu_accel.c
lib_LTLIBRARIES = libxine.la
libxine_la_SOURCES = xine.c metronom.c configfile.c buffer.c monitor.c \
- utils.c load_plugins.c video_decoder.c \
+ utils.c memcpy.c load_plugins.c video_decoder.c \
audio_decoder.c video_out.c audio_out.c resample.c events.c lrb.c
libxine_la_LIBADD = cpu_accel.lo \
$(THREAD_LIBS) \
@@ -22,7 +22,7 @@ libxine_la_LDFLAGS = \
-release $(LT_RELEASE)
include_HEADERS = buffer.h metronom.h configfile.h \
- monitor.h cpu_accel.h attributes.h utils.h \
+ monitor.h cpu_accel.h attributes.h utils.h memcpy.h \
audio_out.h resample.h video_out.h xine_internal.h spu_decoder.h \
events.h lrb.h
diff --git a/src/xine-engine/memcpy.c b/src/xine-engine/memcpy.c
new file mode 100644
index 000000000..23a17059c
--- /dev/null
+++ b/src/xine-engine/memcpy.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2001 the xine project
+ *
+ * This file is part of xine, a unix video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ * These are the MMX/MMX2/SSE optimized versions of memcpy
+ *
+ * This code was adapted from Linux Kernel sources by Nick Kurshev to
+ * the mplayer program. (http://mplayer.sourceforge.net)
+ *
+ * Miguel Freitas split the #ifdefs into several specialized functions that
+ * are benchmarked at runtime by xine. Some original comments from Nick
+ * have been preserved documenting some MMX/SSE oddities.
+ * Also added kernel memcpy function that seems faster than glibc one.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include "xine_internal.h"
+#include "cpu_accel.h"
+
+void *(* fast_memcpy)(void *to, const void *from, size_t len);
+
+/* Original comments from mplayer (file: aclib.c)
+ This part of the code was taken by me from Linux-2.4.3 and slightly modified
+for the MMX, MMX2 and SSE instruction sets. I did this because Linux uses
+page-aligned blocks, but mplayer uses weakly ordered data, and the original
+routines cannot speed it up. Only using PREFETCHNTA and MOVNTQ together has an effect!
+
+From IA-32 Intel Architecture Software Developer's Manual Volume 1,
+
+Order Number 245470:
+"10.4.6. Cacheability Control, Prefetch, and Memory Ordering Instructions"
+
+Data referenced by a program can be temporal (data will be used again) or
+non-temporal (data will be referenced once and not reused in the immediate
+future). To make efficient use of the processor's caches, it is generally
+desirable to cache temporal data and not cache non-temporal data. Overloading
+the processor's caches with non-temporal data is sometimes referred to as
+"polluting the caches".
+The non-temporal data is written to memory with Write-Combining semantics.
+
+The PREFETCHh instructions permit a program to load data into the processor
+at a suggested cache level, so that it is closer to the processor's load and
+store unit when it is needed. If the data is already present in a level of
+the cache hierarchy that is closer to the processor, the PREFETCHh instruction
+will not result in any data movement.
+But we should use PREFETCHNTA: it fetches non-temporal data into a location
+close to the processor, minimizing cache pollution.
+
+The MOVNTQ (store quadword using non-temporal hint) instruction stores
+packed integer data from an MMX register to memory, using a non-temporal hint.
+The MOVNTPS (store packed single-precision floating-point values using
+non-temporal hint) instruction stores packed floating-point data from an
+XMM register to memory, using a non-temporal hint.
+
+The SFENCE (Store Fence) instruction controls write ordering by creating a
+fence for memory store operations. This instruction guarantees that the results
+of every store instruction that precedes the store fence in program order are
+globally visible before any store instruction that follows the fence. The
+SFENCE instruction provides an efficient way of ensuring ordering between
+procedures that produce weakly-ordered data and procedures that consume that
+data.
+
+If you have questions please contact me: Nick Kurshev: nickols_k@mail.ru.
+*/
+
+/* mmx v.1 Note: Since we added alignment of the destination, it speeds up
+ memory copying on PentMMX, Celeron-1 and P2 by up to 12% versus the
+ standard (non MMX-optimized) version.
+ Note: on K6-2+ it speeds up memory copying by up to 25%, and
+ on K7 and P3 by about 500% (5 times).
+*/
+
+#ifdef ARCH_X86
+
+/* for small memory blocks (<256 bytes) this version is faster */
+#define small_memcpy(to,from,n)\
+{\
+register unsigned long int dummy;\
+__asm__ __volatile__(\
+ "rep; movsb"\
+ :"=&D"(to), "=&S"(from), "=&c"(dummy)\
+ :"0" (to), "1" (from),"2" (n)\
+ : "memory");\
+}
+
+/* linux kernel __memcpy (from: /include/asm/string.h) */
+static inline void * __memcpy(void * to, const void * from, size_t n)
+{
+int d0, d1, d2;
+
+ if( n < 4 ) {
+ small_memcpy(to,from,n);
+ }
+ else
+ __asm__ __volatile__(
+ "rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+ : "memory");
+
+ return (to);
+}
+
+#define SSE_MMREG_SIZE 16
+#define MMX_MMREG_SIZE 8
+
+#define MMX1_MIN_LEN 0x800 /* 2K blocks */
+#define MIN_LEN 0x40 /* 64-byte blocks */
+
+/* SSE note: I tried to move 128 bytes at a time instead of 64, but it
+didn't make any measurable difference. I'm using 64 for the sake of
+simplicity. [MF] */
+static void * sse_memcpy(void * to, const void * from, size_t len)
+{
+ void *retval;
+ size_t i;
+ retval = to;
+
+ if(len >= MIN_LEN)
+ {
+ register unsigned long int delta;
+ /* Align destination to an MMREG_SIZE boundary */
+ delta = ((unsigned long int)to)&(SSE_MMREG_SIZE-1);
+ if(delta)
+ {
+ delta=SSE_MMREG_SIZE-delta;
+ len -= delta;
+ small_memcpy(to, from, delta);
+ }
+ i = len >> 6; /* len/64 */
+ len&=63;
+ if(((unsigned long)from) & 15)
+ /* if SRC is misaligned */
+ for(; i>0; i--)
+ {
+ __asm__ __volatile__ (
+ "prefetchnta 320(%0)\n"
+ "movups (%0), %%xmm0\n"
+ "movups 16(%0), %%xmm1\n"
+ "movups 32(%0), %%xmm2\n"
+ "movups 48(%0), %%xmm3\n"
+ "movntps %%xmm0, (%1)\n"
+ "movntps %%xmm1, 16(%1)\n"
+ "movntps %%xmm2, 32(%1)\n"
+ "movntps %%xmm3, 48(%1)\n"
+ :: "r" (from), "r" (to) : "memory");
+ ((const unsigned char *)from)+=64;
+ ((unsigned char *)to)+=64;
+ }
+ else
+ /*
+ Only if SRC is aligned on a 16-byte boundary.
+ This allows movaps to be used instead of movups; movaps requires the data
+ to be aligned, otherwise a general-protection exception (#GP) is generated.
+ */
+ for(; i>0; i--)
+ {
+ __asm__ __volatile__ (
+ "prefetchnta 320(%0)\n"
+ "movaps (%0), %%xmm0\n"
+ "movaps 16(%0), %%xmm1\n"
+ "movaps 32(%0), %%xmm2\n"
+ "movaps 48(%0), %%xmm3\n"
+ "movntps %%xmm0, (%1)\n"
+ "movntps %%xmm1, 16(%1)\n"
+ "movntps %%xmm2, 32(%1)\n"
+ "movntps %%xmm3, 48(%1)\n"
+ :: "r" (from), "r" (to) : "memory");
+ ((const unsigned char *)from)+=64;
+ ((unsigned char *)to)+=64;
+ }
+ /* since movntps is weakly-ordered, a "sfence"
+ * is needed to become ordered again. */
+ __asm__ __volatile__ ("sfence":::"memory");
+ /* re-enable FPU use */
+ __asm__ __volatile__ ("emms":::"memory");
+ }
+ /*
+ * Now do the tail of the block
+ */
+ if(len) __memcpy(to, from, len);
+ return retval;
+}
+
+static void * mmx_memcpy(void * to, const void * from, size_t len)
+{
+ void *retval;
+ size_t i;
+ retval = to;
+
+ /* PREFETCH has effect even for MOVSB instruction ;) */
+ __asm__ __volatile__ (
+ " prefetchnta (%0)\n"
+ " prefetchnta 64(%0)\n"
+ " prefetchnta 128(%0)\n"
+ " prefetchnta 192(%0)\n"
+ " prefetchnta 256(%0)\n"
+ : : "r" (from) );
+
+ if(len >= MMX1_MIN_LEN)
+ {
+ register unsigned long int delta;
+ /* Align destination to an MMREG_SIZE boundary */
+ delta = ((unsigned long int)to)&(MMX_MMREG_SIZE-1);
+ if(delta)
+ {
+ delta=MMX_MMREG_SIZE-delta;
+ len -= delta;
+ small_memcpy(to, from, delta);
+ }
+ i = len >> 6; /* len/64 */
+ len&=63;
+ for(; i>0; i--)
+ {
+ __asm__ __volatile__ (
+ "prefetchnta 320(%0)\n"
+ "movq (%0), %%mm0\n"
+ "movq 8(%0), %%mm1\n"
+ "movq 16(%0), %%mm2\n"
+ "movq 24(%0), %%mm3\n"
+ "movq 32(%0), %%mm4\n"
+ "movq 40(%0), %%mm5\n"
+ "movq 48(%0), %%mm6\n"
+ "movq 56(%0), %%mm7\n"
+ "movq %%mm0, (%1)\n"
+ "movq %%mm1, 8(%1)\n"
+ "movq %%mm2, 16(%1)\n"
+ "movq %%mm3, 24(%1)\n"
+ "movq %%mm4, 32(%1)\n"
+ "movq %%mm5, 40(%1)\n"
+ "movq %%mm6, 48(%1)\n"
+ "movq %%mm7, 56(%1)\n"
+ :: "r" (from), "r" (to) : "memory");
+ ((const unsigned char *)from)+=64;
+ ((unsigned char *)to)+=64;
+ }
+ __asm__ __volatile__ ("emms":::"memory");
+ }
+ /*
+ * Now do the tail of the block
+ */
+ if(len) __memcpy(to, from, len);
+ return retval;
+}
+
+void * mmx2_memcpy(void * to, const void * from, size_t len)
+{
+ void *retval;
+ size_t i;
+ retval = to;
+
+ /* PREFETCH has effect even for MOVSB instruction ;) */
+ __asm__ __volatile__ (
+ " prefetchnta (%0)\n"
+ " prefetchnta 64(%0)\n"
+ " prefetchnta 128(%0)\n"
+ " prefetchnta 192(%0)\n"
+ " prefetchnta 256(%0)\n"
+ : : "r" (from) );
+
+ if(len >= MIN_LEN)
+ {
+ register unsigned long int delta;
+ /* Align destination to an MMREG_SIZE boundary */
+ delta = ((unsigned long int)to)&(MMX_MMREG_SIZE-1);
+ if(delta)
+ {
+ delta=MMX_MMREG_SIZE-delta;
+ len -= delta;
+ small_memcpy(to, from, delta);
+ }
+ i = len >> 6; /* len/64 */
+ len&=63;
+ for(; i>0; i--)
+ {
+ __asm__ __volatile__ (
+ "prefetchnta 320(%0)\n"
+ "movq (%0), %%mm0\n"
+ "movq 8(%0), %%mm1\n"
+ "movq 16(%0), %%mm2\n"
+ "movq 24(%0), %%mm3\n"
+ "movq 32(%0), %%mm4\n"
+ "movq 40(%0), %%mm5\n"
+ "movq 48(%0), %%mm6\n"
+ "movq 56(%0), %%mm7\n"
+ "movntq %%mm0, (%1)\n"
+ "movntq %%mm1, 8(%1)\n"
+ "movntq %%mm2, 16(%1)\n"
+ "movntq %%mm3, 24(%1)\n"
+ "movntq %%mm4, 32(%1)\n"
+ "movntq %%mm5, 40(%1)\n"
+ "movntq %%mm6, 48(%1)\n"
+ "movntq %%mm7, 56(%1)\n"
+ :: "r" (from), "r" (to) : "memory");
+ ((const unsigned char *)from)+=64;
+ ((unsigned char *)to)+=64;
+ }
+ /* since movntq is weakly-ordered, a "sfence"
+ * is needed to become ordered again. */
+ __asm__ __volatile__ ("sfence":::"memory");
+ __asm__ __volatile__ ("emms":::"memory");
+ }
+ /*
+ * Now do the tail of the block
+ */
+ if(len) __memcpy(to, from, len);
+ return retval;
+}
+
+static void *linux_kernel_memcpy(void *to, const void *from, size_t len) {
+ return __memcpy(to,from,len);
+}
+
+#endif /* ARCH_X86 */
+
+static struct {
+ char *name;
+ void *(* function)(void *to, const void *from, size_t len);
+ unsigned long long time;
+ uint32_t cpu_require;
+} memcpy_method[] =
+{
+ { "glibc memcpy()", memcpy, 0, 0 },
+#ifdef ARCH_X86
+ { "linux kernel memcpy()", linux_kernel_memcpy, 0, 0 },
+ { "MMX optimized memcpy()", mmx_memcpy, 0, MM_MMX },
+ { "MMXEXT optimized memcpy()", mmx2_memcpy, 0, MM_MMXEXT },
+ { "SSE optimized memcpy()", sse_memcpy, 0, MM_MMXEXT|MM_SSE },
+#endif /* ARCH_X86 */
+ { NULL, NULL, 0, 0 }
+};
+
+#ifdef ARCH_X86
+static unsigned long long int rdtsc()
+{
+ unsigned long long int x;
+ __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
+ return x;
+}
+#else
+static unsigned long long int rdtsc()
+{
+ /* FIXME: implement an equivalent for using optimized memcpy on other
+ architectures */
+ return 0;
+}
+#endif
+
+
+#define BUFSIZE 1024*1024
+void probe_fast_memcpy(config_values_t *config)
+{
+unsigned long long t;
+char *buf1, *buf2;
+int i, j, best;
+static int config_flags = -1;
+
+#ifdef ARCH_X86
+ config_flags = mm_accel();
+#else
+ config_flags = 0;
+#endif
+
+ best = config->lookup_int (config, "fast_memcpy", -1);
+ /* check if function is configured and valid for this machine */
+ if( best != -1 &&
+ (config_flags & memcpy_method[best].cpu_require) ==
+ memcpy_method[best].cpu_require ) {
+ printf("xine: using %s\n", memcpy_method[best].name );
+ fast_memcpy = memcpy_method[best].function;
+ return;
+ }
+
+ fast_memcpy = memcpy;
+
+ if( (buf1 = malloc(BUFSIZE)) == NULL )
+ return;
+
+ if( (buf2 = malloc(BUFSIZE)) == NULL ) {
+ free(buf1);
+ return;
+ }
+
+ printf("Benchmarking memcpy methods (smaller is better):\n");
+ /* make sure the buffers are present in physical memory */
+ memcpy(buf1,buf2,BUFSIZE);
+
+ for(i=0; memcpy_method[i].name; i++)
+ {
+ if( (config_flags & memcpy_method[i].cpu_require) !=
+ memcpy_method[i].cpu_require )
+ continue;
+
+ t = rdtsc();
+ for(j=0;j<100;j++)
+ memcpy_method[i].function(buf1,buf2,BUFSIZE);
+ t = rdtsc() - t;
+ memcpy_method[i].time = t;
+
+ printf("\t%s : %lld\n",memcpy_method[i].name, t);
+
+ if( best == -1 || t < memcpy_method[best].time )
+ best = i;
+ }
+ printf("xine: using %s\n", memcpy_method[best].name );
+ fast_memcpy = memcpy_method[best].function;
+ config->set_int (config, "fast_memcpy", best );
+
+ free(buf1);
+ free(buf2);
+}
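
The movups/movntps loop in sse_memcpy above corresponds roughly to the following SSE intrinsics sketch (<xmmintrin.h>, compile with -msse). It illustrates the non-temporal copy technique only; the patch itself uses inline assembly and aligns the destination first, and _mm_stream_ps assumes a 16-byte aligned destination here:

    #include <string.h>
    #include <xmmintrin.h>   /* _mm_loadu_ps, _mm_stream_ps, _mm_prefetch, _mm_sfence */

    /* copy len bytes with cache-bypassing 16-byte stores, 64 bytes per
     * iteration; the tail is finished with plain memcpy */
    static void *stream_copy(void *to, const void *from, size_t len) {
      float *dst = (float *)to;              /* assumed 16-byte aligned */
      const float *src = (const float *)from;
      size_t blocks = len / 64;

      while (blocks--) {
        _mm_prefetch((const char *)src + 320, _MM_HINT_NTA);  /* prefetch ahead */
        __m128 a = _mm_loadu_ps(src +  0);
        __m128 b = _mm_loadu_ps(src +  4);
        __m128 c = _mm_loadu_ps(src +  8);
        __m128 d = _mm_loadu_ps(src + 12);
        _mm_stream_ps(dst +  0, a);          /* movntps: write around the cache */
        _mm_stream_ps(dst +  4, b);
        _mm_stream_ps(dst +  8, c);
        _mm_stream_ps(dst + 12, d);
        src += 16;
        dst += 16;
      }
      _mm_sfence();                          /* order the weakly-ordered stores */
      memcpy(dst, src, len & 63);            /* remaining tail */
      return to;
    }
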
diff --git a/src/xine-engine/memcpy.h b/src/xine-engine/memcpy.h
new file mode 100644
index 000000000..68a5dd991
--- /dev/null
+++ b/src/xine-engine/memcpy.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2000-2001 the xine project
+ *
+ * This file is part of xine, a unix video player.
+ *
+ * xine is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * xine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
+ *
+ */
+
+#ifndef HAVE_MEMCPY_H
+#define HAVE_MEMCPY_H
+
+
+/* optimized/fast memcpy */
+extern void *(* fast_memcpy)(void *to, const void *from, size_t len);
+
+/* benchmark available memcpy methods */
+void probe_fast_memcpy(config_values_t *config);
+#endif
diff --git a/src/xine-engine/xine.c b/src/xine-engine/xine.c
index 12452aaee..694729a66 100644
--- a/src/xine-engine/xine.c
+++ b/src/xine-engine/xine.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: xine.c,v 1.70 2001/10/20 02:01:51 guenter Exp $
+ * $Id: xine.c,v 1.71 2001/10/20 22:18:59 miguelfreitas Exp $
*
* top-level xine functions
*
@@ -52,6 +52,7 @@
#include "configfile.h"
#include "monitor.h"
#include "utils.h"
+#include "memcpy.h"
#ifndef __GNUC__
#define __FUNCTION__ __func__
@@ -391,6 +392,9 @@ xine_t *xine_init (vo_driver_t *vo,
this->config = config;
xine_debug = config->lookup_int (config, "xine_debug", 0);
+ /* probe for the fastest memcpy, or use the configured choice */
+ probe_fast_memcpy(config);
+
/*
* init locks
*/
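
probe_fast_memcpy(), called above during xine_init(), times each candidate routine over a 1 MB buffer and keeps the fastest. A simplified, portable sketch of that probing loop (clock(3) instead of rdtsc, and a hypothetical two-entry candidate table):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    #define BUFSIZE (1024 * 1024)

    typedef void *(*copy_fn_t)(void *, const void *, size_t);

    static void *wrapped_memcpy(void *to, const void *from, size_t n) {
      return memcpy(to, from, n);
    }

    /* the real table also lists the MMX/MMXEXT/SSE routines together with
     * their CPU-feature requirements */
    static const struct { const char *name; copy_fn_t fn; } methods[] = {
      { "glibc memcpy()",   memcpy },
      { "wrapped memcpy()", wrapped_memcpy },
    };

    int main(void) {
      char *buf1 = malloc(BUFSIZE), *buf2 = malloc(BUFSIZE);
      size_t i, best = 0;
      clock_t best_time = 0;

      if (!buf1 || !buf2) return 1;
      memcpy(buf1, buf2, BUFSIZE);           /* fault the pages in before timing */

      for (i = 0; i < sizeof(methods) / sizeof(methods[0]); i++) {
        int j;
        clock_t t = clock();
        for (j = 0; j < 100; j++)
          methods[i].fn(buf1, buf2, BUFSIZE);
        t = clock() - t;
        printf("%-20s : %ld\n", methods[i].name, (long)t);
        if (i == 0 || t < best_time) { best_time = t; best = i; }
      }
      printf("using %s\n", methods[best].name);
      free(buf1);
      free(buf2);
      return 0;
    }
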