author     Stephen Torri <storri@users.sourceforge.net>  2002-10-22 05:15:37 +0000
committer  Stephen Torri <storri@users.sourceforge.net>  2002-10-22 05:15:37 +0000
commit     660e0c2f655995a4fdcbb701f113e52c60eedf63 (patch)
tree       eb0bf3f811c0f9eb74ee19623ffdb999b68e3a9a /src
parent     bf51b667bb3dabeba965e65307181322fa4ec1b3 (diff)
Changed comments to standard /* ... */
CVS patchset: 2920
CVS date: 2002/10/22 05:15:37
Diffstat (limited to 'src')
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h |  8
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h | 18
2 files changed, 13 insertions(+), 13 deletions(-)
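For context, the patch converts C++-style "//" comments to the "/* ... */" form required by C89/C90; these headers are compiled as plain C, and a compiler run in strict ANSI mode rejects "//" comments (they only became standard in C99). A minimal sketch of the difference follows; the file name and build command are illustrative assumptions, not part of this commit:

    /* comment_demo.c -- hypothetical illustration, not part of xine-lib.
     * Builds cleanly with: gcc -ansi -pedantic-errors comment_demo.c
     */
    #include <stdio.h>

    int main(void)
    {
        /* C89-standard comment: accepted by every C compiler */
        int rnd = 2; /* =2 for rnd and =1 for no_rnd version */

        /* A C++-style line such as
         *     // =2 for rnd and =1 for no_rnd version
         * is not valid C89 and fails under -ansi -pedantic-errors,
         * which is why the patch rewrites such comments below.
         */
        printf("%d\n", rnd);
        return 0;
    }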
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h
index 6873432ce..818ec0e6d 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h
+++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx_avg.h
@@ -21,7 +21,7 @@
* mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
* and improved by Zdenek Kabelac <kabi@users.sf.net>
*/
-
+
/* XXX: we use explicit registers to avoid a gcc 2.95.2 register asm
clobber bug - now it will work with 2.95.2 and also with -fPIC
*/
@@ -92,7 +92,7 @@ static void DEF(put_pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_siz
:"r" (line_size)
:"%eax", "memory");
}
-
+
/* GL: this function does incorrect rounding if overflow */
static void DEF(put_no_rnd_pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
@@ -293,7 +293,7 @@ static void DEF(avg_pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_size
:"%eax", "memory");
}
-// Note this is not correctly rounded, but this function is only used for b frames so it doesnt matter
+/* Note this is not correctly rounded, but this function is only used for b frames so it doesnt matter */
static void DEF(avg_pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BONE(mm6);
@@ -335,7 +335,7 @@ static void DEF(avg_pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_siz
:"%eax", "memory");
}
-//FIXME the following could be optimized too ...
+/* FIXME the following could be optimized too ... */
static void DEF(put_no_rnd_pixels16_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
DEF(put_no_rnd_pixels8_x2)(block , pixels , line_size, h);
DEF(put_no_rnd_pixels8_x2)(block+8, pixels+8, line_size, h);
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
index 3605e03f9..a6e84c199 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
+++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
@@ -21,7 +21,7 @@
* and improved by Zdenek Kabelac <kabi@users.sf.net>
*/
-// put_pixels
+/* put_pixels */
static void DEF(put, pixels8_x2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BFE(mm6);
@@ -132,7 +132,7 @@ static void DEF(put, pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_siz
static void DEF(put, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_ZERO(mm7);
- SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
+ SET_RND(mm6); /* =2 for rnd and =1 for no_rnd version */
__asm __volatile(
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm4 \n\t"
@@ -168,7 +168,7 @@ static void DEF(put, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_si
"movq %%mm4, (%2, %%eax) \n\t"
"addl %3, %%eax \n\t"
- "movq (%1, %%eax), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
+ "movq (%1, %%eax), %%mm2 \n\t" /* 0 <-> 2 1 <-> 3 */
"movq 1(%1, %%eax), %%mm4 \n\t"
"movq %%mm2, %%mm3 \n\t"
"movq %%mm4, %%mm5 \n\t"
@@ -195,8 +195,8 @@ static void DEF(put, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_si
:"eax", "memory");
}
-// avg_pixels
-// in case more speed is needed - unroling would certainly help
+/* avg_pixels */
+/* in case more speed is needed - unroling would certainly help */
static void DEF(avg, pixels8)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_BFE(mm6);
@@ -324,11 +324,11 @@ static void DEF(avg, pixels8_y2)(UINT8 *block, const UINT8 *pixels, int line_siz
:"eax", "memory");
}
-// this routine is 'slightly' suboptimal but mostly unused
+/* this routine is 'slightly' suboptimal but mostly unused */
static void DEF(avg, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_size, int h)
{
MOVQ_ZERO(mm7);
- SET_RND(mm6); // =2 for rnd and =1 for no_rnd version
+ SET_RND(mm6); /* =2 for rnd and =1 for no_rnd version */
__asm __volatile(
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm4 \n\t"
@@ -368,7 +368,7 @@ static void DEF(avg, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_si
"movq %%mm5, (%2, %%eax) \n\t"
"addl %3, %%eax \n\t"
- "movq (%1, %%eax), %%mm2 \n\t" // 0 <-> 2 1 <-> 3
+ "movq (%1, %%eax), %%mm2 \n\t" /* 0 <-> 2 1 <-> 3 */
"movq 1(%1, %%eax), %%mm4 \n\t"
"movq %%mm2, %%mm3 \n\t"
"movq %%mm4, %%mm5 \n\t"
@@ -399,7 +399,7 @@ static void DEF(avg, pixels8_xy2)(UINT8 *block, const UINT8 *pixels, int line_si
:"eax", "memory");
}
-//FIXME optimize
+/* FIXME optimize */
static void DEF(put, pixels16_y2)(UINT8 *block, const UINT8 *pixels, int line_size, int h){
DEF(put, pixels8_y2)(block , pixels , line_size, h);
DEF(put, pixels8_y2)(block+8, pixels+8, line_size, h);