author     Miguel Freitas <miguelfreitas@users.sourceforge.net>  2005-10-23 02:11:16 +0000
committer  Miguel Freitas <miguelfreitas@users.sourceforge.net>  2005-10-23 02:11:16 +0000
commit     712a40b59a0f451a558d88db1f9f124463016f97 (patch)
tree       82bc80e9b7a5ba09c01acf205c8ac1e20c44914f /src/libffmpeg/libavcodec/i386
parent     640063a719556f8569fe848528b39660c6fc31a6 (diff)
download   xine-lib-712a40b59a0f451a558d88db1f9f124463016f97.tar.gz
           xine-lib-712a40b59a0f451a558d88db1f9f124463016f97.tar.bz2
big ffmpeg update
it is likely to break, so please keep both pieces

CVS patchset: 7767
CVS date: 2005/10/23 02:11:16
Diffstat (limited to 'src/libffmpeg/libavcodec/i386')
-rw-r--r--  src/libffmpeg/libavcodec/i386/Makefile.am               |   3
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_mmx.c             | 191
-rw-r--r--  src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h         |   6
-rw-r--r--  src/libffmpeg/libavcodec/i386/fdct_mmx.c                |   2
-rw-r--r--  src/libffmpeg/libavcodec/i386/h264dsp_mmx.c             | 104
-rw-r--r--  src/libffmpeg/libavcodec/i386/idct_mmx.c                |  46
-rw-r--r--  src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c           | 533
-rw-r--r--  src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c  |  28
-rw-r--r--  src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c             |   1
9 files changed, 840 insertions(+), 74 deletions(-)
diff --git a/src/libffmpeg/libavcodec/i386/Makefile.am b/src/libffmpeg/libavcodec/i386/Makefile.am
index f692ba62b..1a0bf2343 100644
--- a/src/libffmpeg/libavcodec/i386/Makefile.am
+++ b/src/libffmpeg/libavcodec/i386/Makefile.am
@@ -1,6 +1,6 @@
include $(top_srcdir)/misc/Makefile.common
-AM_CFLAGS = $(LIBFFMPEG_CFLAGS)
+AM_CFLAGS = $(LIBFFMPEG_CFLAGS) -I../../libavutil
# CFLAGS is here to filter out -funroll-loops because it causes bad
# behavior of libavcodec
CFLAGS = `echo @CFLAGS@ | sed -e 's/-funroll-loops//g'`
@@ -15,6 +15,7 @@ libavcodec_mmx_src = \
fdct_mmx.c \
fft_sse.c \
idct_mmx.c \
+ idct_mmx_xvid.c \
motion_est_mmx.c \
mpegvideo_mmx.c \
simple_idct_mmx.c \
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c
index 6bd2b32b9..c1dd2176a 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx.c
@@ -29,6 +29,8 @@
//#include <assert.h>
extern const uint8_t ff_h263_loop_filter_strength[32];
+extern void ff_idct_xvid_mmx(short *block);
+extern void ff_idct_xvid_mmx2(short *block);
int mm_flags; /* multimedia extension flags */
@@ -615,31 +617,32 @@ static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
}
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
+ void *dst_reg = dst, *src_reg = src;
+
asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
- "movd %4, %%mm0 \n\t"
- "movd %5, %%mm1 \n\t"
- "movd %6, %%mm2 \n\t"
- "movd %7, %%mm3 \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd (%1,%5), %%mm1 \n\t"
+ "lea (%1, %5, 2), %1 \n\t"
+ "movd (%1), %%mm2 \n\t"
+ "movd (%1,%5), %%mm3 \n\t"
"punpcklbw %%mm1, %%mm0 \n\t"
"punpcklbw %%mm3, %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpcklwd %%mm2, %%mm0 \n\t"
"punpckhwd %%mm2, %%mm1 \n\t"
- "movd %%mm0, %0 \n\t"
+ "movd %%mm0, (%0) \n\t"
"punpckhdq %%mm0, %%mm0 \n\t"
- "movd %%mm0, %1 \n\t"
- "movd %%mm1, %2 \n\t"
+ "movd %%mm0, (%0,%4) \n\t"
+ "lea (%0, %4, 2), %0 \n\t"
+ "movd %%mm1, (%0) \n\t"
"punpckhdq %%mm1, %%mm1 \n\t"
- "movd %%mm1, %3 \n\t"
-
- : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
- "=m" (*(uint32_t*)(dst + 1*dst_stride)),
- "=m" (*(uint32_t*)(dst + 2*dst_stride)),
- "=m" (*(uint32_t*)(dst + 3*dst_stride))
- : "m" (*(uint32_t*)(src + 0*src_stride)),
- "m" (*(uint32_t*)(src + 1*src_stride)),
- "m" (*(uint32_t*)(src + 2*src_stride)),
- "m" (*(uint32_t*)(src + 3*src_stride))
+ "movd %%mm1, (%0,%4) \n\t"
+ : "=&r" (dst_reg),
+ "=&r" (src_reg)
+ : "0" (dst_reg),
+ "1" (src_reg),
+ "r" (dst_stride),
+ "r" (src_stride)
);
}
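For reference, the rewrite above changes only how the eight 32-bit loads and stores are addressed: instead of eight separate memory operands, the asm now takes two pointers plus two strides and walks them with lea, which frees registers for the compiler. A scalar sketch of what transpose4x4 computes (an illustration, not part of the patch):

    #include <stdint.h>

    static void transpose4x4_ref(uint8_t *dst, const uint8_t *src,
                                 int dst_stride, int src_stride)
    {
        /* dst row j receives src column j of a 4x4 block of bytes;
         * the MMX version does the same with punpcklbw/punpcklwd merges */
        int i, j;
        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                dst[j * dst_stride + i] = src[i * src_stride + j];
    }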
@@ -742,31 +745,49 @@ static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int
int tmp;
asm volatile (
"movl %4,%%ecx\n"
+ "shr $1,%%ecx\n"
"pxor %%mm0,%%mm0\n" /* mm0 = 0 */
"pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
"1:\n"
- "movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
- "movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
+ "movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */
+ "movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */
+ "movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */
+ "movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
+ /* todo: mm1-mm2, mm3-mm4 */
+ /* algo: subtract mm1 from mm2 with saturation and vice versa */
+ /* OR the results to get absolute difference */
"movq %%mm1,%%mm5\n"
+ "movq %%mm3,%%mm6\n"
"psubusb %%mm2,%%mm1\n"
+ "psubusb %%mm4,%%mm3\n"
"psubusb %%mm5,%%mm2\n"
+ "psubusb %%mm6,%%mm4\n"
"por %%mm1,%%mm2\n"
+ "por %%mm3,%%mm4\n"
+ /* now convert to 16-bit vectors so we can square them */
"movq %%mm2,%%mm1\n"
+ "movq %%mm4,%%mm3\n"
"punpckhbw %%mm0,%%mm2\n"
+ "punpckhbw %%mm0,%%mm4\n"
"punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
+ "punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
"pmaddwd %%mm2,%%mm2\n"
+ "pmaddwd %%mm4,%%mm4\n"
"pmaddwd %%mm1,%%mm1\n"
+ "pmaddwd %%mm3,%%mm3\n"
- "add %3,%0\n"
- "add %3,%1\n"
+ "lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */
+ "lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */
"paddd %%mm2,%%mm1\n"
+ "paddd %%mm4,%%mm3\n"
"paddd %%mm1,%%mm7\n"
+ "paddd %%mm3,%%mm7\n"
"decl %%ecx\n"
"jnz 1b\n"
@@ -841,6 +862,68 @@ static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int
return tmp;
}
+static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ int tmp;
+ asm volatile (
+ "shr $1,%2\n"
+ "pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */
+ "pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */
+ "1:\n"
+ "movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */
+ "movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */
+ "movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
+ "movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
+
+ /* todo: mm1-mm2, mm3-mm4 */
+ /* algo: subtract mm1 from mm2 with saturation and vice versa */
+ /* OR the results to get absolute difference */
+ "movdqa %%xmm1,%%xmm5\n"
+ "movdqa %%xmm3,%%xmm6\n"
+ "psubusb %%xmm2,%%xmm1\n"
+ "psubusb %%xmm4,%%xmm3\n"
+ "psubusb %%xmm5,%%xmm2\n"
+ "psubusb %%xmm6,%%xmm4\n"
+
+ "por %%xmm1,%%xmm2\n"
+ "por %%xmm3,%%xmm4\n"
+
+ /* now convert to 16-bit vectors so we can square them */
+ "movdqa %%xmm2,%%xmm1\n"
+ "movdqa %%xmm4,%%xmm3\n"
+
+ "punpckhbw %%xmm0,%%xmm2\n"
+ "punpckhbw %%xmm0,%%xmm4\n"
+ "punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
+ "punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */
+
+ "pmaddwd %%xmm2,%%xmm2\n"
+ "pmaddwd %%xmm4,%%xmm4\n"
+ "pmaddwd %%xmm1,%%xmm1\n"
+ "pmaddwd %%xmm3,%%xmm3\n"
+
+ "lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */
+ "lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */
+
+ "paddd %%xmm2,%%xmm1\n"
+ "paddd %%xmm4,%%xmm3\n"
+ "paddd %%xmm1,%%xmm7\n"
+ "paddd %%xmm3,%%xmm7\n"
+
+ "decl %2\n"
+ "jnz 1b\n"
+
+ "movdqa %%xmm7,%%xmm1\n"
+ "psrldq $8, %%xmm7\n" /* shift hi qword to lo */
+ "paddd %%xmm1,%%xmm7\n"
+ "movdqa %%xmm7,%%xmm1\n"
+ "psrldq $4, %%xmm7\n" /* shift hi dword to lo */
+ "paddd %%xmm1,%%xmm7\n"
+ "movd %%xmm7,%3\n"
+ : "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
+ : "r" ((long)line_size));
+ return tmp;
+}
+
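Both the unrolled sse8_mmx and the new sse16_sse2 follow the scheme spelled out in the comments: the absolute difference of unsigned bytes is obtained as (a -sat b) | (b -sat a), the bytes are widened to 16 bits, and pmaddwd squares and pair-sums them; the loops also process two lines per iteration, hence the shr $1 on the line count. A scalar reference for what both kernels compute (a sketch, not from the patch; w is 8 or 16):

    #include <stdint.h>

    static int sse_ref(const uint8_t *pix1, const uint8_t *pix2,
                       int line_size, int w, int h)
    {
        /* sum of squared differences over a w x h block */
        int sum = 0, x, y;
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int d = pix1[x] - pix2[x];
                sum += d * d;   /* |a-b|^2 == (a-b)^2, so the abs trick is exact */
            }
            pix1 += line_size;
            pix2 += line_size;
        }
        return sum;
    }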
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
int tmp;
asm volatile (
@@ -1080,7 +1163,8 @@ static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
return tmp + hf_noise8_mmx(pix+8, line_size, h);
}
-static int nsse16_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ MpegEncContext *c = p;
int score1= sse16_mmx(c, pix1, pix2, line_size, h);
int score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
@@ -1088,7 +1172,8 @@ static int nsse16_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int lin
else return score1 + ABS(score2)*8;
}
-static int nsse8_mmx(MpegEncContext *c, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
+ MpegEncContext *c = p;
int score1= sse8_mmx(c, pix1, pix2, line_size, h);
int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
@@ -1617,11 +1702,9 @@ static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride
"movq 64(%1), %%mm1 \n\t"
MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
- "movq %%mm0, %%mm1 \n\t"
- "psrlq $32, %%mm0 \n\t"
+ "pshufw $0x0E, %%mm0, %%mm1 \n\t"
"paddusw %%mm1, %%mm0 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "psrlq $16, %%mm0 \n\t"
+ "pshufw $0x01, %%mm0, %%mm1 \n\t"
"paddusw %%mm1, %%mm0 \n\t"
"movd %%mm0, %0 \n\t"
@@ -2455,6 +2538,28 @@ static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
ff_vp3_idct_mmx(block);
add_pixels_clamped_mmx(block, dest, line_size);
}
+#ifdef CONFIG_GPL
+static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_idct_xvid_mmx (block);
+ put_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_idct_xvid_mmx (block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_idct_xvid_mmx2 (block);
+ put_pixels_clamped_mmx(block, dest, line_size);
+}
+static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
+{
+ ff_idct_xvid_mmx2 (block);
+ add_pixels_clamped_mmx(block, dest, line_size);
+}
+#endif
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
@@ -2527,6 +2632,18 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->idct = ff_vp3_idct_mmx;
c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
}
+#ifdef CONFIG_GPL
+ }else if(idct_algo==FF_IDCT_XVIDMMX){
+ if(mm_flags & MM_MMXEXT){
+ c->idct_put= ff_idct_xvid_mmx2_put;
+ c->idct_add= ff_idct_xvid_mmx2_add;
+ c->idct = ff_idct_xvid_mmx2;
+ }else{
+ c->idct_put= ff_idct_xvid_mmx_put;
+ c->idct_add= ff_idct_xvid_mmx_add;
+ c->idct = ff_idct_xvid_mmx;
+ }
+#endif
}
}
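With this hook in place, the Xvid IDCT is selected like any other IDCT algorithm, through the codec context. A hedged usage sketch (the alloc call and field names are the era-appropriate libavcodec API, assumed here rather than shown in this patch):

    #include "avcodec.h"

    void pick_xvid_idct(void)
    {
        AVCodecContext *avctx = avcodec_alloc_context();
        avctx->idct_algo = FF_IDCT_XVIDMMX;  /* dsputil_init_mmx() then installs
                                                ff_idct_xvid_mmx2 when MM_MMXEXT
                                                is set, else ff_idct_xvid_mmx */
    }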
@@ -2590,7 +2707,7 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->hadamard8_diff[1]= hadamard8_diff_mmx;
c->pix_norm1 = pix_norm1_mmx;
- c->sse[0] = sse16_mmx;
+ c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
c->sse[1] = sse8_mmx;
c->vsad[4]= vsad_intra16_mmx;
@@ -2716,6 +2833,24 @@ void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
+ c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
+ c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
+ c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
+ c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
+ c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
+ c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
+ c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
+ c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
+
+ c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
+ c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
+ c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
+ c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
+ c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
+ c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
+ c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
+ c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
+
#ifdef CONFIG_ENCODERS
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
#endif //CONFIG_ENCODERS
diff --git a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
index 20ea1b59e..a56374b63 100644
--- a/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
+++ b/src/libffmpeg/libavcodec/i386/dsputil_mmx_rnd.h
@@ -55,7 +55,7 @@ static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line
:REG_a, "memory");
}
-static void DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+static void attribute_unused DEF(put, pixels8_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
{
MOVQ_BFE(mm6);
__asm __volatile(
@@ -151,7 +151,7 @@ static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, int lin
:REG_a, "memory");
}
-static void DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
+static void attribute_unused DEF(put, pixels16_l2)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int dstStride, int src1Stride, int h)
{
MOVQ_BFE(mm6);
__asm __volatile(
@@ -296,7 +296,7 @@ static void DEF(put, pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int lin
}
// avg_pixels
-static void DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+static void attribute_unused DEF(avg, pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
MOVQ_BFE(mm6);
JUMPALIGN();
diff --git a/src/libffmpeg/libavcodec/i386/fdct_mmx.c b/src/libffmpeg/libavcodec/i386/fdct_mmx.c
index aacbe5743..6a13090a1 100644
--- a/src/libffmpeg/libavcodec/i386/fdct_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/fdct_mmx.c
@@ -13,7 +13,7 @@
* a page about fdct at http://www.geocities.com/ssavekar/dct.htm
* Skal's fdct at http://skal.planet-d.net/coding/dct.html
*/
-#include "../common.h"
+#include "common.h"
#include "../dsputil.h"
#include "mmx.h"
diff --git a/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c b/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c
index 47fcf938b..c278affc8 100644
--- a/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/h264dsp_mmx.c
@@ -162,11 +162,10 @@ void ff_h264_idct_add_mmx2(uint8_t *dst, int16_t *block, int stride)
/* delta = (avg(q0, p1>>2) + (d&a))
* - (avg(p0, q1>>2) + (d&~a)) */\
"pavgb %%mm2, %%mm0 \n\t"\
- "movq %%mm5, %%mm6 \n\t"\
- "pand %%mm4, %%mm6 \n\t"\
- "paddusb %%mm6, %%mm0 \n\t"\
+ "pand %%mm5, %%mm4 \n\t"\
+ "paddusb %%mm4, %%mm0 \n\t"\
"pavgb %%mm1, %%mm3 \n\t"\
- "pandn %%mm5, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm4 \n\t"\
"paddusb %%mm4, %%mm3 \n\t"\
/* p0 += clip(delta, -tc0, tc0)
* q0 -= clip(delta, -tc0, tc0) */\
@@ -910,3 +909,100 @@ H264_MC(avg_, 16,mmx2)
#undef H264_CHROMA_OP
#undef H264_CHROMA_MC8_TMPL
+/***********************************/
+/* weighted prediction */
+
+static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
+{
+ int x, y;
+ offset <<= log2_denom;
+ offset += (1 << log2_denom) >> 1;
+ asm volatile(
+ "movd %0, %%mm4 \n\t"
+ "movd %1, %%mm5 \n\t"
+ "movd %2, %%mm6 \n\t"
+ "pshufw $0, %%mm4, %%mm4 \n\t"
+ "pshufw $0, %%mm5, %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ :: "g"(weight), "g"(offset), "g"(log2_denom)
+ );
+ for(y=0; y<h; y+=2){
+ for(x=0; x<w; x+=4){
+ asm volatile(
+ "movd %0, %%mm0 \n\t"
+ "movd %1, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "pmullw %%mm4, %%mm0 \n\t"
+ "pmullw %%mm4, %%mm1 \n\t"
+ "paddw %%mm5, %%mm0 \n\t"
+ "paddw %%mm5, %%mm1 \n\t"
+ "psraw %%mm6, %%mm0 \n\t"
+ "psraw %%mm6, %%mm1 \n\t"
+ "packuswb %%mm7, %%mm0 \n\t"
+ "packuswb %%mm7, %%mm1 \n\t"
+ "movd %%mm0, %0 \n\t"
+ "movd %%mm1, %1 \n\t"
+ : "+m"(*(uint32_t*)(dst+x)),
+ "+m"(*(uint32_t*)(dst+x+stride))
+ );
+ }
+ dst += 2*stride;
+ }
+}
+
+static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offsetd, int offsets, int w, int h)
+{
+ int x, y;
+ int offset = ((offsets + offsetd + 1) | 1) << log2_denom;
+ asm volatile(
+ "movd %0, %%mm3 \n\t"
+ "movd %1, %%mm4 \n\t"
+ "movd %2, %%mm5 \n\t"
+ "movd %3, %%mm6 \n\t"
+ "pshufw $0, %%mm3, %%mm3 \n\t"
+ "pshufw $0, %%mm4, %%mm4 \n\t"
+ "pshufw $0, %%mm5, %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
+ );
+ for(y=0; y<h; y++){
+ for(x=0; x<w; x+=4){
+ asm volatile(
+ "movd %0, %%mm0 \n\t"
+ "movd %1, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "pmullw %%mm3, %%mm0 \n\t"
+ "pmullw %%mm4, %%mm1 \n\t"
+ "paddw %%mm5, %%mm0 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "psraw %%mm6, %%mm0 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "movd %%mm0, %0 \n\t"
+ : "+m"(*(uint32_t*)(dst+x))
+ : "m"(*(uint32_t*)(src+x))
+ );
+ }
+ src += stride;
+ dst += stride;
+ }
+}
+
+#define H264_WEIGHT(W,H) \
+static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offsetd, int offsets){ \
+ ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offsetd, offsets, W, H); \
+} \
+static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
+ ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
+}
+
+H264_WEIGHT(16,16)
+H264_WEIGHT(16, 8)
+H264_WEIGHT( 8,16)
+H264_WEIGHT( 8, 8)
+H264_WEIGHT( 8, 4)
+H264_WEIGHT( 4, 8)
+H264_WEIGHT( 4, 4)
+H264_WEIGHT( 4, 2)
+
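The two WxH kernels above implement the H.264 weighted-prediction formulas; the MMX2 code broadcasts weight and offset with pshufw and clamps through packuswb. A scalar reference of the unidirectional case (a sketch, not from the patch; the biweighted variant is analogous, with two weights, a shift of log2_denom+1, and an offset of ((offsets+offsetd+1)|1) << log2_denom):

    #include <stdint.h>

    static void weight_ref(uint8_t *dst, int stride, int log2_denom,
                           int weight, int offset, int w, int h)
    {
        int x, y;
        offset <<= log2_denom;
        offset += (1 << log2_denom) >> 1;   /* rounding bias, as in the asm */
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = (dst[x] * weight + offset) >> log2_denom;
                dst[x] = v < 0 ? 0 : (v > 255 ? 255 : v);
            }
            dst += stride;
        }
    }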
diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx.c b/src/libffmpeg/libavcodec/i386/idct_mmx.c
index d9586efc5..d1a84549d 100644
--- a/src/libffmpeg/libavcodec/i386/idct_mmx.c
+++ b/src/libffmpeg/libavcodec/i386/idct_mmx.c
@@ -22,7 +22,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include "../common.h"
+#include "common.h"
#include "../dsputil.h"
#include "mmx.h"
@@ -86,7 +86,7 @@ static inline void idct_row (int16_t * row, int offset,
c5, -c1, c3, -c1, \
c7, c3, c7, -c5 }
-static inline void mmxext_row_head (int16_t * row, int offset, int16_t * table)
+static inline void mmxext_row_head (int16_t * row, int offset, const int16_t * table)
{
movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0
@@ -102,7 +102,7 @@ static inline void mmxext_row_head (int16_t * row, int offset, int16_t * table)
pshufw_r2r (mm2, mm2, 0x4e); // mm2 = x2 x0 x6 x4
}
-static inline void mmxext_row (int16_t * table, int32_t * rounder)
+static inline void mmxext_row (const int16_t * table, const int32_t * rounder)
{
movq_m2r (*(table+8), mm1); // mm1 = -C5 -C1 C3 C1
pmaddwd_r2r (mm2, mm4); // mm4 = C4*x0+C6*x2 C4*x4+C6*x6
@@ -160,7 +160,7 @@ static inline void mmxext_row_tail (int16_t * row, int store)
}
static inline void mmxext_row_mid (int16_t * row, int store,
- int offset, int16_t * table)
+ int offset, const int16_t * table)
{
movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0
psrad_i2r (ROW_SHIFT, mm0); // mm0 = y3 y2
@@ -198,7 +198,7 @@ static inline void mmxext_row_mid (int16_t * row, int store,
c5, -c1, c7, -c5, \
c7, c3, c3, -c1 }
-static inline void mmx_row_head (int16_t * row, int offset, int16_t * table)
+static inline void mmx_row_head (int16_t * row, int offset, const int16_t * table)
{
movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0
@@ -217,7 +217,7 @@ static inline void mmx_row_head (int16_t * row, int offset, int16_t * table)
punpckhdq_r2r (mm2, mm2); // mm2 = x6 x4 x6 x4
}
-static inline void mmx_row (int16_t * table, int32_t * rounder)
+static inline void mmx_row (const int16_t * table, const int32_t * rounder)
{
pmaddwd_r2r (mm2, mm4); // mm4 = -C4*x4-C2*x6 C4*x4+C6*x6
punpckldq_r2r (mm5, mm5); // mm5 = x3 x1 x3 x1
@@ -281,7 +281,7 @@ static inline void mmx_row_tail (int16_t * row, int store)
}
static inline void mmx_row_mid (int16_t * row, int store,
- int offset, int16_t * table)
+ int offset, const int16_t * table)
{
movq_m2r (*(row+offset), mm2); // mm2 = x6 x4 x2 x0
psrad_i2r (ROW_SHIFT, mm0); // mm0 = y3 y2
@@ -395,10 +395,10 @@ static inline void idct_col (int16_t * col, int offset)
#define T3 43790
#define C4 23170
- static short _T1[] ATTR_ALIGN(8) = {T1,T1,T1,T1};
- static short _T2[] ATTR_ALIGN(8) = {T2,T2,T2,T2};
- static short _T3[] ATTR_ALIGN(8) = {T3,T3,T3,T3};
- static short _C4[] ATTR_ALIGN(8) = {C4,C4,C4,C4};
+ static const short _T1[] ATTR_ALIGN(8) = {T1,T1,T1,T1};
+ static const short _T2[] ATTR_ALIGN(8) = {T2,T2,T2,T2};
+ static const short _T3[] ATTR_ALIGN(8) = {T3,T3,T3,T3};
+ static const short _C4[] ATTR_ALIGN(8) = {C4,C4,C4,C4};
/* column code adapted from peter gubanov */
/* http://www.elecard.com/peter/idct.shtml */
@@ -536,20 +536,20 @@ static inline void idct_col (int16_t * col, int offset)
#undef C4
}
-static int32_t rounder0[] ATTR_ALIGN(8) =
+static const int32_t rounder0[] ATTR_ALIGN(8) =
rounder ((1 << (COL_SHIFT - 1)) - 0.5);
-static int32_t rounder4[] ATTR_ALIGN(8) = rounder (0);
-static int32_t rounder1[] ATTR_ALIGN(8) =
+static const int32_t rounder4[] ATTR_ALIGN(8) = rounder (0);
+static const int32_t rounder1[] ATTR_ALIGN(8) =
rounder (1.25683487303); /* C1*(C1/C4+C1+C7)/2 */
-static int32_t rounder7[] ATTR_ALIGN(8) =
+static const int32_t rounder7[] ATTR_ALIGN(8) =
rounder (-0.25); /* C1*(C7/C4+C7-C1)/2 */
-static int32_t rounder2[] ATTR_ALIGN(8) =
+static const int32_t rounder2[] ATTR_ALIGN(8) =
rounder (0.60355339059); /* C2 * (C6+C2)/2 */
-static int32_t rounder6[] ATTR_ALIGN(8) =
+static const int32_t rounder6[] ATTR_ALIGN(8) =
rounder (-0.25); /* C2 * (C6-C2)/2 */
-static int32_t rounder3[] ATTR_ALIGN(8) =
+static const int32_t rounder3[] ATTR_ALIGN(8) =
rounder (0.087788325588); /* C3*(-C3/C4+C3+C5)/2 */
-static int32_t rounder5[] ATTR_ALIGN(8) =
+static const int32_t rounder5[] ATTR_ALIGN(8) =
rounder (-0.441341716183); /* C3*(-C5/C4+C5-C3)/2 */
#undef COL_SHIFT
@@ -558,13 +558,13 @@ static int32_t rounder5[] ATTR_ALIGN(8) =
#define declare_idct(idct,table,idct_row_head,idct_row,idct_row_tail,idct_row_mid) \
void idct (int16_t * block) \
{ \
- static int16_t table04[] ATTR_ALIGN(16) = \
+ static const int16_t table04[] ATTR_ALIGN(16) = \
table (22725, 21407, 19266, 16384, 12873, 8867, 4520); \
- static int16_t table17[] ATTR_ALIGN(16) = \
+ static const int16_t table17[] ATTR_ALIGN(16) = \
table (31521, 29692, 26722, 22725, 17855, 12299, 6270); \
- static int16_t table26[] ATTR_ALIGN(16) = \
+ static const int16_t table26[] ATTR_ALIGN(16) = \
table (29692, 27969, 25172, 21407, 16819, 11585, 5906); \
- static int16_t table35[] ATTR_ALIGN(16) = \
+ static const int16_t table35[] ATTR_ALIGN(16) = \
table (26722, 25172, 22654, 19266, 15137, 10426, 5315); \
\
idct_row_head (block, 0*8, table04); \
diff --git a/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c b/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c
new file mode 100644
index 000000000..aff57e3fa
--- /dev/null
+++ b/src/libffmpeg/libavcodec/i386/idct_mmx_xvid.c
@@ -0,0 +1,533 @@
+///****************************************************************************
+// *
+// * XVID MPEG-4 VIDEO CODEC
+// * - MMX and XMM inverse discrete cosine transform -
+// *
+// * Copyright(C) 2001 Peter Ross <pross@xvid.org>
+// *
+// * This program is free software; you can redistribute it and/or modify it
+// * under the terms of the GNU General Public License as published by
+// * the Free Software Foundation; either version 2 of the License, or
+// * (at your option) any later version.
+// *
+// * This program is distributed in the hope that it will be useful,
+// * but WITHOUT ANY WARRANTY; without even the implied warranty of
+// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// * GNU General Public License for more details.
+// *
+// * You should have received a copy of the GNU General Public License
+// * along with this program; if not, write to the Free Software
+// * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+// *
+// * $Id: idct_mmx_xvid.c,v 1.1 2005/10/23 02:11:44 miguelfreitas Exp $
+// *
+// ***************************************************************************/
+
+// ****************************************************************************
+//
+// Originally provided by Intel at AP-922
+// http://developer.intel.com/vtune/cbts/strmsimd/922down.htm
+// (See more app notes at http://developer.intel.com/vtune/cbts/strmsimd/appnotes.htm)
+// but in a limited edition.
+// New macro implements a column part for precise iDCT
+// The routine precision now satisfies IEEE standard 1180-1990.
+//
+// Copyright(C) 2000-2001 Peter Gubanov <peter@elecard.net.ru>
+// Rounding trick Copyright(C) 2000 Michel Lespinasse <walken@zoy.org>
+//
+// http://www.elecard.com/peter/idct.html
+// http://www.linuxvideo.org/mpeg2dec/
+//
+// ***************************************************************************/
+//
+// These examples contain code fragments for first stage iDCT 8x8
+// (for rows) and first stage DCT 8x8 (for columns)
+//
+
+// conversion to gcc syntax by michael niedermayer
+
+
+#include <inttypes.h>
+#include "../avcodec.h"
+
+//=============================================================================
+// Macros and other preprocessor constants
+//=============================================================================
+
+#define BITS_INV_ACC 5 // 4 or 5 for IEEE
+#define SHIFT_INV_ROW (16 - BITS_INV_ACC) //11
+#define SHIFT_INV_COL (1 + BITS_INV_ACC) //6
+#define RND_INV_ROW (1024 * (6 - BITS_INV_ACC))
+#define RND_INV_COL (16 * (BITS_INV_ACC - 3))
+#define RND_INV_CORR (RND_INV_COL - 1)
+
+#define BITS_FRW_ACC 3 // 2 or 3 for accuracy
+#define SHIFT_FRW_COL BITS_FRW_ACC
+#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17)
+#define RND_FRW_ROW (262144*(BITS_FRW_ACC - 1))
+
+
+//-----------------------------------------------------------------------------
+// Various memory constants (trigonometric values or rounding values)
+//-----------------------------------------------------------------------------
+
+
+static const int16_t tg_1_16[4*4] attribute_used __attribute__ ((aligned(8))) = {
+ 13036,13036,13036,13036, // tg * (1<<16) + 0.5
+ 27146,27146,27146,27146, // tg * (1<<16) + 0.5
+ -21746,-21746,-21746,-21746, // tg * (1<<16) + 0.5
+ 23170,23170,23170,23170}; // cos * (1<<15) + 0.5
+
+static const int32_t rounder_0[2*8] attribute_used __attribute__ ((aligned(8))) = {
+ 65536,65536,
+ 3597,3597,
+ 2260,2260,
+ 1203,1203,
+ 0,0,
+ 120,120,
+ 512,512,
+ 512,512};
+
+//-----------------------------------------------------------------------------
+//
+// The first stage iDCT 8x8 - inverse DCTs of rows
+//
+//-----------------------------------------------------------------------------
+// The 8-point inverse DCT direct algorithm
+//-----------------------------------------------------------------------------
+//
+// static const short w[32] = {
+// FIX(cos_4_16), FIX(cos_2_16), FIX(cos_4_16), FIX(cos_6_16),
+// FIX(cos_4_16), FIX(cos_6_16), -FIX(cos_4_16), -FIX(cos_2_16),
+// FIX(cos_4_16), -FIX(cos_6_16), -FIX(cos_4_16), FIX(cos_2_16),
+// FIX(cos_4_16), -FIX(cos_2_16), FIX(cos_4_16), -FIX(cos_6_16),
+// FIX(cos_1_16), FIX(cos_3_16), FIX(cos_5_16), FIX(cos_7_16),
+// FIX(cos_3_16), -FIX(cos_7_16), -FIX(cos_1_16), -FIX(cos_5_16),
+// FIX(cos_5_16), -FIX(cos_1_16), FIX(cos_7_16), FIX(cos_3_16),
+// FIX(cos_7_16), -FIX(cos_5_16), FIX(cos_3_16), -FIX(cos_1_16) };
+//
+// #define DCT_8_INV_ROW(x, y)
+// {
+// int a0, a1, a2, a3, b0, b1, b2, b3;
+//
+// a0 = x[0] * w[ 0] + x[2] * w[ 1] + x[4] * w[ 2] + x[6] * w[ 3];
+// a1 = x[0] * w[ 4] + x[2] * w[ 5] + x[4] * w[ 6] + x[6] * w[ 7];
+// a2 = x[0] * w[ 8] + x[2] * w[ 9] + x[4] * w[10] + x[6] * w[11];
+// a3 = x[0] * w[12] + x[2] * w[13] + x[4] * w[14] + x[6] * w[15];
+// b0 = x[1] * w[16] + x[3] * w[17] + x[5] * w[18] + x[7] * w[19];
+// b1 = x[1] * w[20] + x[3] * w[21] + x[5] * w[22] + x[7] * w[23];
+// b2 = x[1] * w[24] + x[3] * w[25] + x[5] * w[26] + x[7] * w[27];
+// b3 = x[1] * w[28] + x[3] * w[29] + x[5] * w[30] + x[7] * w[31];
+//
+// y[0] = SHIFT_ROUND ( a0 + b0 );
+// y[1] = SHIFT_ROUND ( a1 + b1 );
+// y[2] = SHIFT_ROUND ( a2 + b2 );
+// y[3] = SHIFT_ROUND ( a3 + b3 );
+// y[4] = SHIFT_ROUND ( a3 - b3 );
+// y[5] = SHIFT_ROUND ( a2 - b2 );
+// y[6] = SHIFT_ROUND ( a1 - b1 );
+// y[7] = SHIFT_ROUND ( a0 - b0 );
+// }
+//
+//-----------------------------------------------------------------------------
+//
+// In this implementation the outputs of the iDCT-1D are multiplied
+// for rows 0,4 - by cos_4_16,
+// for rows 1,7 - by cos_1_16,
+// for rows 2,6 - by cos_2_16,
+// for rows 3,5 - by cos_3_16
+// and are shifted to the left for better accuracy
+//
+// For the constants used,
+// FIX(float_const) = (short) (float_const * (1<<15) + 0.5)
+//
+//-----------------------------------------------------------------------------
+
+//-----------------------------------------------------------------------------
+// Tables for mmx processors
+//-----------------------------------------------------------------------------
+
+// Table for rows 0,4 - constants are multiplied by cos_4_16
+static const int16_t tab_i_04_mmx[32*4] attribute_used __attribute__ ((aligned(8))) = {
+ 16384,16384,16384,-16384, // movq-> w06 w04 w02 w00
+ 21407,8867,8867,-21407, // w07 w05 w03 w01
+ 16384,-16384,16384,16384, // w14 w12 w10 w08
+ -8867,21407,-21407,-8867, // w15 w13 w11 w09
+ 22725,12873,19266,-22725, // w22 w20 w18 w16
+ 19266,4520,-4520,-12873, // w23 w21 w19 w17
+ 12873,4520,4520,19266, // w30 w28 w26 w24
+ -22725,19266,-12873,-22725, // w31 w29 w27 w25
+// Table for rows 1,7 - constants are multiplied by cos_1_16
+ 22725,22725,22725,-22725, // movq-> w06 w04 w02 w00
+ 29692,12299,12299,-29692, // w07 w05 w03 w01
+ 22725,-22725,22725,22725, // w14 w12 w10 w08
+ -12299,29692,-29692,-12299, // w15 w13 w11 w09
+ 31521,17855,26722,-31521, // w22 w20 w18 w16
+ 26722,6270,-6270,-17855, // w23 w21 w19 w17
+ 17855,6270,6270,26722, // w30 w28 w26 w24
+ -31521,26722,-17855,-31521, // w31 w29 w27 w25
+// Table for rows 2,6 - constants are multiplied by cos_2_16
+ 21407,21407,21407,-21407, // movq-> w06 w04 w02 w00
+ 27969,11585,11585,-27969, // w07 w05 w03 w01
+ 21407,-21407,21407,21407, // w14 w12 w10 w08
+ -11585,27969,-27969,-11585, // w15 w13 w11 w09
+ 29692,16819,25172,-29692, // w22 w20 w18 w16
+ 25172,5906,-5906,-16819, // w23 w21 w19 w17
+ 16819,5906,5906,25172, // w30 w28 w26 w24
+ -29692,25172,-16819,-29692, // w31 w29 w27 w25
+// Table for rows 3,5 - constants are multiplied by cos_3_16
+ 19266,19266,19266,-19266, // movq-> w06 w04 w02 w00
+ 25172,10426,10426,-25172, // w07 w05 w03 w01
+ 19266,-19266,19266,19266, // w14 w12 w10 w08
+ -10426,25172,-25172,-10426, // w15 w13 w11 w09
+ 26722,15137,22654,-26722, // w22 w20 w18 w16
+ 22654,5315,-5315,-15137, // w23 w21 w19 w17
+ 15137,5315,5315,22654, // w30 w28 w26 w24
+ -26722,22654,-15137,-26722, // w31 w29 w27 w25
+};
+//-----------------------------------------------------------------------------
+// Tables for xmm processors
+//-----------------------------------------------------------------------------
+
+// Table for rows 0,4 - constants are multiplied by cos_4_16
+static const int16_t tab_i_04_xmm[32*4] attribute_used __attribute__ ((aligned(8))) = {
+ 16384,21407,16384,8867, // movq-> w05 w04 w01 w00
+ 16384,8867,-16384,-21407, // w07 w06 w03 w02
+ 16384,-8867,16384,-21407, // w13 w12 w09 w08
+ -16384,21407,16384,-8867, // w15 w14 w11 w10
+ 22725,19266,19266,-4520, // w21 w20 w17 w16
+ 12873,4520,-22725,-12873, // w23 w22 w19 w18
+ 12873,-22725,4520,-12873, // w29 w28 w25 w24
+ 4520,19266,19266,-22725, // w31 w30 w27 w26
+// Table for rows 1,7 - constants are multiplied by cos_1_16
+ 22725,29692,22725,12299, // movq-> w05 w04 w01 w00
+ 22725,12299,-22725,-29692, // w07 w06 w03 w02
+ 22725,-12299,22725,-29692, // w13 w12 w09 w08
+ -22725,29692,22725,-12299, // w15 w14 w11 w10
+ 31521,26722,26722,-6270, // w21 w20 w17 w16
+ 17855,6270,-31521,-17855, // w23 w22 w19 w18
+ 17855,-31521,6270,-17855, // w29 w28 w25 w24
+ 6270,26722,26722,-31521, // w31 w30 w27 w26
+// Table for rows 2,6 - constants are multiplied by cos_2_16
+ 21407,27969,21407,11585, // movq-> w05 w04 w01 w00
+ 21407,11585,-21407,-27969, // w07 w06 w03 w02
+ 21407,-11585,21407,-27969, // w13 w12 w09 w08
+ -21407,27969,21407,-11585, // w15 w14 w11 w10
+ 29692,25172,25172,-5906, // w21 w20 w17 w16
+ 16819,5906,-29692,-16819, // w23 w22 w19 w18
+ 16819,-29692,5906,-16819, // w29 w28 w25 w24
+ 5906,25172,25172,-29692, // w31 w30 w27 w26
+// Table for rows 3,5 - constants are multiplied by cos_3_16
+ 19266,25172,19266,10426, // movq-> w05 w04 w01 w00
+ 19266,10426,-19266,-25172, // w07 w06 w03 w02
+ 19266,-10426,19266,-25172, // w13 w12 w09 w08
+ -19266,25172,19266,-10426, // w15 w14 w11 w10
+ 26722,22654,22654,-5315, // w21 w20 w17 w16
+ 15137,5315,-26722,-15137, // w23 w22 w19 w18
+ 15137,-26722,5315,-15137, // w29 w28 w25 w24
+ 5315,22654,22654,-26722, // w31 w30 w27 w26
+};
+//=============================================================================
+// Helper macros for the code
+//=============================================================================
+
+//-----------------------------------------------------------------------------
+// DCT_8_INV_ROW_MMX( INP, OUT, TABLE, ROUNDER )
+//-----------------------------------------------------------------------------
+
+#define DCT_8_INV_ROW_MMX(A1,A2,A3,A4)\
+ "movq " #A1 ",%%mm0 \n\t"/* 0 ; x3 x2 x1 x0*/\
+ "movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\
+ "movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\
+ "movq " #A3 ",%%mm3 \n\t"/* 3 ; w06 w04 w02 w00*/\
+ "punpcklwd %%mm1,%%mm0 \n\t"/* x5 x1 x4 x0*/\
+ "movq %%mm0,%%mm5 \n\t"/* 5 ; x5 x1 x4 x0*/\
+ "punpckldq %%mm0,%%mm0 \n\t"/* x4 x0 x4 x0*/\
+ "movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w05 w03 w01*/\
+ "punpckhwd %%mm1,%%mm2 \n\t"/* 1 ; x7 x3 x6 x2*/\
+ "pmaddwd %%mm0,%%mm3 \n\t"/* x4*w06+x0*w04 x4*w02+x0*w00*/\
+ "movq %%mm2,%%mm6 \n\t"/* 6 ; x7 x3 x6 x2*/\
+ "movq 32+" #A3 ",%%mm1 \n\t"/* 1 ; w22 w20 w18 w16*/\
+ "punpckldq %%mm2,%%mm2 \n\t"/* x6 x2 x6 x2*/\
+ "pmaddwd %%mm2,%%mm4 \n\t"/* x6*w07+x2*w05 x6*w03+x2*w01*/\
+ "punpckhdq %%mm5,%%mm5 \n\t"/* x5 x1 x5 x1*/\
+ "pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x4*w14+x0*w12 x4*w10+x0*w08*/\
+ "punpckhdq %%mm6,%%mm6 \n\t"/* x7 x3 x7 x3*/\
+ "movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w21 w19 w17*/\
+ "pmaddwd %%mm5,%%mm1 \n\t"/* x5*w22+x1*w20 x5*w18+x1*w16*/\
+ "paddd " #A4 ",%%mm3 \n\t"/* +%4*/\
+ "pmaddwd %%mm6,%%mm7 \n\t"/* x7*w23+x3*w21 x7*w19+x3*w17*/\
+ "pmaddwd 24+" #A3 ",%%mm2 \n\t"/* x6*w15+x2*w13 x6*w11+x2*w09*/\
+ "paddd %%mm4,%%mm3 \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\
+ "pmaddwd 48+" #A3 ",%%mm5 \n\t"/* x5*w30+x1*w28 x5*w26+x1*w24*/\
+ "movq %%mm3,%%mm4 \n\t"/* 4 ; a1 a0*/\
+ "pmaddwd 56+" #A3 ",%%mm6 \n\t"/* x7*w31+x3*w29 x7*w27+x3*w25*/\
+ "paddd %%mm7,%%mm1 \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\
+ "paddd " #A4 ",%%mm0 \n\t"/* +%4*/\
+ "psubd %%mm1,%%mm3 \n\t"/* a1-b1 a0-b0*/\
+ "psrad $11,%%mm3 \n\t"/* y6=a1-b1 y7=a0-b0*/\
+ "paddd %%mm4,%%mm1 \n\t"/* 4 ; a1+b1 a0+b0*/\
+ "paddd %%mm2,%%mm0 \n\t"/* 2 ; a3=sum(even3) a2=sum(even2)*/\
+ "psrad $11,%%mm1 \n\t"/* y1=a1+b1 y0=a0+b0*/\
+ "paddd %%mm6,%%mm5 \n\t"/* 6 ; b3=sum(odd3) b2=sum(odd2)*/\
+ "movq %%mm0,%%mm4 \n\t"/* 4 ; a3 a2*/\
+ "paddd %%mm5,%%mm0 \n\t"/* a3+b3 a2+b2*/\
+ "psubd %%mm5,%%mm4 \n\t"/* 5 ; a3-b3 a2-b2*/\
+ "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\
+ "psrad $11,%%mm4 \n\t"/* y4=a3-b3 y5=a2-b2*/\
+ "packssdw %%mm0,%%mm1 \n\t"/* 0 ; y3 y2 y1 y0*/\
+ "packssdw %%mm3,%%mm4 \n\t"/* 3 ; y6 y7 y4 y5*/\
+ "movq %%mm4,%%mm7 \n\t"/* 7 ; y6 y7 y4 y5*/\
+ "psrld $16,%%mm4 \n\t"/* 0 y6 0 y4*/\
+ "pslld $16,%%mm7 \n\t"/* y7 0 y5 0*/\
+ "movq %%mm1," #A2 " \n\t"/* 1 ; save y3 y2 y1 y0*/\
+ "por %%mm4,%%mm7 \n\t"/* 4 ; y7 y6 y5 y4*/\
+ "movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\
+
+
+//-----------------------------------------------------------------------------
+// DCT_8_INV_ROW_XMM( INP, OUT, TABLE, ROUNDER )
+//-----------------------------------------------------------------------------
+
+#define DCT_8_INV_ROW_XMM(A1,A2,A3,A4)\
+ "movq " #A1 ",%%mm0 \n\t"/* 0 ; x3 x2 x1 x0*/\
+ "movq 8+" #A1 ",%%mm1 \n\t"/* 1 ; x7 x6 x5 x4*/\
+ "movq %%mm0,%%mm2 \n\t"/* 2 ; x3 x2 x1 x0*/\
+ "movq " #A3 ",%%mm3 \n\t"/* 3 ; w05 w04 w01 w00*/\
+ "pshufw $0b10001000,%%mm0,%%mm0 \n\t"/* x2 x0 x2 x0*/\
+ "movq 8+" #A3 ",%%mm4 \n\t"/* 4 ; w07 w06 w03 w02*/\
+ "movq %%mm1,%%mm5 \n\t"/* 5 ; x7 x6 x5 x4*/\
+ "pmaddwd %%mm0,%%mm3 \n\t"/* x2*w05+x0*w04 x2*w01+x0*w00*/\
+ "movq 32+" #A3 ",%%mm6 \n\t"/* 6 ; w21 w20 w17 w16*/\
+ "pshufw $0b10001000,%%mm1,%%mm1 \n\t"/* x6 x4 x6 x4*/\
+ "pmaddwd %%mm1,%%mm4 \n\t"/* x6*w07+x4*w06 x6*w03+x4*w02*/\
+ "movq 40+" #A3 ",%%mm7 \n\t"/* 7 ; w23 w22 w19 w18*/\
+ "pshufw $0b11011101,%%mm2,%%mm2 \n\t"/* x3 x1 x3 x1*/\
+ "pmaddwd %%mm2,%%mm6 \n\t"/* x3*w21+x1*w20 x3*w17+x1*w16*/\
+ "pshufw $0b11011101,%%mm5,%%mm5 \n\t"/* x7 x5 x7 x5*/\
+ "pmaddwd %%mm5,%%mm7 \n\t"/* x7*w23+x5*w22 x7*w19+x5*w18*/\
+ "paddd " #A4 ",%%mm3 \n\t"/* +%4*/\
+ "pmaddwd 16+" #A3 ",%%mm0 \n\t"/* x2*w13+x0*w12 x2*w09+x0*w08*/\
+ "paddd %%mm4,%%mm3 \n\t"/* 4 ; a1=sum(even1) a0=sum(even0)*/\
+ "pmaddwd 24+" #A3 ",%%mm1 \n\t"/* x6*w15+x4*w14 x6*w11+x4*w10*/\
+ "movq %%mm3,%%mm4 \n\t"/* 4 ; a1 a0*/\
+ "pmaddwd 48+" #A3 ",%%mm2 \n\t"/* x3*w29+x1*w28 x3*w25+x1*w24*/\
+ "paddd %%mm7,%%mm6 \n\t"/* 7 ; b1=sum(odd1) b0=sum(odd0)*/\
+ "pmaddwd 56+" #A3 ",%%mm5 \n\t"/* x7*w31+x5*w30 x7*w27+x5*w26*/\
+ "paddd %%mm6,%%mm3 \n\t"/* a1+b1 a0+b0*/\
+ "paddd " #A4 ",%%mm0 \n\t"/* +%4*/\
+ "psrad $11,%%mm3 \n\t"/* y1=a1+b1 y0=a0+b0*/\
+ "paddd %%mm1,%%mm0 \n\t"/* 1 ; a3=sum(even3) a2=sum(even2)*/\
+ "psubd %%mm6,%%mm4 \n\t"/* 6 ; a1-b1 a0-b0*/\
+ "movq %%mm0,%%mm7 \n\t"/* 7 ; a3 a2*/\
+ "paddd %%mm5,%%mm2 \n\t"/* 5 ; b3=sum(odd3) b2=sum(odd2)*/\
+ "paddd %%mm2,%%mm0 \n\t"/* a3+b3 a2+b2*/\
+ "psrad $11,%%mm4 \n\t"/* y6=a1-b1 y7=a0-b0*/\
+ "psubd %%mm2,%%mm7 \n\t"/* 2 ; a3-b3 a2-b2*/\
+ "psrad $11,%%mm0 \n\t"/* y3=a3+b3 y2=a2+b2*/\
+ "psrad $11,%%mm7 \n\t"/* y4=a3-b3 y5=a2-b2*/\
+ "packssdw %%mm0,%%mm3 \n\t"/* 0 ; y3 y2 y1 y0*/\
+ "packssdw %%mm4,%%mm7 \n\t"/* 4 ; y6 y7 y4 y5*/\
+ "movq %%mm3, " #A2 " \n\t"/* 3 ; save y3 y2 y1 y0*/\
+ "pshufw $0b10110001,%%mm7,%%mm7 \n\t"/* y7 y6 y5 y4*/\
+ "movq %%mm7,8 +" #A2 "\n\t"/* 7 ; save y7 y6 y5 y4*/\
+
+
+//-----------------------------------------------------------------------------
+//
+// The first stage DCT 8x8 - forward DCTs of columns
+//
+// The inputs are multiplied
+// for rows 0,4 - by cos_4_16,
+// for rows 1,7 - by cos_1_16,
+// for rows 2,6 - by cos_2_16,
+// for rows 3,5 - by cos_3_16
+// and are shifted to the left for better accuracy
+//
+//-----------------------------------------------------------------------------
+//
+// The 8-point scaled forward DCT algorithm (26a8m)
+//
+//-----------------------------------------------------------------------------
+//
+// #define DCT_8_FRW_COL(x, y)
+//{
+// short t0, t1, t2, t3, t4, t5, t6, t7;
+// short tp03, tm03, tp12, tm12, tp65, tm65;
+// short tp465, tm465, tp765, tm765;
+//
+// t0 = LEFT_SHIFT ( x[0] + x[7] );
+// t1 = LEFT_SHIFT ( x[1] + x[6] );
+// t2 = LEFT_SHIFT ( x[2] + x[5] );
+// t3 = LEFT_SHIFT ( x[3] + x[4] );
+// t4 = LEFT_SHIFT ( x[3] - x[4] );
+// t5 = LEFT_SHIFT ( x[2] - x[5] );
+// t6 = LEFT_SHIFT ( x[1] - x[6] );
+// t7 = LEFT_SHIFT ( x[0] - x[7] );
+//
+// tp03 = t0 + t3;
+// tm03 = t0 - t3;
+// tp12 = t1 + t2;
+// tm12 = t1 - t2;
+//
+// y[0] = tp03 + tp12;
+// y[4] = tp03 - tp12;
+//
+// y[2] = tm03 + tm12 * tg_2_16;
+// y[6] = tm03 * tg_2_16 - tm12;
+//
+// tp65 =(t6 +t5 )*cos_4_16;
+// tm65 =(t6 -t5 )*cos_4_16;
+//
+// tp765 = t7 + tp65;
+// tm765 = t7 - tp65;
+// tp465 = t4 + tm65;
+// tm465 = t4 - tm65;
+//
+// y[1] = tp765 + tp465 * tg_1_16;
+// y[7] = tp765 * tg_1_16 - tp465;
+// y[5] = tm765 * tg_3_16 + tm465;
+// y[3] = tm765 - tm465 * tg_3_16;
+//}
+//
+//-----------------------------------------------------------------------------
+
+//-----------------------------------------------------------------------------
+// DCT_8_INV_COL( INP, OUT )
+//-----------------------------------------------------------------------------
+
+#define DCT_8_INV_COL(A1,A2)\
+ "movq 2*8(%3),%%mm0\n\t"\
+ "movq 16*3+" #A1 ",%%mm3\n\t"\
+ "movq %%mm0,%%mm1 \n\t"/* tg_3_16*/\
+ "movq 16*5+" #A1 ",%%mm5\n\t"\
+ "pmulhw %%mm3,%%mm0 \n\t"/* x3*(tg_3_16-1)*/\
+ "movq (%3),%%mm4\n\t"\
+ "pmulhw %%mm5,%%mm1 \n\t"/* x5*(tg_3_16-1)*/\
+ "movq 16*7+" #A1 ",%%mm7\n\t"\
+ "movq %%mm4,%%mm2 \n\t"/* tg_1_16*/\
+ "movq 16*1+" #A1 ",%%mm6\n\t"\
+ "pmulhw %%mm7,%%mm4 \n\t"/* x7*tg_1_16*/\
+ "paddsw %%mm3,%%mm0 \n\t"/* x3*tg_3_16*/\
+ "pmulhw %%mm6,%%mm2 \n\t"/* x1*tg_1_16*/\
+ "paddsw %%mm3,%%mm1 \n\t"/* x3+x5*(tg_3_16-1)*/\
+ "psubsw %%mm5,%%mm0 \n\t"/* x3*tg_3_16-x5 = tm35*/\
+ "movq 3*8(%3),%%mm3\n\t"\
+ "paddsw %%mm5,%%mm1 \n\t"/* x3+x5*tg_3_16 = tp35*/\
+ "paddsw %%mm6,%%mm4 \n\t"/* x1+tg_1_16*x7 = tp17*/\
+ "psubsw %%mm7,%%mm2 \n\t"/* x1*tg_1_16-x7 = tm17*/\
+ "movq %%mm4,%%mm5 \n\t"/* tp17*/\
+ "movq %%mm2,%%mm6 \n\t"/* tm17*/\
+ "paddsw %%mm1,%%mm5 \n\t"/* tp17+tp35 = b0*/\
+ "psubsw %%mm0,%%mm6 \n\t"/* tm17-tm35 = b3*/\
+ "psubsw %%mm1,%%mm4 \n\t"/* tp17-tp35 = t1*/\
+ "paddsw %%mm0,%%mm2 \n\t"/* tm17+tm35 = t2*/\
+ "movq 1*8(%3),%%mm7\n\t"\
+ "movq %%mm4,%%mm1 \n\t"/* t1*/\
+ "movq %%mm5,3*16 +" #A2 "\n\t"/* save b0*/\
+ "paddsw %%mm2,%%mm1 \n\t"/* t1+t2*/\
+ "movq %%mm6,5*16 +" #A2 "\n\t"/* save b3*/\
+ "psubsw %%mm2,%%mm4 \n\t"/* t1-t2*/\
+ "movq 2*16+" #A1 ",%%mm5\n\t"\
+ "movq %%mm7,%%mm0 \n\t"/* tg_2_16*/\
+ "movq 6*16+" #A1 ",%%mm6\n\t"\
+ "pmulhw %%mm5,%%mm0 \n\t"/* x2*tg_2_16*/\
+ "pmulhw %%mm6,%%mm7 \n\t"/* x6*tg_2_16*/\
+ "pmulhw %%mm3,%%mm1 \n\t"/* ocos_4_16*(t1+t2) = b1/2*/\
+ "movq 0*16+" #A1 ",%%mm2\n\t"\
+ "pmulhw %%mm3,%%mm4 \n\t"/* ocos_4_16*(t1-t2) = b2/2*/\
+ "psubsw %%mm6,%%mm0 \n\t"/* t2*tg_2_16-x6 = tm26*/\
+ "movq %%mm2,%%mm3 \n\t"/* x0*/\
+ "movq 4*16+" #A1 ",%%mm6\n\t"\
+ "paddsw %%mm5,%%mm7 \n\t"/* x2+x6*tg_2_16 = tp26*/\
+ "paddsw %%mm6,%%mm2 \n\t"/* x0+x4 = tp04*/\
+ "psubsw %%mm6,%%mm3 \n\t"/* x0-x4 = tm04*/\
+ "movq %%mm2,%%mm5 \n\t"/* tp04*/\
+ "movq %%mm3,%%mm6 \n\t"/* tm04*/\
+ "psubsw %%mm7,%%mm2 \n\t"/* tp04-tp26 = a3*/\
+ "paddsw %%mm0,%%mm3 \n\t"/* tm04+tm26 = a1*/\
+ "paddsw %%mm1,%%mm1 \n\t"/* b1*/\
+ "paddsw %%mm4,%%mm4 \n\t"/* b2*/\
+ "paddsw %%mm7,%%mm5 \n\t"/* tp04+tp26 = a0*/\
+ "psubsw %%mm0,%%mm6 \n\t"/* tm04-tm26 = a2*/\
+ "movq %%mm3,%%mm7 \n\t"/* a1*/\
+ "movq %%mm6,%%mm0 \n\t"/* a2*/\
+ "paddsw %%mm1,%%mm3 \n\t"/* a1+b1*/\
+ "paddsw %%mm4,%%mm6 \n\t"/* a2+b2*/\
+ "psraw $6,%%mm3 \n\t"/* dst1*/\
+ "psubsw %%mm1,%%mm7 \n\t"/* a1-b1*/\
+ "psraw $6,%%mm6 \n\t"/* dst2*/\
+ "psubsw %%mm4,%%mm0 \n\t"/* a2-b2*/\
+ "movq 3*16+" #A2 ",%%mm1 \n\t"/* load b0*/\
+ "psraw $6,%%mm7 \n\t"/* dst6*/\
+ "movq %%mm5,%%mm4 \n\t"/* a0*/\
+ "psraw $6,%%mm0 \n\t"/* dst5*/\
+ "movq %%mm3,1*16+" #A2 "\n\t"\
+ "paddsw %%mm1,%%mm5 \n\t"/* a0+b0*/\
+ "movq %%mm6,2*16+" #A2 "\n\t"\
+ "psubsw %%mm1,%%mm4 \n\t"/* a0-b0*/\
+ "movq 5*16+" #A2 ",%%mm3 \n\t"/* load b3*/\
+ "psraw $6,%%mm5 \n\t"/* dst0*/\
+ "movq %%mm2,%%mm6 \n\t"/* a3*/\
+ "psraw $6,%%mm4 \n\t"/* dst7*/\
+ "movq %%mm0,5*16+" #A2 "\n\t"\
+ "paddsw %%mm3,%%mm2 \n\t"/* a3+b3*/\
+ "movq %%mm7,6*16+" #A2 "\n\t"\
+ "psubsw %%mm3,%%mm6 \n\t"/* a3-b3*/\
+ "movq %%mm5,0*16+" #A2 "\n\t"\
+ "psraw $6,%%mm2 \n\t"/* dst3*/\
+ "movq %%mm4,7*16+" #A2 "\n\t"\
+ "psraw $6,%%mm6 \n\t"/* dst4*/\
+ "movq %%mm2,3*16+" #A2 "\n\t"\
+ "movq %%mm6,4*16+" #A2 "\n\t"
+
+//=============================================================================
+// Code
+//=============================================================================
+
+//-----------------------------------------------------------------------------
+// void ff_idct_xvid_mmx(short block[64]);
+//-----------------------------------------------------------------------------
+
+
+void ff_idct_xvid_mmx(short *block){
+asm volatile(
+ //# Process each row
+ DCT_8_INV_ROW_MMX(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1))
+ DCT_8_INV_ROW_MMX(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1))
+ DCT_8_INV_ROW_MMX(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1))
+ DCT_8_INV_ROW_MMX(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1))
+ DCT_8_INV_ROW_MMX(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1))
+ DCT_8_INV_ROW_MMX(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1))
+ DCT_8_INV_ROW_MMX(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1))
+ DCT_8_INV_ROW_MMX(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1))
+
+ //# Process the columns (4 at a time)
+ DCT_8_INV_COL(0(%0), 0(%0))
+ DCT_8_INV_COL(8(%0), 8(%0))
+ :: "r"(block), "r"(rounder_0), "r"(tab_i_04_mmx), "r"(tg_1_16));
+}
+
+//-----------------------------------------------------------------------------
+// void ff_idct_xvid_mmx2(short block[64]);
+//-----------------------------------------------------------------------------
+
+
+void ff_idct_xvid_mmx2(short *block){
+asm volatile(
+ //# Process each row
+ DCT_8_INV_ROW_XMM(0*16(%0), 0*16(%0), 64*0(%2), 8*0(%1))
+ DCT_8_INV_ROW_XMM(1*16(%0), 1*16(%0), 64*1(%2), 8*1(%1))
+ DCT_8_INV_ROW_XMM(2*16(%0), 2*16(%0), 64*2(%2), 8*2(%1))
+ DCT_8_INV_ROW_XMM(3*16(%0), 3*16(%0), 64*3(%2), 8*3(%1))
+ DCT_8_INV_ROW_XMM(4*16(%0), 4*16(%0), 64*0(%2), 8*4(%1))
+ DCT_8_INV_ROW_XMM(5*16(%0), 5*16(%0), 64*3(%2), 8*5(%1))
+ DCT_8_INV_ROW_XMM(6*16(%0), 6*16(%0), 64*2(%2), 8*6(%1))
+ DCT_8_INV_ROW_XMM(7*16(%0), 7*16(%0), 64*1(%2), 8*7(%1))
+
+ //# Process the columns (4 at a time)
+ DCT_8_INV_COL(0(%0), 0(%0))
+ DCT_8_INV_COL(8(%0), 8(%0))
+ :: "r"(block), "r"(rounder_0), "r"(tab_i_04_xmm), "r"(tg_1_16));
+}
+
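The row pass in both functions above follows the commented DCT_8_INV_ROW algorithm, with the per-row tables reused symmetrically: rows 0 and 4 share table 0, rows 1 and 7 table 1, rows 2 and 6 table 2, rows 3 and 5 table 3. A compilable scalar rendering of that commented pseudo-code (a sketch under stated assumptions: SHIFT_ROUND is taken to be a plain rounding arithmetic shift by SHIFT_INV_ROW, matching the psrad $11; the asm instead folds row-specific rounding constants from rounder_0 into the sums, so this is an approximation of the row pass):

    static void dct_8_inv_row_ref(const short *x, short *y, const short *w)
    {
    #define SHIFT_ROUND(v) (((v) + (1 << (SHIFT_INV_ROW - 1))) >> SHIFT_INV_ROW)
        int a0 = x[0]*w[0]  + x[2]*w[1]  + x[4]*w[2]  + x[6]*w[3];
        int a1 = x[0]*w[4]  + x[2]*w[5]  + x[4]*w[6]  + x[6]*w[7];
        int a2 = x[0]*w[8]  + x[2]*w[9]  + x[4]*w[10] + x[6]*w[11];
        int a3 = x[0]*w[12] + x[2]*w[13] + x[4]*w[14] + x[6]*w[15];
        int b0 = x[1]*w[16] + x[3]*w[17] + x[5]*w[18] + x[7]*w[19];
        int b1 = x[1]*w[20] + x[3]*w[21] + x[5]*w[22] + x[7]*w[23];
        int b2 = x[1]*w[24] + x[3]*w[25] + x[5]*w[26] + x[7]*w[27];
        int b3 = x[1]*w[28] + x[3]*w[29] + x[5]*w[30] + x[7]*w[31];
        y[0] = SHIFT_ROUND(a0 + b0);  y[7] = SHIFT_ROUND(a0 - b0);
        y[1] = SHIFT_ROUND(a1 + b1);  y[6] = SHIFT_ROUND(a1 - b1);
        y[2] = SHIFT_ROUND(a2 + b2);  y[5] = SHIFT_ROUND(a2 - b2);
        y[3] = SHIFT_ROUND(a3 + b3);  y[4] = SHIFT_ROUND(a3 - b3);
    #undef SHIFT_ROUND
    }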
diff --git a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
index c9354dc1b..93f156ee5 100644
--- a/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
+++ b/src/libffmpeg/libavcodec/i386/mpegvideo_mmx_template.c
@@ -22,7 +22,11 @@
#ifdef HAVE_MMX2
#define SPREADW(a) "pshufw $0, " #a ", " #a " \n\t"
#define PMAXW(a,b) "pmaxsw " #a ", " #b " \n\t"
-
+#define PMAX(a,b) \
+ "pshufw $0x0E," #a ", " #b " \n\t"\
+ PMAXW(b, a)\
+ "pshufw $0x01," #a ", " #b " \n\t"\
+ PMAXW(b, a)
#else
#define SPREADW(a) \
"punpcklwd " #a ", " #a " \n\t"\
@@ -30,6 +34,14 @@
#define PMAXW(a,b) \
"psubusw " #a ", " #b " \n\t"\
"paddw " #a ", " #b " \n\t"
+#define PMAX(a,b) \
+ "movq " #a ", " #b " \n\t"\
+ "psrlq $32, " #a " \n\t"\
+ PMAXW(b, a)\
+ "movq " #a ", " #b " \n\t"\
+ "psrlq $16, " #a " \n\t"\
+ PMAXW(b, a)
+
#endif
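The new PMAX macro folds the four 16-bit words of an MMX register down to their maximum in the low word; it is the same shuffle-and-fold pattern as the hadamard8_diff change earlier in this patch, but with a max instead of a saturating add, and is used below to locate last_non_zero. A scalar model (an illustration only; the non-MMX2 PMAXW computes an unsigned max via psubusw+paddw):

    #include <stdint.h>

    static uint16_t pmax_ref(const uint16_t a[4])
    {
        /* step 1 (pshufw $0x0E / PMAXW): fold words 2,3 onto 0,1 */
        uint16_t m02 = a[0] > a[2] ? a[0] : a[2];
        uint16_t m13 = a[1] > a[3] ? a[1] : a[3];
        /* step 2 (pshufw $0x01 / PMAXW): final fold into one word */
        return m02 > m13 ? m02 : m13;
    }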
static int RENAME(dct_quantize)(MpegEncContext *s,
@@ -119,12 +131,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
PMAXW(%%mm0, %%mm3)
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
- "movq %%mm3, %%mm0 \n\t"
- "psrlq $32, %%mm3 \n\t"
- PMAXW(%%mm0, %%mm3)
- "movq %%mm3, %%mm0 \n\t"
- "psrlq $16, %%mm3 \n\t"
- PMAXW(%%mm0, %%mm3)
+ PMAX(%%mm3, %%mm0)
"movd %%mm3, %%"REG_a" \n\t"
"movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1
: "+a" (last_non_zero_p1)
@@ -170,12 +177,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
PMAXW(%%mm0, %%mm3)
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
- "movq %%mm3, %%mm0 \n\t"
- "psrlq $32, %%mm3 \n\t"
- PMAXW(%%mm0, %%mm3)
- "movq %%mm3, %%mm0 \n\t"
- "psrlq $16, %%mm3 \n\t"
- PMAXW(%%mm0, %%mm3)
+ PMAX(%%mm3, %%mm0)
"movd %%mm3, %%"REG_a" \n\t"
"movzb %%al, %%"REG_a" \n\t" // last_non_zero_p1
: "+a" (last_non_zero_p1)
diff --git a/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c b/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c
index 97937651a..ed17891bf 100644
--- a/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c
+++ b/src/libffmpeg/libavcodec/i386/vp3dsp_sse2.c
@@ -799,7 +799,6 @@ static const unsigned short __align16 SSE2_idct_data[7 * 8] =
void ff_vp3_idct_sse2(int16_t *input_data)
{
unsigned char *input_bytes = (unsigned char *)input_data;
- unsigned char *dequant_const_bytes = (unsigned char *)SSE2_dequant_const;
unsigned char *output_data_bytes = (unsigned char *)input_data;
unsigned char *idct_data_bytes = (unsigned char *)SSE2_idct_data;
unsigned char *Eight = (unsigned char *)eight_data;