path: root/src/libffmpeg/libavcodec/ppc
author     Miguel Freitas <miguelfreitas@users.sourceforge.net>  2003-01-31 18:29:43 +0000
committer  Miguel Freitas <miguelfreitas@users.sourceforge.net>  2003-01-31 18:29:43 +0000
commit     5350f2b7701f01bc4f234d3971fb8a623a8cd72a (patch)
tree       5f6cd350778863ad8d2612bce4ac2f6270919115 /src/libffmpeg/libavcodec/ppc
parent     8b0e8647a0d0c279b6a355362452dff4bd6f5c05 (diff)
download   xine-lib-5350f2b7701f01bc4f234d3971fb8a623a8cd72a.tar.gz
           xine-lib-5350f2b7701f01bc4f234d3971fb8a623a8cd72a.tar.bz2
update ffmpeg
CVS patchset: 4068
CVS date: 2003/01/31 18:29:43
Diffstat (limited to 'src/libffmpeg/libavcodec/ppc')
-rw-r--r--  src/libffmpeg/libavcodec/ppc/dsputil_altivec.c    902
-rw-r--r--  src/libffmpeg/libavcodec/ppc/dsputil_altivec.h     65
-rw-r--r--  src/libffmpeg/libavcodec/ppc/dsputil_ppc.c        195
-rw-r--r--  src/libffmpeg/libavcodec/ppc/fft_altivec.c        149
-rw-r--r--  src/libffmpeg/libavcodec/ppc/idct_altivec.c        37
-rw-r--r--  src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c  169
-rw-r--r--  src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c        7
7 files changed, 1427 insertions, 97 deletions
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
index 5f14ed0eb..dc62e70f4 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2002 Brian Foley
* Copyright (c) 2002 Dieter Shirley
+ * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
@@ -20,21 +21,39 @@
#include "../dsputil.h"
#include "dsputil_altivec.h"
-#if CONFIG_DARWIN
+#ifdef CONFIG_DARWIN
#include <sys/sysctl.h>
-#endif
+#else /* CONFIG_DARWIN */
+#include <signal.h>
+#include <setjmp.h>
+
+static sigjmp_buf jmpbuf;
+static volatile sig_atomic_t canjump = 0;
+
+static void sigill_handler (int sig)
+{
+ if (!canjump) {
+ signal (sig, SIG_DFL);
+ raise (sig);
+ }
+
+ canjump = 0;
+ siglongjmp (jmpbuf, 1);
+}
+#endif /* CONFIG_DARWIN */
int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
{
- int s, i;
- vector unsigned char *tv, zero;
+ int i;
+ int s __attribute__((aligned(16)));
+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
+ vector unsigned char *tv;
vector unsigned char pix1v, pix2v, pix2iv, avgv, t5;
vector unsigned int sad;
vector signed int sumdiffs;
s = 0;
- zero = vec_splat_u8(0);
- sad = vec_splat_u32(0);
+ sad = (vector unsigned int)vec_splat_u32(0);
for(i=0;i<16;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
@@ -72,16 +91,17 @@ int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
{
- int s, i;
- vector unsigned char *tv, zero;
+ int i;
+ int s __attribute__((aligned(16)));
+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
+ vector unsigned char *tv;
vector unsigned char pix1v, pix2v, pix3v, avgv, t5;
vector unsigned int sad;
vector signed int sumdiffs;
uint8_t *pix3 = pix2 + line_size;
s = 0;
- zero = vec_splat_u8(0);
- sad = vec_splat_u32(0);
+ sad = (vector unsigned int)vec_splat_u32(0);
/*
Due to the fact that pix3 = pix2 + line_size, the pix3 of one
@@ -131,20 +151,21 @@ int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
{
- int s, i;
+ int i;
+ int s __attribute__((aligned(16)));
uint8_t *pix3 = pix2 + line_size;
- vector unsigned char *tv, avgv, t5, zero;
+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
+ const vector unsigned short two = (const vector unsigned short)vec_splat_u16(2);
+ vector unsigned char *tv, avgv, t5;
vector unsigned char pix1v, pix2v, pix3v, pix2iv, pix3iv;
vector unsigned short pix2lv, pix2hv, pix2ilv, pix2ihv;
vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
- vector unsigned short avghv, avglv, two;
+ vector unsigned short avghv, avglv;
vector unsigned short t1, t2, t3, t4;
vector unsigned int sad;
vector signed int sumdiffs;
- zero = vec_splat_u8(0);
- two = vec_splat_u16(2);
- sad = vec_splat_u32(0);
+ sad = (vector unsigned int)vec_splat_u32(0);
s = 0;
@@ -231,14 +252,15 @@ int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
{
- int i, s;
+ int i;
+ int s __attribute__((aligned(16)));
+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
vector unsigned char perm1, perm2, *pix1v, *pix2v;
vector unsigned char t1, t2, t3,t4, t5;
- vector unsigned int sad, zero;
+ vector unsigned int sad;
vector signed int sumdiffs;
- zero = (vector unsigned int) (0);
- sad = (vector unsigned int) (0);
+ sad = (vector unsigned int)vec_splat_u32(0);
for(i=0;i<16;i++) {
@@ -272,15 +294,20 @@ int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
{
- int i, s;
+ int i;
+ int s __attribute__((aligned(16)));
+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
vector unsigned char t1, t2, t3,t4, t5;
- vector unsigned int sad, zero;
+ vector unsigned int sad;
vector signed int sumdiffs;
- zero = (vector unsigned int) (0);
- sad = (vector unsigned int) (0);
- permclear = (vector unsigned char) (255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
+ sad = (vector unsigned int)vec_splat_u32(0);
+#ifdef CONFIG_DARWIN
+ permclear = (vector unsigned char)(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
+#else
+ permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
+#endif
for(i=0;i<8;i++) {
/* Read potentially unaligned pixels into t1 and t2
@@ -315,14 +342,15 @@ int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_norm1_altivec(uint8_t *pix, int line_size)
{
- int s, i;
- vector unsigned char *tv, zero;
+ int i;
+ int s __attribute__((aligned(16)));
+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
+ vector unsigned char *tv;
vector unsigned char pixv;
vector unsigned int sv;
vector signed int sum;
-
- zero = vec_splat_u8(0);
- sv = vec_splat_u32(0);
+
+ sv = (vector unsigned int)vec_splat_u32(0);
s = 0;
for (i = 0; i < 16; i++) {
@@ -343,18 +371,127 @@ int pix_norm1_altivec(uint8_t *pix, int line_size)
return s;
}
-int pix_sum_altivec(UINT8 * pix, int line_size)
+/**
+ * Sum of Squared Errors for an 8x8 block.
+ * AltiVec-enhanced.
+ * It's the pix_abs8x8_altivec code above w/ squaring added.
+ */
+int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
{
+ int i;
+ int s __attribute__((aligned(16)));
+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
+ vector unsigned char perm1, perm2, permclear, *pix1v, *pix2v;
+ vector unsigned char t1, t2, t3,t4, t5;
+ vector unsigned int sum;
+ vector signed int sumsqr;
+
+ sum = (vector unsigned int)vec_splat_u32(0);
+#ifdef CONFIG_DARWIN
+ permclear = (vector unsigned char)(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
+#else
+ permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
+#endif
+
+ for(i=0;i<8;i++) {
+ /* Read potentially unaligned pixels into t1 and t2
+ Since we're reading 16 pixels, and actually only want 8,
+ mask out the last 8 pixels. The 0s don't change the sum. */
+ perm1 = vec_lvsl(0, pix1);
+ pix1v = (vector unsigned char *) pix1;
+ perm2 = vec_lvsl(0, pix2);
+ pix2v = (vector unsigned char *) pix2;
+ t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear);
+ t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear);
+ /*
+ Since we want to use unsigned chars, we can take advantage
+ of the fact that abs(a-b)^2 = (a-b)^2.
+ */
+
+ /* Calculate abs differences vector */
+ t3 = vec_max(t1, t2);
+ t4 = vec_min(t1, t2);
+ t5 = vec_sub(t3, t4);
+
+ /* Square the values and add them to our sum */
+ sum = vec_msum(t5, t5, sum);
+
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+
+ /* Sum up the four partial sums, and put the result into s */
+ sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
+ sumsqr = vec_splat(sumsqr, 3);
+ vec_ste(sumsqr, 0, &s);
+
+ return s;
+}
+
+/**
+ * Sum of Squared Errors for a 16x16 block.
+ * AltiVec-enhanced.
+ * It's the pix_abs16x16_altivec code above w/ squaring added.
+ */
+int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
+{
+ int i;
+ int s __attribute__((aligned(16)));
+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
+ vector unsigned char perm1, perm2, *pix1v, *pix2v;
+ vector unsigned char t1, t2, t3,t4, t5;
+ vector unsigned int sum;
+ vector signed int sumsqr;
+
+ sum = (vector unsigned int)vec_splat_u32(0);
+
+ for(i=0;i<16;i++) {
+ /* Read potentially unaligned pixels into t1 and t2 */
+ perm1 = vec_lvsl(0, pix1);
+ pix1v = (vector unsigned char *) pix1;
+ perm2 = vec_lvsl(0, pix2);
+ pix2v = (vector unsigned char *) pix2;
+ t1 = vec_perm(pix1v[0], pix1v[1], perm1);
+ t2 = vec_perm(pix2v[0], pix2v[1], perm2);
+
+ /*
+ Since we want to use unsigned chars, we can take advantage
+ of the fact that abs(a-b)^2 = (a-b)^2.
+ */
+
+ /* Calculate abs differences vector */
+ t3 = vec_max(t1, t2);
+ t4 = vec_min(t1, t2);
+ t5 = vec_sub(t3, t4);
+
+ /* Square the values and add them to our sum */
+ sum = vec_msum(t5, t5, sum);
+
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+
+ /* Sum up the four partial sums, and put the result into s */
+ sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
+ sumsqr = vec_splat(sumsqr, 3);
+ vec_ste(sumsqr, 0, &s);
+
+ return s;
+}
+
+int pix_sum_altivec(UINT8 * pix, int line_size)
+{
+ const vector unsigned int zero = (const vector unsigned int)vec_splat_u32(0);
vector unsigned char perm, *pixv;
vector unsigned char t1;
- vector unsigned int sad, zero;
+ vector unsigned int sad;
vector signed int sumdiffs;
- int s, i;
-
- zero = (vector unsigned int) (0);
- sad = (vector unsigned int) (0);
+ int i;
+ int s __attribute__((aligned(16)));
+
+ sad = (vector unsigned int)vec_splat_u32(0);
for (i = 0; i < 16; i++) {
/* Read the potentially unaligned 16 pixels into t1 */
@@ -380,7 +517,7 @@ void get_pixels_altivec(DCTELEM *restrict block, const UINT8 *pixels, int line_s
{
int i;
vector unsigned char perm, bytes, *pixv;
- vector unsigned char zero = (vector unsigned char) (0);
+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
vector signed short shorts;
for(i=0;i<8;i++)
@@ -407,7 +544,7 @@ void diff_pixels_altivec(DCTELEM *restrict block, const UINT8 *s1,
{
int i;
vector unsigned char perm, bytes, *pixv;
- vector unsigned char zero = (vector unsigned char) (0);
+ const vector unsigned char zero = (const vector unsigned char)vec_splat_u8(0);
vector signed short shorts1, shorts2;
for(i=0;i<4;i++)
@@ -474,10 +611,675 @@ void diff_pixels_altivec(DCTELEM *restrict block, const UINT8 *s1,
}
}
+int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
+ return pix_abs16x16_altivec(a,b,stride);
+}
+
+int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
+ return pix_abs8x8_altivec(a,b,stride);
+}
+
+void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int i;
+ for(i=0; i+7<w; i++){
+ dst[i+0] += src[i+0];
+ dst[i+1] += src[i+1];
+ dst[i+2] += src[i+2];
+ dst[i+3] += src[i+3];
+ dst[i+4] += src[i+4];
+ dst[i+5] += src[i+5];
+ dst[i+6] += src[i+6];
+ dst[i+7] += src[i+7];
+ }
+ for(; i<w; i++)
+ dst[i+0] += src[i+0];
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register int i;
+ register vector unsigned char vdst, vsrc;
+
+ /* dst and src are 16 bytes-aligned (guaranteed) */
+ for(i = 0 ; (i + 15) < w ; i++)
+ {
+ vdst = vec_ld(i << 4, (unsigned char*)dst);
+ vsrc = vec_ld(i << 4, (unsigned char*)src);
+ vdst = vec_add(vsrc, vdst);
+ vec_st(vdst, i << 4, (unsigned char*)dst);
+ }
+ /* if w is not a multiple of 16 */
+ for (; (i < w) ; i++)
+ {
+ dst[i] = src[i];
+ }
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
+
+/* next one assumes that ((line_size % 16) == 0) */
+void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+POWERPC_TBL_DECLARE(altivec_put_pixels16_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int i;
+
+POWERPC_TBL_START_COUNT(altivec_put_pixels16_num, 1);
+
+ for(i=0; i<h; i++) {
+ *((uint32_t*)(block )) = (((const struct unaligned_32 *) (pixels))->l);
+ *((uint32_t*)(block+4)) = (((const struct unaligned_32 *) (pixels+4))->l);
+ *((uint32_t*)(block+8)) = (((const struct unaligned_32 *) (pixels+8))->l);
+ *((uint32_t*)(block+12)) = (((const struct unaligned_32 *) (pixels+12))->l);
+ pixels+=line_size;
+ block +=line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_pixels16_num, 1);
+
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register vector unsigned char pixelsv1, pixelsv2;
+ register vector unsigned char perm = vec_lvsl(0, pixels);
+ int i;
+
+POWERPC_TBL_START_COUNT(altivec_put_pixels16_num, 1);
+
+ for(i=0; i<h; i++) {
+ pixelsv1 = vec_ld(0, (unsigned char*)pixels);
+ pixelsv2 = vec_ld(16, (unsigned char*)pixels);
+ vec_st(vec_perm(pixelsv1, pixelsv2, perm),
+ 0, (unsigned char*)block);
+ pixels+=line_size;
+ block +=line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_pixels16_num, 1);
+
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
+
+/* next one assumes that ((line_size % 16) == 0) */
+#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
+void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+POWERPC_TBL_DECLARE(altivec_avg_pixels16_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int i;
+
+POWERPC_TBL_START_COUNT(altivec_avg_pixels16_num, 1);
+
+ for(i=0; i<h; i++) {
+ op_avg(*((uint32_t*)(block)),(((const struct unaligned_32 *)(pixels))->l));
+ op_avg(*((uint32_t*)(block+4)),(((const struct unaligned_32 *)(pixels+4))->l));
+ op_avg(*((uint32_t*)(block+8)),(((const struct unaligned_32 *)(pixels+8))->l));
+ op_avg(*((uint32_t*)(block+12)),(((const struct unaligned_32 *)(pixels+12))->l));
+ pixels+=line_size;
+ block +=line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_avg_pixels16_num, 1);
+
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
+ register vector unsigned char perm = vec_lvsl(0, pixels);
+ int i;
+
+POWERPC_TBL_START_COUNT(altivec_avg_pixels16_num, 1);
+
+ for(i=0; i<h; i++) {
+ pixelsv1 = vec_ld(0, (unsigned char*)pixels);
+ pixelsv2 = vec_ld(16, (unsigned char*)pixels);
+ blockv = vec_ld(0, block);
+ pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
+ blockv = vec_avg(blockv,pixelsv);
+ vec_st(blockv, 0, (unsigned char*)block);
+ pixels+=line_size;
+ block +=line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_avg_pixels16_num, 1);
+
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
+
+/* next one assumes that ((line_size % 8) == 0) */
+void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
+{
+POWERPC_TBL_DECLARE(altivec_avg_pixels8_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int i;
+POWERPC_TBL_START_COUNT(altivec_avg_pixels8_num, 1);
+ for (i = 0; i < h; i++) {
+ *((uint32_t *) (block)) =
+ (((*((uint32_t *) (block))) |
+ ((((const struct unaligned_32 *) (pixels))->l))) -
+ ((((*((uint32_t *) (block))) ^
+ ((((const struct unaligned_32 *) (pixels))->
+ l))) & 0xFEFEFEFEUL) >> 1));
+ *((uint32_t *) (block + 4)) =
+ (((*((uint32_t *) (block + 4))) |
+ ((((const struct unaligned_32 *) (pixels + 4))->l))) -
+ ((((*((uint32_t *) (block + 4))) ^
+ ((((const struct unaligned_32 *) (pixels +
+ 4))->
+ l))) & 0xFEFEFEFEUL) >> 1));
+ pixels += line_size;
+ block += line_size;
+ }
+POWERPC_TBL_STOP_COUNT(altivec_avg_pixels8_num, 1);
+
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
+ int i;
+
+POWERPC_TBL_START_COUNT(altivec_avg_pixels8_num, 1);
+
+ for (i = 0; i < h; i++) {
+ /*
+ block is 8 bytes-aligned, so we're either in the
+ left block (16 bytes-aligned) or in the right block (not)
+ */
+ int rightside = ((unsigned long)block & 0x0000000F);
+
+ blockv = vec_ld(0, block);
+ pixelsv1 = vec_ld(0, (unsigned char*)pixels);
+ pixelsv2 = vec_ld(16, (unsigned char*)pixels);
+ pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
+
+ if (rightside)
+ {
+ pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
+ }
+ else
+ {
+ pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3));
+ }
+
+ blockv = vec_avg(blockv, pixelsv);
+
+ vec_st(blockv, 0, block);
+
+ pixels += line_size;
+ block += line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_avg_pixels8_num, 1);
+
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
+
+/* next one assumes that ((line_size % 8) == 0) */
+void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+POWERPC_TBL_DECLARE(altivec_put_pixels8_xy2_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int j;
+POWERPC_TBL_START_COUNT(altivec_put_pixels8_xy2_num, 1);
+ for (j = 0; j < 2; j++) {
+ int i;
+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ const uint32_t b =
+ (((const struct unaligned_32 *) (pixels + 1))->l);
+ uint32_t l0 =
+ (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
+ uint32_t h0 =
+ ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ uint32_t l1, h1;
+ pixels += line_size;
+ for (i = 0; i < h; i += 2) {
+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) =
+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
+ pixels += line_size;
+ block += line_size;
+ a = (((const struct unaligned_32 *) (pixels))->l);
+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) =
+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
+ pixels += line_size;
+ block += line_size;
+ } pixels += 4 - line_size * (h + 1);
+ block += 4 - line_size * h;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
+
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register int i;
+ register vector unsigned char
+ pixelsv1, pixelsv2,
+ pixelsavg;
+ register vector unsigned char
+ blockv, temp1, temp2;
+ register vector unsigned short
+ pixelssum1, pixelssum2, temp3;
+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
+ register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
+
+ temp1 = vec_ld(0, pixels);
+ temp2 = vec_ld(16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
+ }
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ pixelssum1 = vec_add(pixelssum1, vctwo);
+
+POWERPC_TBL_START_COUNT(altivec_put_pixels8_xy2_num, 1);
+ for (i = 0; i < h ; i++) {
+ int rightside = ((unsigned long)block & 0x0000000F);
+ blockv = vec_ld(0, block);
+
+ temp1 = vec_ld(line_size, pixels);
+ temp2 = vec_ld(line_size + 16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
+ }
+
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ temp3 = vec_add(pixelssum1, pixelssum2);
+ temp3 = vec_sra(temp3, vctwo);
+ pixelssum1 = vec_add(pixelssum2, vctwo);
+ pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
+
+ if (rightside)
+ {
+ blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
+ }
+ else
+ {
+ blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
+ }
+
+ vec_st(blockv, 0, block);
+
+ block += line_size;
+ pixels += line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_pixels8_xy2_num, 1);
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
+
+/* next one assumes that ((line_size % 8) == 0) */
+void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+POWERPC_TBL_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int j;
+POWERPC_TBL_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
+ for (j = 0; j < 2; j++) {
+ int i;
+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ const uint32_t b =
+ (((const struct unaligned_32 *) (pixels + 1))->l);
+ uint32_t l0 =
+ (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
+ uint32_t h0 =
+ ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ uint32_t l1, h1;
+ pixels += line_size;
+ for (i = 0; i < h; i += 2) {
+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) =
+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
+ pixels += line_size;
+ block += line_size;
+ a = (((const struct unaligned_32 *) (pixels))->l);
+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) =
+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
+ pixels += line_size;
+ block += line_size;
+ } pixels += 4 - line_size * (h + 1);
+ block += 4 - line_size * h;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
+
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register int i;
+ register vector unsigned char
+ pixelsv1, pixelsv2,
+ pixelsavg;
+ register vector unsigned char
+ blockv, temp1, temp2;
+ register vector unsigned short
+ pixelssum1, pixelssum2, temp3;
+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
+ register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
+ register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
+
+ temp1 = vec_ld(0, pixels);
+ temp2 = vec_ld(16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
+ }
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ pixelssum1 = vec_add(pixelssum1, vcone);
+
+POWERPC_TBL_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
+ for (i = 0; i < h ; i++) {
+ int rightside = ((unsigned long)block & 0x0000000F);
+ blockv = vec_ld(0, block);
+
+ temp1 = vec_ld(line_size, pixels);
+ temp2 = vec_ld(line_size + 16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
+ }
+
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ temp3 = vec_add(pixelssum1, pixelssum2);
+ temp3 = vec_sra(temp3, vctwo);
+ pixelssum1 = vec_add(pixelssum2, vcone);
+ pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
+
+ if (rightside)
+ {
+ blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
+ }
+ else
+ {
+ blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
+ }
+
+ vec_st(blockv, 0, block);
+
+ block += line_size;
+ pixels += line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1);
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
+
+/* next one assumes that ((line_size % 16) == 0) */
+void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
+{
+POWERPC_TBL_DECLARE(altivec_put_pixels16_xy2_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int j;
+POWERPC_TBL_START_COUNT(altivec_put_pixels16_xy2_num, 1);
+ for (j = 0; j < 4; j++) {
+ int i;
+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ const uint32_t b =
+ (((const struct unaligned_32 *) (pixels + 1))->l);
+ uint32_t l0 =
+ (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
+ uint32_t h0 =
+ ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ uint32_t l1, h1;
+ pixels += line_size;
+ for (i = 0; i < h; i += 2) {
+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) =
+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
+ pixels += line_size;
+ block += line_size;
+ a = (((const struct unaligned_32 *) (pixels))->l);
+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) =
+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
+ pixels += line_size;
+ block += line_size;
+ } pixels += 4 - line_size * (h + 1);
+ block += 4 - line_size * h;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
+
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register int i;
+ register vector unsigned char
+ pixelsv1, pixelsv2, pixelsv3, pixelsv4;
+ register vector unsigned char
+ blockv, temp1, temp2;
+ register vector unsigned short
+ pixelssum1, pixelssum2, temp3,
+ pixelssum3, pixelssum4, temp4;
+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
+ register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
+
+ temp1 = vec_ld(0, pixels);
+ temp2 = vec_ld(16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
+ }
+ pixelsv3 = vec_mergel(vczero, pixelsv1);
+ pixelsv4 = vec_mergel(vczero, pixelsv2);
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum3 = vec_add((vector unsigned short)pixelsv3,
+ (vector unsigned short)pixelsv4);
+ pixelssum3 = vec_add(pixelssum3, vctwo);
+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ pixelssum1 = vec_add(pixelssum1, vctwo);
+
+POWERPC_TBL_START_COUNT(altivec_put_pixels16_xy2_num, 1);
+ for (i = 0; i < h ; i++) {
+ blockv = vec_ld(0, block);
+
+ temp1 = vec_ld(line_size, pixels);
+ temp2 = vec_ld(line_size + 16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
+ }
+
+ pixelsv3 = vec_mergel(vczero, pixelsv1);
+ pixelsv4 = vec_mergel(vczero, pixelsv2);
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+
+ pixelssum4 = vec_add((vector unsigned short)pixelsv3,
+ (vector unsigned short)pixelsv4);
+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ temp4 = vec_add(pixelssum3, pixelssum4);
+ temp4 = vec_sra(temp4, vctwo);
+ temp3 = vec_add(pixelssum1, pixelssum2);
+ temp3 = vec_sra(temp3, vctwo);
+
+ pixelssum3 = vec_add(pixelssum4, vctwo);
+ pixelssum1 = vec_add(pixelssum2, vctwo);
+
+ blockv = vec_packsu(temp3, temp4);
+
+ vec_st(blockv, 0, block);
+
+ block += line_size;
+ pixels += line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_pixels16_xy2_num, 1);
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
+
+/* next one assumes that ((line_size % 16) == 0) */
+void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h)
+{
+POWERPC_TBL_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int j;
+POWERPC_TBL_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
+ for (j = 0; j < 4; j++) {
+ int i;
+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ const uint32_t b =
+ (((const struct unaligned_32 *) (pixels + 1))->l);
+ uint32_t l0 =
+ (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
+ uint32_t h0 =
+ ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ uint32_t l1, h1;
+ pixels += line_size;
+ for (i = 0; i < h; i += 2) {
+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) =
+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
+ pixels += line_size;
+ block += line_size;
+ a = (((const struct unaligned_32 *) (pixels))->l);
+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL;
+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) =
+ h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL);
+ pixels += line_size;
+ block += line_size;
+ } pixels += 4 - line_size * (h + 1);
+ block += 4 - line_size * h;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
+
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register int i;
+ register vector unsigned char
+ pixelsv1, pixelsv2, pixelsv3, pixelsv4;
+ register vector unsigned char
+ blockv, temp1, temp2;
+ register vector unsigned short
+ pixelssum1, pixelssum2, temp3,
+ pixelssum3, pixelssum4, temp4;
+ register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
+ register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
+ register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
+
+ temp1 = vec_ld(0, pixels);
+ temp2 = vec_ld(16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
+ }
+ pixelsv3 = vec_mergel(vczero, pixelsv1);
+ pixelsv4 = vec_mergel(vczero, pixelsv2);
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum3 = vec_add((vector unsigned short)pixelsv3,
+ (vector unsigned short)pixelsv4);
+ pixelssum3 = vec_add(pixelssum3, vcone);
+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ pixelssum1 = vec_add(pixelssum1, vcone);
+
+POWERPC_TBL_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
+ for (i = 0; i < h ; i++) {
+ blockv = vec_ld(0, block);
+
+ temp1 = vec_ld(line_size, pixels);
+ temp2 = vec_ld(line_size + 16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
+ }
+
+ pixelsv3 = vec_mergel(vczero, pixelsv1);
+ pixelsv4 = vec_mergel(vczero, pixelsv2);
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+
+ pixelssum4 = vec_add((vector unsigned short)pixelsv3,
+ (vector unsigned short)pixelsv4);
+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ temp4 = vec_add(pixelssum3, pixelssum4);
+ temp4 = vec_sra(temp4, vctwo);
+ temp3 = vec_add(pixelssum1, pixelssum2);
+ temp3 = vec_sra(temp3, vctwo);
+
+ pixelssum3 = vec_add(pixelssum4, vcone);
+ pixelssum1 = vec_add(pixelssum2, vcone);
+
+ blockv = vec_packsu(temp3, temp4);
+
+ vec_st(blockv, 0, block);
+
+ block += line_size;
+ pixels += line_size;
+ }
+
+POWERPC_TBL_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
int has_altivec(void)
{
-#if CONFIG_DARWIN
+#ifdef CONFIG_DARWIN
int sels[2] = {CTL_HW, HW_VECTORUNIT};
int has_vu = 0;
size_t len = sizeof(has_vu);
@@ -486,7 +1288,25 @@ int has_altivec(void)
err = sysctl(sels, 2, &has_vu, &len, NULL, 0);
if (err == 0) return (has_vu != 0);
-#endif
+#else /* CONFIG_DARWIN */
+/* not Darwin, so do it the brute-force way */
+/* this is borrowed from the libmpeg2 library */
+ {
+ signal (SIGILL, sigill_handler);
+ if (sigsetjmp (jmpbuf, 1)) {
+ signal (SIGILL, SIG_DFL);
+ } else {
+ canjump = 1;
+
+ asm volatile ("mtspr 256, %0\n\t"
+ "vand %%v0, %%v0, %%v0"
+ :
+ : "r" (-1));
+
+ signal (SIGILL, SIG_DFL);
+ return 1;
+ }
+ }
+#endif /* CONFIG_DARWIN */
return 0;
}
-
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h
index d4d259d9e..61dbec548 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h
@@ -17,14 +17,79 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#ifndef _DSPUTIL_ALTIVEC_
+#define _DSPUTIL_ALTIVEC_
+
+#include "dsputil_ppc.h"
+
+#ifdef HAVE_ALTIVEC
+
extern int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
+extern int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride);
+extern int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride);
extern int pix_norm1_altivec(uint8_t *pix, int line_size);
+extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
+extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_sum_altivec(UINT8 * pix, int line_size);
extern void diff_pixels_altivec(DCTELEM* block, const UINT8* s1, const UINT8* s2, int stride);
extern void get_pixels_altivec(DCTELEM* block, const UINT8 * pixels, int line_size);
+extern void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w);
+extern void put_pixels_clamped_altivec(const DCTELEM *block, UINT8 *restrict pixels, int line_size);
+extern void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
+extern void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
+extern void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h);
+extern void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
+extern void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
+extern void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h);
+extern void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h);
+
+extern void gmc1_altivec(UINT8 *dst, UINT8 *src, int stride, int h, int x16, int y16, int rounder);
+
extern int has_altivec(void);
+
+// used to build registers permutation vectors (vcprm)
+// the 's' are for words in the _s_econd vector
+#define WORD_0 0x00,0x01,0x02,0x03
+#define WORD_1 0x04,0x05,0x06,0x07
+#define WORD_2 0x08,0x09,0x0a,0x0b
+#define WORD_3 0x0c,0x0d,0x0e,0x0f
+#define WORD_s0 0x10,0x11,0x12,0x13
+#define WORD_s1 0x14,0x15,0x16,0x17
+#define WORD_s2 0x18,0x19,0x1a,0x1b
+#define WORD_s3 0x1c,0x1d,0x1e,0x1f
+
+#ifdef CONFIG_DARWIN
+#define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
+#else
+#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
+#endif
+
+// vcprmle is used to keep the same index as in the SSE version.
+// it's the same as vcprm, with the index reversed
+// ('le' is Little Endian)
+#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
+
+// used to build inverse/identity vectors (vcii)
+// n is _n_egative, p is _p_ositive
+#define FLOAT_n -1.
+#define FLOAT_p 1.
+
+
+#ifdef CONFIG_DARWIN
+#define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
+#else
+#define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d}
+#endif
+
+#else /* HAVE_ALTIVEC */
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+#error "I can't use ALTIVEC_USE_REFERENCE_C_CODE if I don't use HAVE_ALTIVEC"
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+#endif /* HAVE_ALTIVEC */
+
+#endif /* _DSPUTIL_ALTIVEC_ */
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
index 733d0c156..c502f5819 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
@@ -19,18 +19,168 @@
#include "../dsputil.h"
+#include "dsputil_ppc.h"
+
#ifdef HAVE_ALTIVEC
#include "dsputil_altivec.h"
#endif
int mm_flags = 0;
+int mm_support(void)
+{
+ int result = 0;
+#if HAVE_ALTIVEC
+ if (has_altivec()) {
+ result |= MM_ALTIVEC;
+ }
+#endif /* HAVE_ALTIVEC */
+ return result;
+}
+
+#ifdef POWERPC_TBL_PERFORMANCE_REPORT
+unsigned long long perfdata[powerpc_perf_total][powerpc_data_total];
+/* list below must match enum in dsputil_ppc.h */
+static unsigned char* perfname[] = {
+ "fft_calc_altivec",
+ "gmc1_altivec",
+ "dct_unquantize_h263_altivec",
+ "idct_add_altivec",
+ "idct_put_altivec",
+ "put_pixels16_altivec",
+ "avg_pixels16_altivec",
+ "avg_pixels8_altivec",
+ "put_pixels8_xy2_altivec",
+ "put_no_rnd_pixels8_xy2_altivec",
+ "put_pixels16_xy2_altivec",
+ "put_no_rnd_pixels16_xy2_altivec",
+ "clear_blocks_dcbz32_ppc"
+};
+#ifdef POWERPC_PERF_USE_PMC
+unsigned long long perfdata_miss[powerpc_perf_total][powerpc_data_total];
+#endif
+#include <stdio.h>
+#endif
+
+#ifdef POWERPC_TBL_PERFORMANCE_REPORT
+void powerpc_display_perf_report(void)
+{
+ int i;
+#ifndef POWERPC_PERF_USE_PMC
+ fprintf(stderr, "PowerPC performance report\n Values are from the Time Base register, and represent 4 bus cycles.\n");
+#else /* POWERPC_PERF_USE_PMC */
+ fprintf(stderr, "PowerPC performance report\n Values are from the PMC registers, and represent whatever the registers are set to record.\n");
+#endif /* POWERPC_PERF_USE_PMC */
+ for(i = 0 ; i < powerpc_perf_total ; i++)
+ {
+ if (perfdata[i][powerpc_data_num] != (unsigned long long)0)
+ fprintf(stderr, " Function \"%s\" (pmc1):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n",
+ perfname[i],
+ perfdata[i][powerpc_data_min],
+ perfdata[i][powerpc_data_max],
+ (double)perfdata[i][powerpc_data_sum] /
+ (double)perfdata[i][powerpc_data_num],
+ perfdata[i][powerpc_data_num]);
+#ifdef POWERPC_PERF_USE_PMC
+ if (perfdata_miss[i][powerpc_data_num] != (unsigned long long)0)
+ fprintf(stderr, " Function \"%s\" (pmc2):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n",
+ perfname[i],
+ perfdata_miss[i][powerpc_data_min],
+ perfdata_miss[i][powerpc_data_max],
+ (double)perfdata_miss[i][powerpc_data_sum] /
+ (double)perfdata_miss[i][powerpc_data_num],
+ perfdata_miss[i][powerpc_data_num]);
+#endif
+ }
+}
+#endif /* POWERPC_TBL_PERFORMANCE_REPORT */
+
+/* ***** WARNING ***** WARNING ***** WARNING ***** */
+/*
+ clear_blocks_dcbz32_ppc will not work properly
+ on PowerPC processors with a cache line size
+ not equal to 32 bytes.
+ Fortunately all processors used by Apple, up to
+ at least the 7450 (aka second-generation G4),
+ use a 32-byte cache line.
+ This is due to the use of the 'dcbz' instruction.
+ It simply clears a single cache line to zero,
+ so you need to know the cache line size to use it!
+ It's absurd, but it's fast...
+*/
+void clear_blocks_dcbz32_ppc(DCTELEM *blocks)
+{
+POWERPC_TBL_DECLARE(powerpc_clear_blocks_dcbz32, 1);
+ register int misal = ((unsigned long)blocks & 0x00000010);
+ register int i = 0;
+POWERPC_TBL_START_COUNT(powerpc_clear_blocks_dcbz32, 1);
+#if 1
+ if (misal) {
+ ((unsigned long*)blocks)[0] = 0L;
+ ((unsigned long*)blocks)[1] = 0L;
+ ((unsigned long*)blocks)[2] = 0L;
+ ((unsigned long*)blocks)[3] = 0L;
+ i += 16;
+ }
+ for ( ; i < sizeof(DCTELEM)*6*64 ; i += 32) {
+ asm volatile("dcbz %0,%1" : : "r" (blocks), "r" (i) : "memory");
+ }
+ if (misal) {
+ ((unsigned long*)blocks)[188] = 0L;
+ ((unsigned long*)blocks)[189] = 0L;
+ ((unsigned long*)blocks)[190] = 0L;
+ ((unsigned long*)blocks)[191] = 0L;
+ i += 16;
+ }
+#else
+ memset(blocks, 0, sizeof(DCTELEM)*6*64);
+#endif
+POWERPC_TBL_STOP_COUNT(powerpc_clear_blocks_dcbz32, 1);
+}
+
+/* check dcbz: report how many bytes are set to 0 by dcbz */
+long check_dcbz_effect(void)
+{
+ register char *fakedata = (char*)av_malloc(1024);
+ register char *fakedata_middle;
+ register long zero = 0;
+ register long i = 0;
+ long count = 0;
+
+ if (!fakedata)
+ {
+ return 0L;
+ }
+
+ fakedata_middle = (fakedata + 512);
+
+ memset(fakedata, 0xFF, 1024);
+
+ asm volatile("dcbz %0, %1" : : "r" (fakedata_middle), "r" (zero));
+
+ for (i = 0; i < 1024 ; i ++)
+ {
+ if (fakedata[i] == (char)0)
+ count++;
+ }
+
+ av_free(fakedata);
+
+ return count;
+}
+
void dsputil_init_ppc(DSPContext* c, unsigned mask)
{
// Common optimisations whether Altivec or not
- // ... pending ...
-
+ switch (check_dcbz_effect()) {
+ case 32:
+ c->clear_blocks = clear_blocks_dcbz32_ppc;
+ break;
+ default:
+ break;
+ }
+
#if HAVE_ALTIVEC
if (has_altivec()) {
mm_flags |= MM_ALTIVEC;
@@ -41,12 +191,51 @@ void dsputil_init_ppc(DSPContext* c, unsigned mask)
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_altivec;
c->pix_abs16x16 = pix_abs16x16_altivec;
c->pix_abs8x8 = pix_abs8x8_altivec;
+ c->sad[0]= sad16x16_altivec;
+ c->sad[1]= sad8x8_altivec;
c->pix_norm1 = pix_norm1_altivec;
+ c->sse[1]= sse8_altivec;
+ c->sse[0]= sse16_altivec;
c->pix_sum = pix_sum_altivec;
c->diff_pixels = diff_pixels_altivec;
c->get_pixels = get_pixels_altivec;
+// next one disabled as it's untested.
+#if 0
+ c->add_bytes= add_bytes_altivec;
+#endif /* 0 */
+ c->put_pixels_tab[0][0] = put_pixels16_altivec;
+ c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
+// next one disabled as it's untested.
+#if 0
+ c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
+#endif /* 0 */
+ c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
+ c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
+ c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
+ c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
+
+ c->gmc1 = gmc1_altivec;
+
+#ifdef POWERPC_TBL_PERFORMANCE_REPORT
+ {
+ int i;
+ for (i = 0 ; i < powerpc_perf_total ; i++)
+ {
+ perfdata[i][powerpc_data_min] = 0xFFFFFFFFFFFFFFFF;
+ perfdata[i][powerpc_data_max] = 0x0000000000000000;
+ perfdata[i][powerpc_data_sum] = 0x0000000000000000;
+ perfdata[i][powerpc_data_num] = 0x0000000000000000;
+#ifdef POWERPC_PERF_USE_PMC
+ perfdata_miss[i][powerpc_data_min] = 0xFFFFFFFFFFFFFFFF;
+ perfdata_miss[i][powerpc_data_max] = 0x0000000000000000;
+ perfdata_miss[i][powerpc_data_sum] = 0x0000000000000000;
+ perfdata_miss[i][powerpc_data_num] = 0x0000000000000000;
+#endif /* POWERPC_PERF_USE_PMC */
+ }
+ }
+#endif /* POWERPC_TBL_PERFORMANCE_REPORT */
} else
-#endif
+#endif /* HAVE_ALTIVEC */
{
// Non-AltiVec PPC optimisations
diff --git a/src/libffmpeg/libavcodec/ppc/fft_altivec.c b/src/libffmpeg/libavcodec/ppc/fft_altivec.c
index 1a926b77c..992be5b8e 100644
--- a/src/libffmpeg/libavcodec/ppc/fft_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/fft_altivec.c
@@ -1,7 +1,7 @@
/*
* FFT/IFFT transforms
* AltiVec-enabled
- * Copyright (c) 2002 Romain Dolbeau <romain@dolbeau.org>
+ * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
* Based on code Copyright (c) 2002 Fabrice Bellard.
*
* This library is free software; you can redistribute it and/or
@@ -22,30 +22,30 @@
#include "dsputil_altivec.h"
-// used to build registers permutation vectors (vcprm)
-// the 's' are for words in the _s_econd vector
-#define WORD_0 0x00,0x01,0x02,0x03
-#define WORD_1 0x04,0x05,0x06,0x07
-#define WORD_2 0x08,0x09,0x0a,0x0b
-#define WORD_3 0x0c,0x0d,0x0e,0x0f
-#define WORD_s0 0x10,0x11,0x12,0x13
-#define WORD_s1 0x14,0x15,0x16,0x17
-#define WORD_s2 0x18,0x19,0x1a,0x1b
-#define WORD_s3 0x1c,0x1d,0x1e,0x1f
-
-#define vcprm(a,b,c,d) (const vector unsigned char)(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
-
-// vcprmle is used to keep the same index as in the SSE version.
-// it's the same as vcprm, with the index inversed
-// ('le' is Little Endian)
-#define vcprmle(a,b,c,d) vcprm(d,c,b,a)
-
-// used to build inverse/identity vectors (vcii)
-// n is _n_egative, p is _p_ositive
-#define FLOAT_n -1.
-#define FLOAT_p 1.
+/*
+ those three macros are from libavcodec/fft.c
+ and are required for the reference C code
+*/
+/* butter fly op */
+#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \
+{\
+ FFTSample ax, ay, bx, by;\
+ bx=pre1;\
+ by=pim1;\
+ ax=qre1;\
+ ay=qim1;\
+ pre = (bx + ax);\
+ pim = (by + ay);\
+ qre = (bx - ax);\
+ qim = (by - ay);\
+}
+#define MUL16(a,b) ((a) * (b))
+#define CMUL(pre, pim, are, aim, bre, bim) \
+{\
+ pre = (MUL16(are, bre) - MUL16(aim, bim));\
+ pim = (MUL16(are, bim) + MUL16(bre, aim));\
+}
-#define vcii(a,b,c,d) (const vector float)(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
/**
* Do a complex FFT with the parameters defined in fft_init(). The
@@ -55,20 +55,94 @@
* This code assumes that the 'z' pointer is 16 bytes-aligned
* It also assumes all FFTComplex are 8 bytes-aligned pair of float
* The code is exactly the same as the SSE version, except
- * that successive MUL + ADD/SUB have been fusionned into
+ * that successive MUL + ADD/SUB have been merged into
* fused multiply-add ('vec_madd' in altivec)
- *
- * To test this code you can use fft-test in libavcodec ; use
- * the following line in libavcodec to compile (MacOS X):
- * #####
- * gcc -I. -Ippc -no-cpp-precomp -pipe -O3 -fomit-frame-pointer -mdynamic-no-pic -Wall
- * -faltivec -DARCH_POWERPC -DHAVE_ALTIVEC -DCONFIG_DARWIN fft-test.c fft.c
- * ppc/fft_altivec.c ppc/dsputil_altivec.c mdct.c -DHAVE_LRINTF -o fft-test
- * #####
*/
void fft_calc_altivec(FFTContext *s, FFTComplex *z)
{
- register const vector float vczero = (vector float)( 0., 0., 0., 0.);
+POWERPC_TBL_DECLARE(altivec_fft_num, s->nbits >= 6);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ int ln = s->nbits;
+ int j, np, np2;
+ int nblocks, nloops;
+ register FFTComplex *p, *q;
+ FFTComplex *exptab = s->exptab;
+ int l;
+ FFTSample tmp_re, tmp_im;
+
+POWERPC_TBL_START_COUNT(altivec_fft_num, s->nbits >= 6);
+
+ np = 1 << ln;
+
+ /* pass 0 */
+
+ p=&z[0];
+ j=(np >> 1);
+ do {
+ BF(p[0].re, p[0].im, p[1].re, p[1].im,
+ p[0].re, p[0].im, p[1].re, p[1].im);
+ p+=2;
+ } while (--j != 0);
+
+ /* pass 1 */
+
+
+ p=&z[0];
+ j=np >> 2;
+ if (s->inverse) {
+ do {
+ BF(p[0].re, p[0].im, p[2].re, p[2].im,
+ p[0].re, p[0].im, p[2].re, p[2].im);
+ BF(p[1].re, p[1].im, p[3].re, p[3].im,
+ p[1].re, p[1].im, -p[3].im, p[3].re);
+ p+=4;
+ } while (--j != 0);
+ } else {
+ do {
+ BF(p[0].re, p[0].im, p[2].re, p[2].im,
+ p[0].re, p[0].im, p[2].re, p[2].im);
+ BF(p[1].re, p[1].im, p[3].re, p[3].im,
+ p[1].re, p[1].im, p[3].im, -p[3].re);
+ p+=4;
+ } while (--j != 0);
+ }
+ /* pass 2 .. ln-1 */
+
+ nblocks = np >> 3;
+ nloops = 1 << 2;
+ np2 = np >> 1;
+ do {
+ p = z;
+ q = z + nloops;
+ for (j = 0; j < nblocks; ++j) {
+ BF(p->re, p->im, q->re, q->im,
+ p->re, p->im, q->re, q->im);
+
+ p++;
+ q++;
+ for(l = nblocks; l < np2; l += nblocks) {
+ CMUL(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im);
+ BF(p->re, p->im, q->re, q->im,
+ p->re, p->im, tmp_re, tmp_im);
+ p++;
+ q++;
+ }
+
+ p += nloops;
+ q += nloops;
+ }
+ nblocks = nblocks >> 1;
+ nloops = nloops << 1;
+ } while (nblocks != 0);
+
+POWERPC_TBL_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
+
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+#ifdef CONFIG_DARWIN
+ register const vector float vczero = (const vector float)(0.);
+#else
+ register const vector float vczero = (const vector float){0.,0.,0.,0.};
+#endif
int ln = s->nbits;
int j, np, np2;
@@ -77,6 +151,8 @@ void fft_calc_altivec(FFTContext *s, FFTComplex *z)
FFTComplex *cptr, *cptr1;
int k;
+POWERPC_TBL_START_COUNT(altivec_fft_num, s->nbits >= 6);
+
np = 1 << ln;
{
@@ -162,5 +238,8 @@ void fft_calc_altivec(FFTContext *s, FFTComplex *z)
nblocks = nblocks >> 1;
nloops = nloops << 1;
} while (nblocks != 0);
-}
+POWERPC_TBL_STOP_COUNT(altivec_fft_num, s->nbits >= 6);
+
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+}
diff --git a/src/libffmpeg/libavcodec/ppc/idct_altivec.c b/src/libffmpeg/libavcodec/ppc/idct_altivec.c
index 8036d403f..1619f1731 100644
--- a/src/libffmpeg/libavcodec/ppc/idct_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/idct_altivec.c
@@ -38,6 +38,7 @@
#include <stdlib.h> /* malloc(), free() */
#include <string.h>
#include "../dsputil.h"
+#include "dsputil_altivec.h"
#define vector_s16_t vector signed short
#define vector_u16_t vector unsigned short
@@ -150,6 +151,8 @@
vx6 = vec_sra (vy6, shift); \
vx7 = vec_sra (vy7, shift);
+
+#ifdef CONFIG_DARWIN
static const vector_s16_t constants[5] = {
(vector_s16_t)(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
(vector_s16_t)(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
@@ -157,11 +160,30 @@ static const vector_s16_t constants[5] = {
(vector_s16_t)(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
(vector_s16_t)(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
};
+#else
+// broken gcc
+static const vector_s16_t constants[5] = {
+ (vector_s16_t){23170, 13573, 6518, 21895, -23170, -21895, 32, 31},
+ (vector_s16_t){16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725},
+ (vector_s16_t){22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521},
+ (vector_s16_t){21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692},
+ (vector_s16_t){19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722}
+};
+#endif
void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
+POWERPC_TBL_DECLARE(altivec_idct_put_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+POWERPC_TBL_START_COUNT(altivec_idct_put_num, 1);
+ void simple_idct_put(UINT8 *dest, int line_size, INT16 *block);
+ simple_idct_put(dest, stride, (INT16*)block);
+POWERPC_TBL_STOP_COUNT(altivec_idct_put_num, 1);
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
vector_u8_t tmp;
+POWERPC_TBL_START_COUNT(altivec_idct_put_num, 1);
+
IDCT
#define COPY(dest,src) \
@@ -177,16 +199,28 @@ void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
COPY (dest, vx5) dest += stride;
COPY (dest, vx6) dest += stride;
COPY (dest, vx7)
+
+POWERPC_TBL_STOP_COUNT(altivec_idct_put_num, 1);
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
+POWERPC_TBL_DECLARE(altivec_idct_add_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+POWERPC_TBL_START_COUNT(altivec_idct_add_num, 1);
+ void simple_idct_add(UINT8 *dest, int line_size, INT16 *block);
+ simple_idct_add(dest, stride, (INT16*)block);
+POWERPC_TBL_STOP_COUNT(altivec_idct_add_num, 1);
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
vector_u8_t tmp;
vector_s16_t tmp2, tmp3;
vector_u8_t perm0;
vector_u8_t perm1;
vector_u8_t p0, p1, p;
+POWERPC_TBL_START_COUNT(altivec_idct_add_num, 1);
+
IDCT
p0 = vec_lvsl (0, dest);
@@ -212,5 +246,8 @@ void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block)
ADD (dest, vx5, perm1) dest += stride;
ADD (dest, vx6, perm0) dest += stride;
ADD (dest, vx7, perm1)
+
+POWERPC_TBL_STOP_COUNT(altivec_idct_add_num, 1);
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c b/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c
index bcbc1e6ba..dd898e158 100644
--- a/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c
@@ -20,10 +20,7 @@
#include <stdio.h>
#include "../dsputil.h"
#include "../mpegvideo.h"
-
-
-// Used when initializing constant vectors
-#define FOUR_INSTANCES(x) x,x,x,x
+#include "dsputil_altivec.h"
// Swaps two variables (used for altivec registers)
#define SWAP(a,b) \
@@ -93,6 +90,13 @@ do { \
vec = vec_splat(vec, 0); \
}
+
+#ifdef CONFIG_DARWIN
+#define FOUROF(a) (a)
+#else
+// slower, for dumb non-apple GCC
+#define FOUROF(a) {a,a,a,a}
+#endif
int dct_quantize_altivec(MpegEncContext* s,
DCTELEM* data, int n,
int qscale, int* overflow)
@@ -100,7 +104,7 @@ int dct_quantize_altivec(MpegEncContext* s,
int lastNonZero;
vector float row0, row1, row2, row3, row4, row5, row6, row7;
vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
- const vector float zero = {FOUR_INSTANCES(0.0f)};
+ const vector float zero = (const vector float)FOUROF(0.);
// Load the data into the row/alt vectors
{
@@ -144,18 +148,18 @@ int dct_quantize_altivec(MpegEncContext* s,
// in the vector local variables, as floats, which we'll use during the
// quantize step...
{
- const vector float vec_0_298631336 = {FOUR_INSTANCES(0.298631336f)};
- const vector float vec_0_390180644 = {FOUR_INSTANCES(-0.390180644f)};
- const vector float vec_0_541196100 = {FOUR_INSTANCES(0.541196100f)};
- const vector float vec_0_765366865 = {FOUR_INSTANCES(0.765366865f)};
- const vector float vec_0_899976223 = {FOUR_INSTANCES(-0.899976223f)};
- const vector float vec_1_175875602 = {FOUR_INSTANCES(1.175875602f)};
- const vector float vec_1_501321110 = {FOUR_INSTANCES(1.501321110f)};
- const vector float vec_1_847759065 = {FOUR_INSTANCES(-1.847759065f)};
- const vector float vec_1_961570560 = {FOUR_INSTANCES(-1.961570560f)};
- const vector float vec_2_053119869 = {FOUR_INSTANCES(2.053119869f)};
- const vector float vec_2_562915447 = {FOUR_INSTANCES(-2.562915447f)};
- const vector float vec_3_072711026 = {FOUR_INSTANCES(3.072711026f)};
+ const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f);
+ const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f);
+ const vector float vec_0_541196100 = (vector float)FOUROF(0.541196100f);
+ const vector float vec_0_765366865 = (vector float)FOUROF(0.765366865f);
+ const vector float vec_0_899976223 = (vector float)FOUROF(-0.899976223f);
+ const vector float vec_1_175875602 = (vector float)FOUROF(1.175875602f);
+ const vector float vec_1_501321110 = (vector float)FOUROF(1.501321110f);
+ const vector float vec_1_847759065 = (vector float)FOUROF(-1.847759065f);
+ const vector float vec_1_961570560 = (vector float)FOUROF(-1.961570560f);
+ const vector float vec_2_053119869 = (vector float)FOUROF(2.053119869f);
+ const vector float vec_2_562915447 = (vector float)FOUROF(-2.562915447f);
+ const vector float vec_3_072711026 = (vector float)FOUROF(3.072711026f);
int whichPass, whichHalf;
@@ -309,7 +313,7 @@ int dct_quantize_altivec(MpegEncContext* s,
// rounding when we convert to int, instead of flooring.)
{
vector signed int biasInt;
- const vector float negOneFloat = (vector float)(FOUR_INSTANCES(-1.0f));
+ const vector float negOneFloat = (vector float)FOUROF(-1.0f);
LOAD4(biasInt, biasAddr);
bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
negBias = vec_madd(bias, negOneFloat, zero);
@@ -506,4 +510,133 @@ int dct_quantize_altivec(MpegEncContext* s,
return lastNonZero;
}
+#undef FOUROF
+
+/*
+ AltiVec version of dct_unquantize_h263
+ this code assumes `block' is 16 bytes-aligned
+*/
+void dct_unquantize_h263_altivec(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale)
+{
+POWERPC_TBL_DECLARE(altivec_dct_unquantize_h263_num, 1);
+ int i, level, qmul, qadd;
+ int nCoeffs;
+
+ assert(s->block_last_index[n]>=0);
+
+POWERPC_TBL_START_COUNT(altivec_dct_unquantize_h263_num, 1);
+
+ qadd = (qscale - 1) | 1;
+ qmul = qscale << 1;
+
+ if (s->mb_intra) {
+ if (!s->h263_aic) {
+ if (n < 4)
+ block[0] = block[0] * s->y_dc_scale;
+ else
+ block[0] = block[0] * s->c_dc_scale;
+ }else
+ qadd = 0;
+ i = 1;
+ nCoeffs= 63; //does not always use zigzag table
+ } else {
+ i = 0;
+ nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
+ }
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+ for(;i<=nCoeffs;i++) {
+ level = block[i];
+ if (level) {
+ if (level < 0) {
+ level = level * qmul - qadd;
+ } else {
+ level = level * qmul + qadd;
+ }
+ block[i] = level;
+ }
+ }
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ {
+ register const vector short vczero = (const vector short)vec_splat_s16(0);
+ short __attribute__ ((aligned(16))) qmul8[] =
+ {
+ qmul, qmul, qmul, qmul,
+ qmul, qmul, qmul, qmul
+ };
+ short __attribute__ ((aligned(16))) qadd8[] =
+ {
+ qadd, qadd, qadd, qadd,
+ qadd, qadd, qadd, qadd
+ };
+ short __attribute__ ((aligned(16))) nqadd8[] =
+ {
+ -qadd, -qadd, -qadd, -qadd,
+ -qadd, -qadd, -qadd, -qadd
+ };
+ register vector short blockv, qmulv, qaddv, nqaddv, temp1;
+ register vector bool short blockv_null, blockv_neg;
+ register short backup_0 = block[0];
+ register int j = 0;
+
+ qmulv = vec_ld(0, qmul8);
+ qaddv = vec_ld(0, qadd8);
+ nqaddv = vec_ld(0, nqadd8);
+
+#if 0 // block *is* 16 bytes-aligned, it seems.
+ // first make sure block[j] is 16 bytes-aligned
+ for(j = 0; (j <= nCoeffs) && ((((unsigned long)block) + (j << 1)) & 0x0000000F) ; j++) {
+ level = block[j];
+ if (level) {
+ if (level < 0) {
+ level = level * qmul - qadd;
+ } else {
+ level = level * qmul + qadd;
+ }
+ block[j] = level;
+ }
+ }
+#endif
+
+ // vectorize all the 16 bytes-aligned blocks
+ // of 8 elements
+ for(; (j + 7) <= nCoeffs ; j+=8)
+ {
+ blockv = vec_ld(j << 1, block);
+ blockv_neg = vec_cmplt(blockv, vczero);
+ blockv_null = vec_cmpeq(blockv, vczero);
+ // choose between +qadd or -qadd as the third operand
+ temp1 = vec_sel(qaddv, nqaddv, blockv_neg);
+ // multiply & add (block{i,i+7} * qmul [+-] qadd)
+ temp1 = vec_mladd(blockv, qmulv, temp1);
+ // put 0 where block{i,i+7} used to have 0
+ blockv = vec_sel(temp1, blockv, blockv_null);
+ vec_st(blockv, j << 1, block);
+ }
+
+ // if nCoeffs isn't a multiple of 8, finish the job
+ // using good old scalar units.
+ // (we could do it using a truncated vector,
+ // but I'm not sure it's worth the hassle)
+ for(; j <= nCoeffs ; j++) {
+ level = block[j];
+ if (level) {
+ if (level < 0) {
+ level = level * qmul - qadd;
+ } else {
+ level = level * qmul + qadd;
+ }
+ block[j] = level;
+ }
+ }
+
+ if (i == 1)
+ { // cheat. this avoids special-casing the first iteration
+ block[0] = backup_0;
+ }
+ }
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+
+POWERPC_TBL_STOP_COUNT(altivec_dct_unquantize_h263_num, nCoeffs == 63);
+}
diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c b/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c
index 94d608b63..9757f5f39 100644
--- a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c
+++ b/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c
@@ -27,6 +27,8 @@
extern int dct_quantize_altivec(MpegEncContext *s,
DCTELEM *block, int n,
int qscale, int *overflow);
+extern void dct_unquantize_h263_altivec(MpegEncContext *s,
+ DCTELEM *block, int n, int qscale);
extern void idct_put_altivec(UINT8 *dest, int line_size, INT16 *block);
extern void idct_add_altivec(UINT8 *dest, int line_size, INT16 *block);
@@ -42,7 +44,11 @@ void MPV_common_init_ppc(MpegEncContext *s)
{
s->idct_put = idct_put_altivec;
s->idct_add = idct_add_altivec;
+#ifndef ALTIVEC_USE_REFERENCE_C_CODE
s->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ s->idct_permutation_type = FF_NO_IDCT_PERM;
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
// Test to make sure that the dct required alignments are met.
@@ -66,6 +72,7 @@ void MPV_common_init_ppc(MpegEncContext *s)
(s->avctx->dct_algo == FF_DCT_ALTIVEC))
{
s->dct_quantize = dct_quantize_altivec;
+ s->dct_unquantize_h263 = dct_unquantize_h263_altivec;
}
} else
#endif