summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Melanson <mike@multimedia.cx>2005-04-19 05:10:48 +0000
committerMike Melanson <mike@multimedia.cx>2005-04-19 05:10:48 +0000
commit19e7199dad84489aa49e3b2dd5c0e45657ec0fb8 (patch)
treecf8d1d84366dcfec8c1b0510979061b546792b22
parent960785eafb7934d77371d6e41e182e6285613921 (diff)
downloadxine-lib-19e7199dad84489aa49e3b2dd5c0e45657ec0fb8.tar.gz
xine-lib-19e7199dad84489aa49e3b2dd5c0e45657ec0fb8.tar.bz2
sync to FFmpeg build 4752
CVS patchset: 7462 CVS date: 2005/04/19 05:10:48
-rw-r--r--src/libffmpeg/libavcodec/ppc/Makefile.am2
-rw-r--r--src/libffmpeg/libavcodec/ppc/dsputil_altivec.c130
-rw-r--r--src/libffmpeg/libavcodec/ppc/dsputil_altivec.h1
-rwxr-xr-xsrc/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c266
-rwxr-xr-xsrc/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c639
-rw-r--r--src/libffmpeg/libavcodec/ppc/dsputil_ppc.c35
-rw-r--r--src/libffmpeg/libavcodec/ppc/dsputil_ppc.h16
-rw-r--r--src/libffmpeg/libavcodec/ppc/fdct_altivec.c114
8 files changed, 1131 insertions, 72 deletions
diff --git a/src/libffmpeg/libavcodec/ppc/Makefile.am b/src/libffmpeg/libavcodec/ppc/Makefile.am
index 50b9d802e..0fab4b916 100644
--- a/src/libffmpeg/libavcodec/ppc/Makefile.am
+++ b/src/libffmpeg/libavcodec/ppc/Makefile.am
@@ -11,6 +11,8 @@ noinst_LTLIBRARIES = libavcodec_ppc.la
libavcodec_ppc_src = dsputil_altivec.c \
dsputil_ppc.c \
+ dsputil_h264_altivec.c \
+ dsputil_h264_template_altivec.c \
fdct_altivec.c \
fft_altivec.c \
idct_altivec.c \
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
index ff6e870b7..57b687dfd 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
@@ -27,6 +27,11 @@
#ifdef CONFIG_DARWIN
#include <sys/sysctl.h>
#else /* CONFIG_DARWIN */
+#ifdef __AMIGAOS4__
+#include <exec/exec.h>
+#include <interfaces/exec.h>
+#include <proto/exec.h>
+#else /* __AMIGAOS4__ */
#include <signal.h>
#include <setjmp.h>
@@ -44,6 +49,7 @@ static void sigill_handler (int sig)
siglongjmp (jmpbuf, 1);
}
#endif /* CONFIG_DARWIN */
+#endif /* __AMIGAOS4__ */
int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
@@ -1302,7 +1308,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
-#if (__GNUC__ * 100 + __GNUC_MINOR__ >= 330)
+#ifdef CONFIG_DARWIN
int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
int sum;
@@ -1612,10 +1618,19 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
return score;
}
-#endif
+#endif //CONFIG_DARWIN
int has_altivec(void)
{
+#ifdef __AMIGAOS4__
+ ULONG result = 0;
+ extern struct ExecIFace *IExec;
+
+ IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE);
+ if (result == VECTORTYPE_ALTIVEC) return 1;
+ return 0;
+#else /* __AMIGAOS4__ */
+
#ifdef CONFIG_DARWIN
int sels[2] = {CTL_HW, HW_VECTORUNIT};
int has_vu = 0;
@@ -1646,4 +1661,115 @@ int has_altivec(void)
}
#endif /* CONFIG_DARWIN */
return 0;
+#endif /* __AMIGAOS4__ */
+}
+
+/* next one assumes that ((line_size % 8) == 0) */
+void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h)
+{
+POWERPC_PERF_DECLARE(altivec_avg_pixels8_xy2_num, 1);
+#ifdef ALTIVEC_USE_REFERENCE_C_CODE
+
+ int j;
+POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
+ for (j = 0; j < 2; j++) {
+ int i;
+ const uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
+ uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ uint32_t l1, h1;
+ pixels += line_size;
+ for (i = 0; i < h; i += 2) {
+ uint32_t a = (((const struct unaligned_32 *) (pixels))->l);
+ uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l1 = (a & 0x03030303UL) + (b & 0x03030303UL);
+ h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
+ pixels += line_size;
+ block += line_size;
+ a = (((const struct unaligned_32 *) (pixels))->l);
+ b = (((const struct unaligned_32 *) (pixels + 1))->l);
+ l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL;
+ h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2);
+ *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));
+ pixels += line_size;
+ block += line_size;
+ } pixels += 4 - line_size * (h + 1);
+ block += 4 - line_size * h;
+ }
+POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
+#else /* ALTIVEC_USE_REFERENCE_C_CODE */
+ register int i;
+ register vector unsigned char
+ pixelsv1, pixelsv2,
+ pixelsavg;
+ register vector unsigned char
+ blockv, temp1, temp2, blocktemp;
+ register vector unsigned short
+ pixelssum1, pixelssum2, temp3;
+ register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0);
+ register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2);
+
+ temp1 = vec_ld(0, pixels);
+ temp2 = vec_ld(16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
+ if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
+ }
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum1 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ pixelssum1 = vec_add(pixelssum1, vctwo);
+
+POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1);
+ for (i = 0; i < h ; i++) {
+ int rightside = ((unsigned long)block & 0x0000000F);
+ blockv = vec_ld(0, block);
+
+ temp1 = vec_ld(line_size, pixels);
+ temp2 = vec_ld(line_size + 16, pixels);
+ pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
+ if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
+ {
+ pixelsv2 = temp2;
+ }
+ else
+ {
+ pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
+ }
+
+ pixelsv1 = vec_mergeh(vczero, pixelsv1);
+ pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelssum2 = vec_add((vector unsigned short)pixelsv1,
+ (vector unsigned short)pixelsv2);
+ temp3 = vec_add(pixelssum1, pixelssum2);
+ temp3 = vec_sra(temp3, vctwo);
+ pixelssum1 = vec_add(pixelssum2, vctwo);
+ pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero);
+
+ if (rightside)
+ {
+ blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1));
+ }
+ else
+ {
+ blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3));
+ }
+
+ blockv = vec_avg(blocktemp, blockv);
+ vec_st(blockv, 0, block);
+
+ block += line_size;
+ pixels += line_size;
+ }
+
+POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1);
+#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h
index e2729ab22..88f06c372 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h
@@ -48,6 +48,7 @@ extern void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, in
extern void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h);
extern int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h);
extern int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h);
+extern void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h);
extern void gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder);
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c
new file mode 100755
index 000000000..1891e194a
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+
+#include "dsputil_altivec.h"
+
+#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
+#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
+
+#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
+#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
+#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num
+#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec
+#define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num
+#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
+#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
+#include "dsputil_h264_template_altivec.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#undef PREFIX_h264_qpel16_h_lowpass_altivec
+#undef PREFIX_h264_qpel16_h_lowpass_num
+#undef PREFIX_h264_qpel16_v_lowpass_altivec
+#undef PREFIX_h264_qpel16_v_lowpass_num
+#undef PREFIX_h264_qpel16_hv_lowpass_altivec
+#undef PREFIX_h264_qpel16_hv_lowpass_num
+
+#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
+#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec
+#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num
+#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec
+#define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num
+#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
+#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
+#include "dsputil_h264_template_altivec.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#undef PREFIX_h264_qpel16_h_lowpass_altivec
+#undef PREFIX_h264_qpel16_h_lowpass_num
+#undef PREFIX_h264_qpel16_v_lowpass_altivec
+#undef PREFIX_h264_qpel16_v_lowpass_num
+#undef PREFIX_h264_qpel16_hv_lowpass_altivec
+#undef PREFIX_h264_qpel16_hv_lowpass_num
+
+#define H264_MC(OPNAME, SIZE, CODETYPE) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
+ uint64_t temp[SIZE*SIZE/8] __align16;\
+ uint8_t * const half= (uint8_t*)temp;\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*SIZE/8] __align16;\
+ uint8_t * const half= (uint8_t*)temp;\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*SIZE/8] __align16;\
+ uint8_t * const half= (uint8_t*)temp;\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*SIZE/8] __align16;\
+ uint8_t * const half= (uint8_t*)temp;\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*SIZE/4] __align16;\
+ uint8_t * const halfH= (uint8_t*)temp;\
+ uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*SIZE/4] __align16;\
+ uint8_t * const halfH= (uint8_t*)temp;\
+ uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*SIZE/4] __align16;\
+ uint8_t * const halfH= (uint8_t*)temp;\
+ uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*SIZE/4] __align16;\
+ uint8_t * const halfH= (uint8_t*)temp;\
+ uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*(SIZE+8)/4] __align16;\
+ int16_t * const tmp= (int16_t*)temp;\
+ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4] __align16;\
+ uint8_t * const halfH= (uint8_t*)temp;\
+ uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
+ int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4] __align16;\
+ uint8_t * const halfH= (uint8_t*)temp;\
+ uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
+ int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4] __align16;\
+ uint8_t * const halfV= (uint8_t*)temp;\
+ uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
+ int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4] __align16;\
+ uint8_t * const halfV= (uint8_t*)temp;\
+ uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\
+ int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
+}\
+
+
+/* from dsputil.c */
+static inline void put_pixels8_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
+ int i;
+ for (i = 0; i < h; i++) {
+ uint32_t a, b;
+ a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l);
+ b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l);
+ *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(a, b);
+ a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l);
+ b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l);
+ *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(a, b);
+ }
+} static inline void avg_pixels8_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
+ int i;
+ for (i = 0; i < h; i++) {
+ uint32_t a, b;
+ a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l);
+ b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l);
+ *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride]), rnd_avg32(a, b));
+ a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l);
+ b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l);
+ *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride + 4]), rnd_avg32(a, b));
+ }
+} static inline void put_pixels16_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
+ put_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);
+ put_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h);
+} static inline void avg_pixels16_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
+ avg_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);
+ avg_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h);
+}
+
+/* UNIMPLEMENTED YET !! */
+#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+
+H264_MC(put_, 16, altivec)
+ H264_MC(avg_, 16, altivec)
+
+void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
+
+#ifdef HAVE_ALTIVEC
+ if (has_altivec()) {
+ c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
+ c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
+
+#define dspfunc(PFX, IDX, NUM) \
+ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
+ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
+ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
+ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
+ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
+ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
+ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
+
+ dspfunc(put_h264_qpel, 0, 16);
+ dspfunc(avg_h264_qpel, 0, 16);
+#undef dspfunc
+
+ } else
+#endif /* HAVE_ALTIVEC */
+ {
+ // Non-AltiVec PPC optimisations
+
+ // ... pending ...
+ }
+}
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c
new file mode 100755
index 000000000..cb0fa954d
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c
@@ -0,0 +1,639 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/* this code assumes that stride % 16 == 0 */
+void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
+ POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
+ POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
+ signed int ABCD[4] __attribute__((aligned(16)));
+ register int i;
+ ABCD[0] = ((8 - x) * (8 - y));
+ ABCD[1] = ((x) * (8 - y));
+ ABCD[2] = ((8 - x) * (y));
+ ABCD[3] = ((x) * (y));
+ const vector signed int vABCD = vec_ld(0, ABCD);
+ const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
+ const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
+ const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
+ const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector signed short v32ss = (const vector signed short)AVV(32);
+ const vector unsigned short v6us = vec_splat_u16(6);
+
+ vector unsigned char fperm;
+
+ if (((unsigned long)dst) % 16 == 0) {
+ fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
+ } else {
+ fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
+ }
+
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+ vector unsigned char vsrcAuc;
+ vector unsigned char vsrcBuc;
+ vector unsigned char vsrcperm0;
+ vector unsigned char vsrcperm1;
+ vsrcAuc = vec_ld(0, src);
+ if (loadSecond)
+ vsrcBuc = vec_ld(16, src);
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+
+ vector unsigned char vsrc0uc;
+ vector unsigned char vsrc1uc;
+ vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc1uc = vsrcBuc;
+ else
+ vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+ vector signed short vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc0uc);
+ vector signed short vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc1uc);
+
+ if (!loadSecond) {// -> !reallyBadAlign
+ for (i = 0 ; i < h ; i++) {
+ vector unsigned char vsrcCuc;
+ vsrcCuc = vec_ld(stride + 0, src);
+
+ vector unsigned char vsrc2uc;
+ vector unsigned char vsrc3uc;
+ vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+ vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+ vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
+ vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
+
+ vector signed short psum;
+
+ psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+ psum = vec_mladd(vB, vsrc1ssH, psum);
+ psum = vec_mladd(vC, vsrc2ssH, psum);
+ psum = vec_mladd(vD, vsrc3ssH, psum);
+ psum = vec_add(v32ss, psum);
+ psum = vec_sra(psum, v6us);
+
+ vector unsigned char vdst = vec_ld(0, dst);
+ vector unsigned char ppsum = (vector unsigned char)vec_packsu(psum, psum);
+
+ vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
+ vector unsigned char fsum;
+
+ OP_U8_ALTIVEC(fsum, vfdst, vdst);
+
+ vec_st(fsum, 0, dst);
+
+ vsrc0ssH = vsrc2ssH;
+ vsrc1ssH = vsrc3ssH;
+
+ dst += stride;
+ src += stride;
+ }
+ } else {
+ for (i = 0 ; i < h ; i++) {
+ vector unsigned char vsrcCuc;
+ vector unsigned char vsrcDuc;
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrcDuc = vec_ld(stride + 16, src);
+
+ vector unsigned char vsrc2uc;
+ vector unsigned char vsrc3uc;
+ vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc3uc = vsrcDuc;
+ else
+ vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+ vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
+ vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
+
+ vector signed short psum;
+
+ psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+ psum = vec_mladd(vB, vsrc1ssH, psum);
+ psum = vec_mladd(vC, vsrc2ssH, psum);
+ psum = vec_mladd(vD, vsrc3ssH, psum);
+ psum = vec_add(v32ss, psum);
+ psum = vec_sr(psum, v6us);
+
+ vector unsigned char vdst = vec_ld(0, dst);
+ vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum);
+
+ vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
+ vector unsigned char fsum;
+
+ OP_U8_ALTIVEC(fsum, vfdst, vdst);
+
+ vec_st(fsum, 0, dst);
+
+ vsrc0ssH = vsrc2ssH;
+ vsrc1ssH = vsrc3ssH;
+
+ dst += stride;
+ src += stride;
+ }
+ }
+ POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 */
+static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
+ POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
+ POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
+ register int i;
+
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector unsigned char permM2 = vec_lvsl(-2, src);
+ const vector unsigned char permM1 = vec_lvsl(-1, src);
+ const vector unsigned char permP0 = vec_lvsl(+0, src);
+ const vector unsigned char permP1 = vec_lvsl(+1, src);
+ const vector unsigned char permP2 = vec_lvsl(+2, src);
+ const vector unsigned char permP3 = vec_lvsl(+3, src);
+ const vector signed short v20ss = (const vector signed short)AVV(20);
+ const vector unsigned short v5us = vec_splat_u16(5);
+ const vector signed short v5ss = vec_splat_s16(5);
+ const vector signed short v16ss = (const vector signed short)AVV(16);
+ const vector unsigned char dstperm = vec_lvsr(0, dst);
+ const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
+ const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+
+ register int align = ((((unsigned long)src) - 2) % 16);
+
+ for (i = 0 ; i < 16 ; i ++) {
+ vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
+ vector unsigned char srcR1 = vec_ld(-2, src);
+ vector unsigned char srcR2 = vec_ld(14, src);
+
+ switch (align) {
+ default: {
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = vec_perm(srcR1, srcR2, permP2);
+ srcP3 = vec_perm(srcR1, srcR2, permP3);
+ } break;
+ case 11: {
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = vec_perm(srcR1, srcR2, permP2);
+ srcP3 = srcR2;
+ } break;
+ case 12: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = srcR2;
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 13: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = srcR2;
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 14: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = srcR2;
+ srcP1 = vec_perm(srcR2, srcR3, permP1);
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 15: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = srcR2;
+ srcP0 = vec_perm(srcR2, srcR3, permP0);
+ srcP1 = vec_perm(srcR2, srcR3, permP1);
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ }
+
+ const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
+ const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
+ const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
+ const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
+
+ const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
+ const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
+ const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
+ const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
+
+ const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
+ const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
+ const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
+ const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
+
+ const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
+ const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
+ const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
+ const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
+ const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
+ const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
+
+ const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
+ const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+ const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+ const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+ const vector signed short pp3A = vec_add(sum3A, pp1A);
+ const vector signed short pp3B = vec_add(sum3B, pp1B);
+
+ const vector signed short psumA = vec_sub(pp3A, pp2A);
+ const vector signed short psumB = vec_sub(pp3B, pp2B);
+
+ const vector signed short sumA = vec_sra(psumA, v5us);
+ const vector signed short sumB = vec_sra(psumB, v5us);
+
+ const vector unsigned char sum = vec_packsu(sumA, sumB);
+
+ const vector unsigned char dst1 = vec_ld(0, dst);
+ const vector unsigned char dst2 = vec_ld(16, dst);
+ const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+ vector unsigned char fsum;
+ OP_U8_ALTIVEC(fsum, sum, vdst);
+
+ const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
+ const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
+ const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);
+
+ vec_st(fdst1, 0, dst);
+ vec_st(fdst2, 16, dst);
+
+ src += srcStride;
+ dst += dstStride;
+ }
+POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 */
+static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
+ POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
+ POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
+
+ register int i;
+
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector unsigned char perm = vec_lvsl(0, src);
+ const vector signed short v20ss = (const vector signed short)AVV(20);
+ const vector unsigned short v5us = vec_splat_u16(5);
+ const vector signed short v5ss = vec_splat_s16(5);
+ const vector signed short v16ss = (const vector signed short)AVV(16);
+ const vector unsigned char dstperm = vec_lvsr(0, dst);
+ const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
+ const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+
+ uint8_t *srcbis = src - (srcStride * 2);
+
+ const vector unsigned char srcM2a = vec_ld(0, srcbis);
+ const vector unsigned char srcM2b = vec_ld(16, srcbis);
+ const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm);
+ srcbis += srcStride;
+ const vector unsigned char srcM1a = vec_ld(0, srcbis);
+ const vector unsigned char srcM1b = vec_ld(16, srcbis);
+ const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm);
+ srcbis += srcStride;
+ const vector unsigned char srcP0a = vec_ld(0, srcbis);
+ const vector unsigned char srcP0b = vec_ld(16, srcbis);
+ const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm);
+ srcbis += srcStride;
+ const vector unsigned char srcP1a = vec_ld(0, srcbis);
+ const vector unsigned char srcP1b = vec_ld(16, srcbis);
+ const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm);
+ srcbis += srcStride;
+ const vector unsigned char srcP2a = vec_ld(0, srcbis);
+ const vector unsigned char srcP2b = vec_ld(16, srcbis);
+ const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm);
+ srcbis += srcStride;
+
+ vector signed short srcM2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
+ vector signed short srcM2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
+ vector signed short srcM1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
+ vector signed short srcM1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
+ vector signed short srcP0ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
+ vector signed short srcP0ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
+ vector signed short srcP1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
+ vector signed short srcP1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
+ vector signed short srcP2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
+ vector signed short srcP2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
+
+ for (i = 0 ; i < 16 ; i++) {
+ const vector unsigned char srcP3a = vec_ld(0, srcbis);
+ const vector unsigned char srcP3b = vec_ld(16, srcbis);
+ const vector unsigned char srcP3 = vec_perm(srcP3a, srcP3b, perm);
+ const vector signed short srcP3ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
+ const vector signed short srcP3ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
+ srcbis += srcStride;
+
+ const vector signed short sum1A = vec_adds(srcP0ssA, srcP1ssA);
+ const vector signed short sum1B = vec_adds(srcP0ssB, srcP1ssB);
+ const vector signed short sum2A = vec_adds(srcM1ssA, srcP2ssA);
+ const vector signed short sum2B = vec_adds(srcM1ssB, srcP2ssB);
+ const vector signed short sum3A = vec_adds(srcM2ssA, srcP3ssA);
+ const vector signed short sum3B = vec_adds(srcM2ssB, srcP3ssB);
+
+ srcM2ssA = srcM1ssA;
+ srcM2ssB = srcM1ssB;
+ srcM1ssA = srcP0ssA;
+ srcM1ssB = srcP0ssB;
+ srcP0ssA = srcP1ssA;
+ srcP0ssB = srcP1ssB;
+ srcP1ssA = srcP2ssA;
+ srcP1ssB = srcP2ssB;
+ srcP2ssA = srcP3ssA;
+ srcP2ssB = srcP3ssB;
+
+ const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
+ const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+ const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+ const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+ const vector signed short pp3A = vec_add(sum3A, pp1A);
+ const vector signed short pp3B = vec_add(sum3B, pp1B);
+
+ const vector signed short psumA = vec_sub(pp3A, pp2A);
+ const vector signed short psumB = vec_sub(pp3B, pp2B);
+
+ const vector signed short sumA = vec_sra(psumA, v5us);
+ const vector signed short sumB = vec_sra(psumB, v5us);
+
+ const vector unsigned char sum = vec_packsu(sumA, sumB);
+
+ const vector unsigned char dst1 = vec_ld(0, dst);
+ const vector unsigned char dst2 = vec_ld(16, dst);
+ const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+ vector unsigned char fsum;
+ OP_U8_ALTIVEC(fsum, sum, vdst);
+
+ const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
+ const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
+ const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);
+
+ vec_st(fdst1, 0, dst);
+ vec_st(fdst2, 16, dst);
+
+ dst += dstStride;
+ }
+ POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 *and* tmp is properly aligned */
+static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
+ POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+ POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+ register int i;
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector unsigned char permM2 = vec_lvsl(-2, src);
+ const vector unsigned char permM1 = vec_lvsl(-1, src);
+ const vector unsigned char permP0 = vec_lvsl(+0, src);
+ const vector unsigned char permP1 = vec_lvsl(+1, src);
+ const vector unsigned char permP2 = vec_lvsl(+2, src);
+ const vector unsigned char permP3 = vec_lvsl(+3, src);
+ const vector signed short v20ss = (const vector signed short)AVV(20);
+ const vector unsigned int v10ui = vec_splat_u32(10);
+ const vector signed short v5ss = vec_splat_s16(5);
+ const vector signed short v1ss = vec_splat_s16(1);
+ const vector signed int v512si = (const vector signed int)AVV(512);
+ const vector unsigned int v16ui = (const vector unsigned int)AVV(16);
+
+ register int align = ((((unsigned long)src) - 2) % 16);
+
+ src -= (2 * srcStride);
+
+ for (i = 0 ; i < 21 ; i ++) {
+ vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
+ vector unsigned char srcR1 = vec_ld(-2, src);
+ vector unsigned char srcR2 = vec_ld(14, src);
+
+ switch (align) {
+ default: {
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = vec_perm(srcR1, srcR2, permP2);
+ srcP3 = vec_perm(srcR1, srcR2, permP3);
+ } break;
+ case 11: {
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = vec_perm(srcR1, srcR2, permP2);
+ srcP3 = srcR2;
+ } break;
+ case 12: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = srcR2;
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 13: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = srcR2;
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 14: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = srcR2;
+ srcP1 = vec_perm(srcR2, srcR3, permP1);
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 15: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = srcR2;
+ srcP0 = vec_perm(srcR2, srcR3, permP0);
+ srcP1 = vec_perm(srcR2, srcR3, permP1);
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ }
+
+ const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
+ const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
+ const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
+ const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
+
+ const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
+ const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
+ const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
+ const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
+
+ const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
+ const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
+ const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
+ const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
+
+ const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
+ const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
+ const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
+ const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
+ const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
+ const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
+
+ const vector signed short pp1A = vec_mladd(sum1A, v20ss, sum3A);
+ const vector signed short pp1B = vec_mladd(sum1B, v20ss, sum3B);
+
+ const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+ const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+ const vector signed short psumA = vec_sub(pp1A, pp2A);
+ const vector signed short psumB = vec_sub(pp1B, pp2B);
+
+ vec_st(psumA, 0, tmp);
+ vec_st(psumB, 16, tmp);
+
+ src += srcStride;
+ tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
+ }
+
+ const vector unsigned char dstperm = vec_lvsr(0, dst);
+ const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
+ const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+ const vector unsigned char mperm = (const vector unsigned char)
+ AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
+ 0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
+
+ int16_t *tmpbis = tmp - (tmpStride * 21);
+
+ vector signed short tmpM2ssA = vec_ld(0, tmpbis);
+ vector signed short tmpM2ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+ vector signed short tmpM1ssA = vec_ld(0, tmpbis);
+ vector signed short tmpM1ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+ vector signed short tmpP0ssA = vec_ld(0, tmpbis);
+ vector signed short tmpP0ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+ vector signed short tmpP1ssA = vec_ld(0, tmpbis);
+ vector signed short tmpP1ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+ vector signed short tmpP2ssA = vec_ld(0, tmpbis);
+ vector signed short tmpP2ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+
+ for (i = 0 ; i < 16 ; i++) {
+ const vector signed short tmpP3ssA = vec_ld(0, tmpbis);
+ const vector signed short tmpP3ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+
+ const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
+ const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
+ const vector signed short sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
+ const vector signed short sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
+ const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
+ const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
+
+ tmpM2ssA = tmpM1ssA;
+ tmpM2ssB = tmpM1ssB;
+ tmpM1ssA = tmpP0ssA;
+ tmpM1ssB = tmpP0ssB;
+ tmpP0ssA = tmpP1ssA;
+ tmpP0ssB = tmpP1ssB;
+ tmpP1ssA = tmpP2ssA;
+ tmpP1ssB = tmpP2ssB;
+ tmpP2ssA = tmpP3ssA;
+ tmpP2ssB = tmpP3ssB;
+
+ const vector signed int pp1Ae = vec_mule(sum1A, v20ss);
+ const vector signed int pp1Ao = vec_mulo(sum1A, v20ss);
+ const vector signed int pp1Be = vec_mule(sum1B, v20ss);
+ const vector signed int pp1Bo = vec_mulo(sum1B, v20ss);
+
+ const vector signed int pp2Ae = vec_mule(sum2A, v5ss);
+ const vector signed int pp2Ao = vec_mulo(sum2A, v5ss);
+ const vector signed int pp2Be = vec_mule(sum2B, v5ss);
+ const vector signed int pp2Bo = vec_mulo(sum2B, v5ss);
+
+ const vector signed int pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
+ const vector signed int pp3Ao = vec_mulo(sum3A, v1ss);
+ const vector signed int pp3Be = vec_sra((vector signed int)sum3B, v16ui);
+ const vector signed int pp3Bo = vec_mulo(sum3B, v1ss);
+
+ const vector signed int pp1cAe = vec_add(pp1Ae, v512si);
+ const vector signed int pp1cAo = vec_add(pp1Ao, v512si);
+ const vector signed int pp1cBe = vec_add(pp1Be, v512si);
+ const vector signed int pp1cBo = vec_add(pp1Bo, v512si);
+
+ const vector signed int pp32Ae = vec_sub(pp3Ae, pp2Ae);
+ const vector signed int pp32Ao = vec_sub(pp3Ao, pp2Ao);
+ const vector signed int pp32Be = vec_sub(pp3Be, pp2Be);
+ const vector signed int pp32Bo = vec_sub(pp3Bo, pp2Bo);
+
+ const vector signed int sumAe = vec_add(pp1cAe, pp32Ae);
+ const vector signed int sumAo = vec_add(pp1cAo, pp32Ao);
+ const vector signed int sumBe = vec_add(pp1cBe, pp32Be);
+ const vector signed int sumBo = vec_add(pp1cBo, pp32Bo);
+
+ const vector signed int ssumAe = vec_sra(sumAe, v10ui);
+ const vector signed int ssumAo = vec_sra(sumAo, v10ui);
+ const vector signed int ssumBe = vec_sra(sumBe, v10ui);
+ const vector signed int ssumBo = vec_sra(sumBo, v10ui);
+
+ const vector signed short ssume = vec_packs(ssumAe, ssumBe);
+ const vector signed short ssumo = vec_packs(ssumAo, ssumBo);
+
+ const vector unsigned char sumv = vec_packsu(ssume, ssumo);
+ const vector unsigned char sum = vec_perm(sumv, sumv, mperm);
+
+ const vector unsigned char dst1 = vec_ld(0, dst);
+ const vector unsigned char dst2 = vec_ld(16, dst);
+ const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+ vector unsigned char fsum;
+ OP_U8_ALTIVEC(fsum, sum, vdst);
+
+ const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
+ const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
+ const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);
+
+ vec_st(fdst1, 0, dst);
+ vec_st(fdst2, 16, dst);
+
+ dst += dstStride;
+ }
+ POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+}
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
index 55a4587f9..80dea036d 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
@@ -62,8 +62,18 @@ static unsigned char* perfname[] = {
"put_no_rnd_pixels16_xy2_altivec",
"hadamard8_diff8x8_altivec",
"hadamard8_diff16_altivec",
+ "avg_pixels8_xy2_altivec",
"clear_blocks_dcbz32_ppc",
- "clear_blocks_dcbz128_ppc"
+ "clear_blocks_dcbz128_ppc",
+ "put_h264_chroma_mc8_altivec",
+ "avg_h264_chroma_mc8_altivec",
+ "put_h264_qpel16_h_lowpass_altivec",
+ "avg_h264_qpel16_h_lowpass_altivec",
+ "put_h264_qpel16_v_lowpass_altivec",
+ "avg_h264_qpel16_v_lowpass_altivec",
+ "put_h264_qpel16_hv_lowpass_altivec",
+ "avg_h264_qpel16_hv_lowpass_altivec",
+ ""
};
#include <stdio.h>
#endif
@@ -131,7 +141,7 @@ POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz32, 1);
((unsigned long*)blocks)[3] = 0L;
i += 16;
}
- for ( ; i < sizeof(DCTELEM)*6*64 ; i += 32) {
+ for ( ; i < sizeof(DCTELEM)*6*64-31 ; i += 32) {
#ifndef __MWERKS__
asm volatile("dcbz %0,%1" : : "b" (blocks), "r" (i) : "memory");
#else
@@ -227,6 +237,9 @@ long check_dcbzl_effect(void)
}
#endif
+
+void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx);
+
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
{
// Common optimizations whether Altivec is available or not
@@ -241,8 +254,10 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
default:
break;
}
-
+
#ifdef HAVE_ALTIVEC
+ dsputil_h264_init_ppc(c, avctx);
+
if (has_altivec()) {
mm_flags |= MM_ALTIVEC;
@@ -268,10 +283,8 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
/* the two functions do the same thing, so use the same code */
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec;
c->avg_pixels_tab[0][0] = avg_pixels16_altivec;
-// next one disabled as it's untested.
-#if 0
c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
-#endif /* 0 */
+ c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
@@ -279,7 +292,7 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
c->gmc1 = gmc1_altivec;
-#if (__GNUC__ * 100 + __GNUC_MINOR__ >= 330)
+#ifdef CONFIG_DARWIN // ATM gcc-3.3 and gcc-3.4 fail to compile these in linux...
c->hadamard8_diff[0] = hadamard8_diff16_altivec;
c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
#endif
@@ -311,10 +324,10 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
{
for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++)
{
- perfdata[j][i][powerpc_data_min] = (unsigned long long)0xFFFFFFFFFFFFFFFF;
- perfdata[j][i][powerpc_data_max] = (unsigned long long)0x0000000000000000;
- perfdata[j][i][powerpc_data_sum] = (unsigned long long)0x0000000000000000;
- perfdata[j][i][powerpc_data_num] = (unsigned long long)0x0000000000000000;
+ perfdata[j][i][powerpc_data_min] = 0xFFFFFFFFFFFFFFFFULL;
+ perfdata[j][i][powerpc_data_max] = 0x0000000000000000ULL;
+ perfdata[j][i][powerpc_data_sum] = 0x0000000000000000ULL;
+ perfdata[j][i][powerpc_data_num] = 0x0000000000000000ULL;
}
}
}
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h
index 8b34c6b45..7e01677f1 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h
@@ -32,8 +32,11 @@
#ifdef POWERPC_PERFORMANCE_REPORT
void powerpc_display_perf_report(void);
-/* the 604* have 2, the G3* have 4, the G4s have 6 */
-#define POWERPC_NUM_PMC_ENABLED 4
+/* the 604* have 2, the G3* have 4, the G4s have 6,
+ and the G5 are completely different (they MUST use
+ POWERPC_MODE_64BITS, and let's hope all future 64 bits PPC
+ will use the same PMCs... */
+#define POWERPC_NUM_PMC_ENABLED 6
/* if you add to the enum below, also add to the perfname array
in dsputil_ppc.c */
enum powerpc_perf_index {
@@ -52,8 +55,17 @@ enum powerpc_perf_index {
altivec_put_no_rnd_pixels16_xy2_num,
altivec_hadamard8_diff8x8_num,
altivec_hadamard8_diff16_num,
+ altivec_avg_pixels8_xy2_num,
powerpc_clear_blocks_dcbz32,
powerpc_clear_blocks_dcbz128,
+ altivec_put_h264_chroma_mc8_num,
+ altivec_avg_h264_chroma_mc8_num,
+ altivec_put_h264_qpel16_h_lowpass_num,
+ altivec_avg_h264_qpel16_h_lowpass_num,
+ altivec_put_h264_qpel16_v_lowpass_num,
+ altivec_avg_h264_qpel16_v_lowpass_num,
+ altivec_put_h264_qpel16_hv_lowpass_num,
+ altivec_avg_h264_qpel16_hv_lowpass_num,
powerpc_perf_total
};
enum powerpc_data_index {
diff --git a/src/libffmpeg/libavcodec/ppc/fdct_altivec.c b/src/libffmpeg/libavcodec/ppc/fdct_altivec.c
index 99df5ced3..97333432f 100644
--- a/src/libffmpeg/libavcodec/ppc/fdct_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/fdct_altivec.c
@@ -214,8 +214,8 @@ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
/* setup constants {{{ */
/* mzero = -0.0 */
- vu32(mzero) = vec_splat_u32(-1);
- vu32(mzero) = vec_sl(vu32(mzero), vu32(mzero));
+ mzero = ((vector float)vec_splat_u32(-1));
+ mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
cp = fdctconsts;
cnsts0 = vec_ld(0, cp); cp++;
cnsts1 = vec_ld(0, cp); cp++;
@@ -227,43 +227,43 @@ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))
bp = (vector signed short*)block;
- vs16(b00) = vec_ld(0, bp);
- vs16(b40) = vec_ld(16*4, bp);
- vs16(b01) = MERGE_S16(h, b00, b40);
- vs16(b11) = MERGE_S16(l, b00, b40);
+ b00 = ((vector float)vec_ld(0, bp));
+ b40 = ((vector float)vec_ld(16*4, bp));
+ b01 = ((vector float)MERGE_S16(h, b00, b40));
+ b11 = ((vector float)MERGE_S16(l, b00, b40));
bp++;
- vs16(b10) = vec_ld(0, bp);
- vs16(b50) = vec_ld(16*4, bp);
- vs16(b21) = MERGE_S16(h, b10, b50);
- vs16(b31) = MERGE_S16(l, b10, b50);
+ b10 = ((vector float)vec_ld(0, bp));
+ b50 = ((vector float)vec_ld(16*4, bp));
+ b21 = ((vector float)MERGE_S16(h, b10, b50));
+ b31 = ((vector float)MERGE_S16(l, b10, b50));
bp++;
- vs16(b20) = vec_ld(0, bp);
- vs16(b60) = vec_ld(16*4, bp);
- vs16(b41) = MERGE_S16(h, b20, b60);
- vs16(b51) = MERGE_S16(l, b20, b60);
+ b20 = ((vector float)vec_ld(0, bp));
+ b60 = ((vector float)vec_ld(16*4, bp));
+ b41 = ((vector float)MERGE_S16(h, b20, b60));
+ b51 = ((vector float)MERGE_S16(l, b20, b60));
bp++;
- vs16(b30) = vec_ld(0, bp);
- vs16(b70) = vec_ld(16*4, bp);
- vs16(b61) = MERGE_S16(h, b30, b70);
- vs16(b71) = MERGE_S16(l, b30, b70);
-
- vs16(x0) = MERGE_S16(h, b01, b41);
- vs16(x1) = MERGE_S16(l, b01, b41);
- vs16(x2) = MERGE_S16(h, b11, b51);
- vs16(x3) = MERGE_S16(l, b11, b51);
- vs16(x4) = MERGE_S16(h, b21, b61);
- vs16(x5) = MERGE_S16(l, b21, b61);
- vs16(x6) = MERGE_S16(h, b31, b71);
- vs16(x7) = MERGE_S16(l, b31, b71);
-
- vs16(b00) = MERGE_S16(h, x0, x4);
- vs16(b10) = MERGE_S16(l, x0, x4);
- vs16(b20) = MERGE_S16(h, x1, x5);
- vs16(b30) = MERGE_S16(l, x1, x5);
- vs16(b40) = MERGE_S16(h, x2, x6);
- vs16(b50) = MERGE_S16(l, x2, x6);
- vs16(b60) = MERGE_S16(h, x3, x7);
- vs16(b70) = MERGE_S16(l, x3, x7);
+ b30 = ((vector float)vec_ld(0, bp));
+ b70 = ((vector float)vec_ld(16*4, bp));
+ b61 = ((vector float)MERGE_S16(h, b30, b70));
+ b71 = ((vector float)MERGE_S16(l, b30, b70));
+
+ x0 = ((vector float)MERGE_S16(h, b01, b41));
+ x1 = ((vector float)MERGE_S16(l, b01, b41));
+ x2 = ((vector float)MERGE_S16(h, b11, b51));
+ x3 = ((vector float)MERGE_S16(l, b11, b51));
+ x4 = ((vector float)MERGE_S16(h, b21, b61));
+ x5 = ((vector float)MERGE_S16(l, b21, b61));
+ x6 = ((vector float)MERGE_S16(h, b31, b71));
+ x7 = ((vector float)MERGE_S16(l, b31, b71));
+
+ b00 = ((vector float)MERGE_S16(h, x0, x4));
+ b10 = ((vector float)MERGE_S16(l, x0, x4));
+ b20 = ((vector float)MERGE_S16(h, x1, x5));
+ b30 = ((vector float)MERGE_S16(l, x1, x5));
+ b40 = ((vector float)MERGE_S16(h, x2, x6));
+ b50 = ((vector float)MERGE_S16(l, x2, x6));
+ b60 = ((vector float)MERGE_S16(h, x3, x7));
+ b70 = ((vector float)MERGE_S16(l, x3, x7));
#undef MERGE_S16
/* }}} */
@@ -275,32 +275,32 @@ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
*/
#if 1
/* fdct rows {{{ */
- vs16(x0) = vec_add(vs16(b00), vs16(b70));
- vs16(x7) = vec_sub(vs16(b00), vs16(b70));
- vs16(x1) = vec_add(vs16(b10), vs16(b60));
- vs16(x6) = vec_sub(vs16(b10), vs16(b60));
- vs16(x2) = vec_add(vs16(b20), vs16(b50));
- vs16(x5) = vec_sub(vs16(b20), vs16(b50));
- vs16(x3) = vec_add(vs16(b30), vs16(b40));
- vs16(x4) = vec_sub(vs16(b30), vs16(b40));
+ x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
+ x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
+ x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
+ x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
+ x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
+ x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
+ x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
+ x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));
- vs16(b70) = vec_add(vs16(x0), vs16(x3));
- vs16(b10) = vec_add(vs16(x1), vs16(x2));
+ b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
+ b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));
- vs16(b00) = vec_add(vs16(b70), vs16(b10));
- vs16(b40) = vec_sub(vs16(b70), vs16(b10));
+ b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
+ b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));
#define CTF0(n) \
- vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \
- vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \
+ b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
+ b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
b##n##1 = vec_ctf(vs32(b##n##1), 0); \
b##n##0 = vec_ctf(vs32(b##n##0), 0);
CTF0(0);
CTF0(4);
- vs16(b20) = vec_sub(vs16(x0), vs16(x3));
- vs16(b60) = vec_sub(vs16(x1), vs16(x2));
+ b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
+ b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));
CTF0(2);
CTF0(6);
@@ -321,8 +321,8 @@ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
b61 = vec_madd(cnst, b61, x1);
#define CTFX(x,b) \
- vs32(b##0) = vec_unpackh(vs16(x)); \
- vs32(b##1) = vec_unpackl(vs16(x)); \
+ b##0 = ((vector float)vec_unpackh(vs16(x))); \
+ b##1 = ((vector float)vec_unpackl(vs16(x))); \
b##0 = vec_ctf(vs32(b##0), 0); \
b##1 = vec_ctf(vs32(b##1), 0); \
@@ -473,9 +473,9 @@ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
#define CTS(n) \
b##n##0 = vec_round(b##n##0); \
b##n##1 = vec_round(b##n##1); \
- vs32(b##n##0) = vec_cts(b##n##0, 0); \
- vs32(b##n##1) = vec_cts(b##n##1, 0); \
- vs16(b##n##0) = vec_pack(vs32(b##n##0), vs32(b##n##1)); \
+ b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
+ b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
+ b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
vec_st(vs16(b##n##0), 0, bp);
bp = (vector signed short*)block;