Diffstat (limited to 'src/libffmpeg/libavcodec/ppc')
-rw-r--r--  src/libffmpeg/libavcodec/ppc/.hgignore               |   6
-rw-r--r--  src/libffmpeg/libavcodec/ppc/float_altivec.c         | 193
-rw-r--r--  src/libffmpeg/libavcodec/ppc/h264_altivec.c          | 565
-rw-r--r--  src/libffmpeg/libavcodec/ppc/h264_template_altivec.c | 719
-rw-r--r--  src/libffmpeg/libavcodec/ppc/mathops.h               |  33
-rw-r--r--  src/libffmpeg/libavcodec/ppc/snow_altivec.c          | 788
-rw-r--r--  src/libffmpeg/libavcodec/ppc/types_altivec.h         |  41
-rw-r--r--  src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c        | 338
8 files changed, 2683 insertions, 0 deletions
diff --git a/src/libffmpeg/libavcodec/ppc/.hgignore b/src/libffmpeg/libavcodec/ppc/.hgignore
new file mode 100644
index 000000000..7d926a554
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/.hgignore
@@ -0,0 +1,6 @@
+Makefile
+Makefile.in
+.libs
+.deps
+*.lo
+*.la
diff --git a/src/libffmpeg/libavcodec/ppc/float_altivec.c b/src/libffmpeg/libavcodec/ppc/float_altivec.c
new file mode 100644
index 000000000..22c2de61a
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/float_altivec.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+
+#include "dsputil_altivec.h"
+
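+/* Scalar reference (roughly what the AltiVec loop below computes; len is
+ * assumed to be a multiple of 8):
+ *   for (i = 0; i < len; i++) dst[i] *= src[i];                          */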
+static void vector_fmul_altivec(float *dst, const float *src, int len)
+{
+ int i;
+ vector float d0, d1, s, zero = (vector float)vec_splat_u32(0);
+ for(i=0; i<len-7; i+=8) {
+ d0 = vec_ld(0, dst+i);
+ s = vec_ld(0, src+i);
+ d1 = vec_ld(16, dst+i);
+ d0 = vec_madd(d0, s, zero);
+ d1 = vec_madd(d1, vec_ld(16,src+i), zero);
+ vec_st(d0, 0, dst+i);
+ vec_st(d1, 16, dst+i);
+ }
+}
+
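+/* Scalar reference: dst[i] = src0[i] * src1[len-1-i], i.e. src1 is read in
+ * reverse order (a sketch of the intended result, not the exact schedule). */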
+static void vector_fmul_reverse_altivec(float *dst, const float *src0,
+ const float *src1, int len)
+{
+ int i;
+ vector float d, s0, s1, h0, l0,
+ s2, s3, zero = (vector float)vec_splat_u32(0);
+ src1 += len-4;
+ for(i=0; i<len-7; i+=8) {
+ s1 = vec_ld(0, src1-i); // [a,b,c,d]
+ s0 = vec_ld(0, src0+i);
+ l0 = vec_mergel(s1, s1); // [c,c,d,d]
+ s3 = vec_ld(-16, src1-i);
+ h0 = vec_mergeh(s1, s1); // [a,a,b,b]
+ s2 = vec_ld(16, src0+i);
+ s1 = vec_mergeh(vec_mergel(l0,h0), // [d,b,d,b]
+ vec_mergeh(l0,h0)); // [c,a,c,a]
+ // [d,c,b,a]
+ l0 = vec_mergel(s3, s3);
+ d = vec_madd(s0, s1, zero);
+ h0 = vec_mergeh(s3, s3);
+ vec_st(d, 0, dst+i);
+ s3 = vec_mergeh(vec_mergel(l0,h0),
+ vec_mergeh(l0,h0));
+ d = vec_madd(s2, s3, zero);
+ vec_st(d, 16, dst+i);
+ }
+}
+
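+/* Scalar reference (see ff_vector_fmul_add_add_c, used as the fallback below):
+ *   for (i = 0; i < len; i++) dst[i*step] = src0[i]*src1[i] + src2[i] + src3;
+ * Only the step==1, src3==0 case is vectorized here.                        */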
+static void vector_fmul_add_add_altivec(float *dst, const float *src0,
+ const float *src1, const float *src2,
+ int src3, int len, int step)
+{
+ int i;
+ vector float d, s0, s1, s2, t0, t1, edges;
+ vector unsigned char align = vec_lvsr(0,dst),
+ mask = vec_lvsl(0, dst);
+
+#if 0 //FIXME: there is still something wrong
+ if (step == 2) {
+ int y;
+ vector float d0, d1, s3, t2;
+ vector unsigned int sel =
+ vec_mergeh(vec_splat_u32(-1), vec_splat_u32(0));
+ t1 = vec_ld(16, dst);
+ for (i=0,y=0; i<len-3; i+=4,y+=8) {
+
+ s0 = vec_ld(0,src0+i);
+ s1 = vec_ld(0,src1+i);
+ s2 = vec_ld(0,src2+i);
+
+// t0 = vec_ld(0, dst+y); //[x x x|a]
+// t1 = vec_ld(16, dst+y); //[b c d|e]
+ t2 = vec_ld(31, dst+y); //[f g h|x]
+
+ d = vec_madd(s0,s1,s2); // [A B C D]
+
+ // [A A B B]
+
+ // [C C D D]
+
+ d0 = vec_perm(t0, t1, mask); // [a b c d]
+
+ d0 = vec_sel(vec_mergeh(d, d), d0, sel); // [A b B d]
+
+ edges = vec_perm(t1, t0, mask);
+
+ t0 = vec_perm(edges, d0, align); // [x x x|A]
+
+ t1 = vec_perm(d0, edges, align); // [b B d|e]
+
+ vec_stl(t0, 0, dst+y);
+
+ d1 = vec_perm(t1, t2, mask); // [e f g h]
+
+ d1 = vec_sel(vec_mergel(d, d), d1, sel); // [C f D h]
+
+ edges = vec_perm(t2, t1, mask);
+
+ t1 = vec_perm(edges, d1, align); // [b B d|C]
+
+ t2 = vec_perm(d1, edges, align); // [f D h|x]
+
+ vec_stl(t1, 16, dst+y);
+
+ t0 = t1;
+
+ vec_stl(t2, 31, dst+y);
+
+ t1 = t2;
+ }
+ } else
+ #endif
+ if (step == 1 && src3 == 0)
+ for (i=0; i<len-3; i+=4) {
+ t0 = vec_ld(0, dst+i);
+ t1 = vec_ld(15, dst+i);
+ s0 = vec_ld(0, src0+i);
+ s1 = vec_ld(0, src1+i);
+ s2 = vec_ld(0, src2+i);
+ edges = vec_perm(t1, t0, mask);
+ d = vec_madd(s0,s1,s2);
+ t1 = vec_perm(d, edges, align);
+ t0 = vec_perm(edges, d, align);
+ vec_st(t1, 15, dst+i);
+ vec_st(t0, 0, dst+i);
+ }
+ else
+ ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
+}
+
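+/* Scalar reference, roughly: dst[i] = saturate_int16((int)src[i]);
+ * vec_cts truncates and vec_packs saturates, which is likely why this path
+ * is only installed when CODEC_FLAG_BITEXACT is unset (see float_init_altivec
+ * below).                                                                   */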
+void float_to_int16_altivec(int16_t *dst, const float *src, int len)
+{
+ int i;
+ vector float s0, s1;
+ vector signed int t0, t1;
+ vector signed short d0, d1, d;
+ vector unsigned char align;
+ if(((long)dst)&15) //FIXME
+ for(i=0; i<len-7; i+=8) {
+ s0 = vec_ld(0, src+i);
+ s1 = vec_ld(16, src+i);
+ t0 = vec_cts(s0, 0);
+ d0 = vec_ld(0, dst+i);
+ t1 = vec_cts(s1, 0);
+ d1 = vec_ld(15, dst+i);
+ d = vec_packs(t0,t1);
+ d1 = vec_perm(d1, d0, vec_lvsl(0,dst+i));
+ align = vec_lvsr(0, dst+i);
+ d0 = vec_perm(d1, d, align);
+ d1 = vec_perm(d, d1, align);
+ vec_st(d0, 0, dst+i);
+ vec_st(d1,15, dst+i);
+ }
+ else
+ for(i=0; i<len-7; i+=8) {
+ s0 = vec_ld(0, src+i);
+ s1 = vec_ld(16, src+i);
+ t0 = vec_cts(s0, 0);
+ t1 = vec_cts(s1, 0);
+ d = vec_packs(t0,t1);
+ vec_st(d, 0, dst+i);
+ }
+}
+
+void float_init_altivec(DSPContext* c, AVCodecContext *avctx)
+{
+ c->vector_fmul = vector_fmul_altivec;
+ c->vector_fmul_reverse = vector_fmul_reverse_altivec;
+ c->vector_fmul_add_add = vector_fmul_add_add_altivec;
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT))
+ c->float_to_int16 = float_to_int16_altivec;
+}
diff --git a/src/libffmpeg/libavcodec/ppc/h264_altivec.c b/src/libffmpeg/libavcodec/ppc/h264_altivec.c
new file mode 100644
index 000000000..bac620e82
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/h264_altivec.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+
+#include "dsputil_altivec.h"
+#include "types_altivec.h"
+
+#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
+#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
+
+#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num
+#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
+#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num
+#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec
+#define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num
+#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
+#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
+#include "h264_template_altivec.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#undef PREFIX_h264_qpel16_h_lowpass_altivec
+#undef PREFIX_h264_qpel16_h_lowpass_num
+#undef PREFIX_h264_qpel16_v_lowpass_altivec
+#undef PREFIX_h264_qpel16_v_lowpass_num
+#undef PREFIX_h264_qpel16_hv_lowpass_altivec
+#undef PREFIX_h264_qpel16_hv_lowpass_num
+
+#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
+#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec
+#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num
+#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec
+#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num
+#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec
+#define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num
+#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
+#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
+#include "h264_template_altivec.c"
+#undef OP_U8_ALTIVEC
+#undef PREFIX_h264_chroma_mc8_altivec
+#undef PREFIX_h264_chroma_mc8_num
+#undef PREFIX_h264_qpel16_h_lowpass_altivec
+#undef PREFIX_h264_qpel16_h_lowpass_num
+#undef PREFIX_h264_qpel16_v_lowpass_altivec
+#undef PREFIX_h264_qpel16_v_lowpass_num
+#undef PREFIX_h264_qpel16_hv_lowpass_altivec
+#undef PREFIX_h264_qpel16_hv_lowpass_num
+
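+/* H264_MC expands to the 16 quarter-pel motion-compensation functions
+ * (mcXY = quarter-pel offset X/4, Y/4). The half/halfH/halfV/halfHV buffers
+ * hold intermediate half-pel planes, which are then combined with the
+ * OPNAME ## pixelsN_l2 averaging helpers.                                 */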
+#define H264_MC(OPNAME, SIZE, CODETYPE) \
+static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
+ DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
+}\
+\
+static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
+ DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
+ put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
+ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
+}\
+
+/* this code assumes that stride % 16 == 0 */
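+/* Scalar reference for the no-rounding chroma MC, per pixel:
+ *   dst[i] = (A*s[i] + B*s[i+1] + C*s[i+stride] + D*s[i+stride+1] + 28) >> 6
+ * with A=(8-x)(8-y), B=x(8-y), C=(8-x)y, D=xy; v28ss below is 32-4.        */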
+void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
+ signed int ABCD[4] __attribute__((aligned(16))) =
+ {((8 - x) * (8 - y)),
+ ((x) * (8 - y)),
+ ((8 - x) * (y)),
+ ((x) * (y))};
+ register int i;
+ vector unsigned char fperm;
+ const vector signed int vABCD = vec_ld(0, ABCD);
+ const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
+ const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
+ const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
+ const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector signed short v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
+ const vector unsigned short v6us = vec_splat_u16(6);
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+ vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
+ vector unsigned char vsrc0uc, vsrc1uc;
+ vector signed short vsrc0ssH, vsrc1ssH;
+ vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
+ vector signed short vsrc2ssH, vsrc3ssH, psum;
+ vector unsigned char vdst, ppsum, fsum;
+
+ if (((unsigned long)dst) % 16 == 0) {
+ fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17,
+ 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F);
+ } else {
+ fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F);
+ }
+
+ vsrcAuc = vec_ld(0, src);
+
+ if (loadSecond)
+ vsrcBuc = vec_ld(16, src);
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+
+ vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc1uc = vsrcBuc;
+ else
+ vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+ vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc0uc);
+ vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc1uc);
+
+ if (!loadSecond) {// -> !reallyBadAlign
+ for (i = 0 ; i < h ; i++) {
+
+
+ vsrcCuc = vec_ld(stride + 0, src);
+
+ vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+ vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+ vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc2uc);
+ vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc3uc);
+
+ psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+ psum = vec_mladd(vB, vsrc1ssH, psum);
+ psum = vec_mladd(vC, vsrc2ssH, psum);
+ psum = vec_mladd(vD, vsrc3ssH, psum);
+ psum = vec_add(v28ss, psum);
+ psum = vec_sra(psum, v6us);
+
+ vdst = vec_ld(0, dst);
+ ppsum = (vector unsigned char)vec_packsu(psum, psum);
+ fsum = vec_perm(vdst, ppsum, fperm);
+
+ vec_st(fsum, 0, dst);
+
+ vsrc0ssH = vsrc2ssH;
+ vsrc1ssH = vsrc3ssH;
+
+ dst += stride;
+ src += stride;
+ }
+ } else {
+ vector unsigned char vsrcDuc;
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrcDuc = vec_ld(stride + 16, src);
+
+ vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc3uc = vsrcDuc;
+ else
+ vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+ vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc2uc);
+ vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc3uc);
+
+ psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+ psum = vec_mladd(vB, vsrc1ssH, psum);
+ psum = vec_mladd(vC, vsrc2ssH, psum);
+ psum = vec_mladd(vD, vsrc3ssH, psum);
+ psum = vec_add(v28ss, psum);
+ psum = vec_sr(psum, v6us);
+
+ vdst = vec_ld(0, dst);
+ ppsum = (vector unsigned char)vec_pack(psum, psum);
+ fsum = vec_perm(vdst, ppsum, fperm);
+
+ vec_st(fsum, 0, dst);
+
+ vsrc0ssH = vsrc2ssH;
+ vsrc1ssH = vsrc3ssH;
+
+ dst += stride;
+ src += stride;
+ }
+ }
+}
+
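+/* put_pixels16_l2: per row, dst[j] = (src1[j] + src2[j] + 1) >> 1 for 16
+ * pixels (vec_avg rounds up); src2 is read with a fixed stride of 16.     */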
+static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
+ const uint8_t * src2, int dst_stride,
+ int src_stride1, int h)
+{
+ int i;
+ vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+
+ mask_ = vec_lvsl(0, src2);
+
+ for (i = 0; i < h; i++) {
+
+ tmp1 = vec_ld(i * src_stride1, src1);
+ mask = vec_lvsl(i * src_stride1, src1);
+ tmp2 = vec_ld(i * src_stride1 + 15, src1);
+
+ a = vec_perm(tmp1, tmp2, mask);
+
+ tmp1 = vec_ld(i * 16, src2);
+ tmp2 = vec_ld(i * 16 + 15, src2);
+
+ b = vec_perm(tmp1, tmp2, mask_);
+
+ tmp1 = vec_ld(0, dst);
+ mask = vec_lvsl(0, dst);
+ tmp2 = vec_ld(15, dst);
+
+ d = vec_avg(a, b);
+
+ edges = vec_perm(tmp2, tmp1, mask);
+
+ align = vec_lvsr(0, dst);
+
+ tmp2 = vec_perm(d, edges, align);
+ tmp1 = vec_perm(edges, d, align);
+
+ vec_st(tmp2, 15, dst);
+ vec_st(tmp1, 0 , dst);
+
+ dst += dst_stride;
+ }
+}
+
+static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
+ const uint8_t * src2, int dst_stride,
+ int src_stride1, int h)
+{
+ int i;
+ vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+
+ mask_ = vec_lvsl(0, src2);
+
+ for (i = 0; i < h; i++) {
+
+ tmp1 = vec_ld(i * src_stride1, src1);
+ mask = vec_lvsl(i * src_stride1, src1);
+ tmp2 = vec_ld(i * src_stride1 + 15, src1);
+
+ a = vec_perm(tmp1, tmp2, mask);
+
+ tmp1 = vec_ld(i * 16, src2);
+ tmp2 = vec_ld(i * 16 + 15, src2);
+
+ b = vec_perm(tmp1, tmp2, mask_);
+
+ tmp1 = vec_ld(0, dst);
+ mask = vec_lvsl(0, dst);
+ tmp2 = vec_ld(15, dst);
+
+ d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
+
+ edges = vec_perm(tmp2, tmp1, mask);
+
+ align = vec_lvsr(0, dst);
+
+ tmp2 = vec_perm(d, edges, align);
+ tmp1 = vec_perm(edges, d, align);
+
+ vec_st(tmp2, 15, dst);
+ vec_st(tmp1, 0 , dst);
+
+ dst += dst_stride;
+ }
+}
+
+/* Implemented but could be faster
+#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+ */
+
+ H264_MC(put_, 16, altivec)
+ H264_MC(avg_, 16, altivec)
+
+
+/****************************************************************************
+ * IDCT transform:
+ ****************************************************************************/
+
+#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
+ /* a0 = SRC(0) + SRC(4); */ \
+ vec_s16_t a0v = vec_add(s0, s4); \
+ /* a2 = SRC(0) - SRC(4); */ \
+ vec_s16_t a2v = vec_sub(s0, s4); \
+ /* a4 = (SRC(2)>>1) - SRC(6); */ \
+ vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
+ /* a6 = (SRC(6)>>1) + SRC(2); */ \
+ vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
+ /* b0 = a0 + a6; */ \
+ vec_s16_t b0v = vec_add(a0v, a6v); \
+ /* b2 = a2 + a4; */ \
+ vec_s16_t b2v = vec_add(a2v, a4v); \
+ /* b4 = a2 - a4; */ \
+ vec_s16_t b4v = vec_sub(a2v, a4v); \
+ /* b6 = a0 - a6; */ \
+ vec_s16_t b6v = vec_sub(a0v, a6v); \
+ /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
+ /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
+ vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
+ /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
+ /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
+ vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
+ /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
+ /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
+ vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
+ /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
+ vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
+ /* b1 = (a7>>2) + a1; */ \
+ vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
+ /* b3 = a3 + (a5>>2); */ \
+ vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
+ /* b5 = (a3>>2) - a5; */ \
+ vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
+ /* b7 = a7 - (a1>>2); */ \
+ vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
+ /* DST(0, b0 + b7); */ \
+ d0 = vec_add(b0v, b7v); \
+ /* DST(1, b2 + b5); */ \
+ d1 = vec_add(b2v, b5v); \
+ /* DST(2, b4 + b3); */ \
+ d2 = vec_add(b4v, b3v); \
+ /* DST(3, b6 + b1); */ \
+ d3 = vec_add(b6v, b1v); \
+ /* DST(4, b6 - b1); */ \
+ d4 = vec_sub(b6v, b1v); \
+ /* DST(5, b4 - b3); */ \
+ d5 = vec_sub(b4v, b3v); \
+ /* DST(6, b2 - b5); */ \
+ d6 = vec_sub(b2v, b5v); \
+ /* DST(7, b0 - b7); */ \
+ d7 = vec_sub(b0v, b7v); \
+}
+
+#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
+ /* unaligned load */ \
+ vec_u8_t hv = vec_ld( 0, dest ); \
+ vec_u8_t lv = vec_ld( 7, dest ); \
+ vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv ); \
+ vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \
+ vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv); \
+ vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
+ vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum); \
+ vec_u8_t edgehv; \
+ /* unaligned store */ \
+ vec_u8_t bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\
+ vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
+ lv = vec_sel( lv, bodyv, edgelv ); \
+ vec_st( lv, 7, dest ); \
+ hv = vec_ld( 0, dest ); \
+ edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
+ hv = vec_sel( hv, bodyv, edgehv ); \
+ vec_st( hv, 0, dest ); \
+ }
+
+void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
+ vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
+ vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
+ vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
+
+ vec_u8_t perm_ldv = vec_lvsl(0, dst);
+ vec_u8_t perm_stv = vec_lvsr(8, dst);
+
+ const vec_u16_t onev = vec_splat_u16(1);
+ const vec_u16_t twov = vec_splat_u16(2);
+ const vec_u16_t sixv = vec_splat_u16(6);
+
+ const vec_u8_t sel = (vec_u8_t) AVV(0,0,0,0,0,0,0,0,
+ -1,-1,-1,-1,-1,-1,-1,-1);
+ LOAD_ZERO;
+
+ dct[0] += 32; // rounding for the >>6 at the end
+
+ s0 = vec_ld(0x00, (int16_t*)dct);
+ s1 = vec_ld(0x10, (int16_t*)dct);
+ s2 = vec_ld(0x20, (int16_t*)dct);
+ s3 = vec_ld(0x30, (int16_t*)dct);
+ s4 = vec_ld(0x40, (int16_t*)dct);
+ s5 = vec_ld(0x50, (int16_t*)dct);
+ s6 = vec_ld(0x60, (int16_t*)dct);
+ s7 = vec_ld(0x70, (int16_t*)dct);
+
+ IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
+ d0, d1, d2, d3, d4, d5, d6, d7);
+
+ TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );
+
+ IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
+ idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);
+
+ ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
+ ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
+ ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
+ ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
+ ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
+ ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
+ ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
+ ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
+}
+
+void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
+
+#ifdef HAVE_ALTIVEC
+ if (has_altivec()) {
+ c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
+ c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
+ c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
+ c->h264_idct8_add = ff_h264_idct8_add_altivec;
+
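+ // The qpel table is indexed by the quarter-pel offset: entry x + 4*y
+ // dispatches to the matching mcXY function generated by H264_MC above.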
+#define dspfunc(PFX, IDX, NUM) \
+ c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
+ c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
+ c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
+ c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
+ c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
+ c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
+ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
+ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
+
+ dspfunc(put_h264_qpel, 0, 16);
+ dspfunc(avg_h264_qpel, 0, 16);
+#undef dspfunc
+
+ } else
+#endif /* HAVE_ALTIVEC */
+ {
+ // Non-AltiVec PPC optimisations
+
+ // ... pending ...
+ }
+}
diff --git a/src/libffmpeg/libavcodec/ppc/h264_template_altivec.c b/src/libffmpeg/libavcodec/ppc/h264_template_altivec.c
new file mode 100644
index 000000000..e8ad67f2f
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/h264_template_altivec.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/* this code assumes that stride % 16 == 0 */
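+/* Scalar reference, per pixel (x, y are the 1/8-pel chroma offsets):
+ *   OP(dst[i], (A*s[i] + B*s[i+1] + C*s[i+stride] + D*s[i+stride+1] + 32) >> 6)
+ * with A=(8-x)(8-y), B=x(8-y), C=(8-x)y, D=xy; OP is put or avg depending
+ * on OP_U8_ALTIVEC.                                                         */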
+void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
+ POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
+ signed int ABCD[4] __attribute__((aligned(16))) =
+ {((8 - x) * (8 - y)),
+ ((x) * (8 - y)),
+ ((8 - x) * (y)),
+ ((x) * (y))};
+ register int i;
+ vector unsigned char fperm;
+ const vector signed int vABCD = vec_ld(0, ABCD);
+ const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
+ const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
+ const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
+ const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector signed short v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
+ const vector unsigned short v6us = vec_splat_u16(6);
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+
+ vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
+ vector unsigned char vsrc0uc, vsrc1uc;
+ vector signed short vsrc0ssH, vsrc1ssH;
+ vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
+ vector signed short vsrc2ssH, vsrc3ssH, psum;
+ vector unsigned char vdst, ppsum, vfdst, fsum;
+
+ POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
+
+ if (((unsigned long)dst) % 16 == 0) {
+ fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
+ 0x14, 0x15, 0x16, 0x17,
+ 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F);
+ } else {
+ fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x18, 0x19, 0x1A, 0x1B,
+ 0x1C, 0x1D, 0x1E, 0x1F);
+ }
+
+ vsrcAuc = vec_ld(0, src);
+
+ if (loadSecond)
+ vsrcBuc = vec_ld(16, src);
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+
+ vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc1uc = vsrcBuc;
+ else
+ vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+
+ vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc0uc);
+ vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc1uc);
+
+ if (!loadSecond) {// -> !reallyBadAlign
+ for (i = 0 ; i < h ; i++) {
+
+
+ vsrcCuc = vec_ld(stride + 0, src);
+
+ vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
+ vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+
+ vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc2uc);
+ vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc3uc);
+
+ psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+ psum = vec_mladd(vB, vsrc1ssH, psum);
+ psum = vec_mladd(vC, vsrc2ssH, psum);
+ psum = vec_mladd(vD, vsrc3ssH, psum);
+ psum = vec_add(v32ss, psum);
+ psum = vec_sra(psum, v6us);
+
+ vdst = vec_ld(0, dst);
+ ppsum = (vector unsigned char)vec_packsu(psum, psum);
+ vfdst = vec_perm(vdst, ppsum, fperm);
+
+ OP_U8_ALTIVEC(fsum, vfdst, vdst);
+
+ vec_st(fsum, 0, dst);
+
+ vsrc0ssH = vsrc2ssH;
+ vsrc1ssH = vsrc3ssH;
+
+ dst += stride;
+ src += stride;
+ }
+ } else {
+ vector unsigned char vsrcDuc;
+ for (i = 0 ; i < h ; i++) {
+ vsrcCuc = vec_ld(stride + 0, src);
+ vsrcDuc = vec_ld(stride + 16, src);
+
+ vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
+ if (reallyBadAlign)
+ vsrc3uc = vsrcDuc;
+ else
+ vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
+
+ vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc2uc);
+ vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+ (vector unsigned char)vsrc3uc);
+
+ psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
+ psum = vec_mladd(vB, vsrc1ssH, psum);
+ psum = vec_mladd(vC, vsrc2ssH, psum);
+ psum = vec_mladd(vD, vsrc3ssH, psum);
+ psum = vec_add(v32ss, psum);
+ psum = vec_sr(psum, v6us);
+
+ vdst = vec_ld(0, dst);
+ ppsum = (vector unsigned char)vec_pack(psum, psum);
+ vfdst = vec_perm(vdst, ppsum, fperm);
+
+ OP_U8_ALTIVEC(fsum, vfdst, vdst);
+
+ vec_st(fsum, 0, dst);
+
+ vsrc0ssH = vsrc2ssH;
+ vsrc1ssH = vsrc3ssH;
+
+ dst += stride;
+ src += stride;
+ }
+ }
+ POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 */
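+/* Scalar reference for the 6-tap luma filter, per pixel:
+ *   dst[i] = clip_uint8((20*(s[0]+s[1]) - 5*(s[-1]+s[2]) + (s[-2]+s[3]) + 16) >> 5)
+ * applied horizontally across each of the 16 rows.                          */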
+static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
+ POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
+ register int i;
+
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector unsigned char permM2 = vec_lvsl(-2, src);
+ const vector unsigned char permM1 = vec_lvsl(-1, src);
+ const vector unsigned char permP0 = vec_lvsl(+0, src);
+ const vector unsigned char permP1 = vec_lvsl(+1, src);
+ const vector unsigned char permP2 = vec_lvsl(+2, src);
+ const vector unsigned char permP3 = vec_lvsl(+3, src);
+ const vector signed short v5ss = vec_splat_s16(5);
+ const vector unsigned short v5us = vec_splat_u16(5);
+ const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+ const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
+ const vector unsigned char dstperm = vec_lvsr(0, dst);
+ const vector unsigned char neg1 =
+ (const vector unsigned char) vec_splat_s8(-1);
+
+ const vector unsigned char dstmask =
+ vec_perm((const vector unsigned char)vzero,
+ neg1, dstperm);
+
+ vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
+
+ register int align = ((((unsigned long)src) - 2) % 16);
+
+ vector signed short srcP0A, srcP0B, srcP1A, srcP1B,
+ srcP2A, srcP2B, srcP3A, srcP3B,
+ srcM1A, srcM1B, srcM2A, srcM2B,
+ sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
+ pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
+ psumA, psumB, sumA, sumB;
+
+ vector unsigned char sum, dst1, dst2, vdst, fsum,
+ rsum, fdst1, fdst2;
+
+ POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
+
+ for (i = 0 ; i < 16 ; i ++) {
+ vector unsigned char srcR1 = vec_ld(-2, src);
+ vector unsigned char srcR2 = vec_ld(14, src);
+
+ switch (align) {
+ default: {
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = vec_perm(srcR1, srcR2, permP2);
+ srcP3 = vec_perm(srcR1, srcR2, permP3);
+ } break;
+ case 11: {
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = vec_perm(srcR1, srcR2, permP2);
+ srcP3 = srcR2;
+ } break;
+ case 12: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = srcR2;
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 13: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = srcR2;
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 14: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = srcR2;
+ srcP1 = vec_perm(srcR2, srcR3, permP1);
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 15: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = srcR2;
+ srcP0 = vec_perm(srcR2, srcR3, permP0);
+ srcP1 = vec_perm(srcR2, srcR3, permP1);
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ }
+
+ srcP0A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP0);
+ srcP0B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP0);
+ srcP1A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP1);
+ srcP1B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP1);
+
+ srcP2A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP2);
+ srcP2B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP2);
+ srcP3A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP3);
+ srcP3B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP3);
+
+ srcM1A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcM1);
+ srcM1B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcM1);
+ srcM2A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcM2);
+ srcM2B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcM2);
+
+ sum1A = vec_adds(srcP0A, srcP1A);
+ sum1B = vec_adds(srcP0B, srcP1B);
+ sum2A = vec_adds(srcM1A, srcP2A);
+ sum2B = vec_adds(srcM1B, srcP2B);
+ sum3A = vec_adds(srcM2A, srcP3A);
+ sum3B = vec_adds(srcM2B, srcP3B);
+
+ pp1A = vec_mladd(sum1A, v20ss, v16ss);
+ pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+ pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+ pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+ pp3A = vec_add(sum3A, pp1A);
+ pp3B = vec_add(sum3B, pp1B);
+
+ psumA = vec_sub(pp3A, pp2A);
+ psumB = vec_sub(pp3B, pp2B);
+
+ sumA = vec_sra(psumA, v5us);
+ sumB = vec_sra(psumB, v5us);
+
+ sum = vec_packsu(sumA, sumB);
+
+ dst1 = vec_ld(0, dst);
+ dst2 = vec_ld(16, dst);
+ vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+ OP_U8_ALTIVEC(fsum, sum, vdst);
+
+ rsum = vec_perm(fsum, fsum, dstperm);
+ fdst1 = vec_sel(dst1, rsum, dstmask);
+ fdst2 = vec_sel(rsum, dst2, dstmask);
+
+ vec_st(fdst1, 0, dst);
+ vec_st(fdst2, 16, dst);
+
+ src += srcStride;
+ dst += dstStride;
+ }
+POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 */
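+/* Same 6-tap filter as the horizontal pass, applied along columns: taps at
+ * src[-2*stride] .. src[+3*stride], rounded with +16 and shifted by 5.     */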
+static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
+ POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
+
+ register int i;
+
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector unsigned char perm = vec_lvsl(0, src);
+ const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+ const vector unsigned short v5us = vec_splat_u16(5);
+ const vector signed short v5ss = vec_splat_s16(5);
+ const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
+ const vector unsigned char dstperm = vec_lvsr(0, dst);
+ const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
+ const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+
+ uint8_t *srcbis = src - (srcStride * 2);
+
+ const vector unsigned char srcM2a = vec_ld(0, srcbis);
+ const vector unsigned char srcM2b = vec_ld(16, srcbis);
+ const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm);
+// srcbis += srcStride;
+ const vector unsigned char srcM1a = vec_ld(0, srcbis += srcStride);
+ const vector unsigned char srcM1b = vec_ld(16, srcbis);
+ const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm);
+// srcbis += srcStride;
+ const vector unsigned char srcP0a = vec_ld(0, srcbis += srcStride);
+ const vector unsigned char srcP0b = vec_ld(16, srcbis);
+ const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm);
+// srcbis += srcStride;
+ const vector unsigned char srcP1a = vec_ld(0, srcbis += srcStride);
+ const vector unsigned char srcP1b = vec_ld(16, srcbis);
+ const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm);
+// srcbis += srcStride;
+ const vector unsigned char srcP2a = vec_ld(0, srcbis += srcStride);
+ const vector unsigned char srcP2b = vec_ld(16, srcbis);
+ const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm);
+// srcbis += srcStride;
+
+ vector signed short srcM2ssA = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcM2);
+ vector signed short srcM2ssB = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcM2);
+ vector signed short srcM1ssA = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcM1);
+ vector signed short srcM1ssB = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcM1);
+ vector signed short srcP0ssA = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP0);
+ vector signed short srcP0ssB = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP0);
+ vector signed short srcP1ssA = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP1);
+ vector signed short srcP1ssB = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP1);
+ vector signed short srcP2ssA = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP2);
+ vector signed short srcP2ssB = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP2);
+
+ vector signed short pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
+ psumA, psumB, sumA, sumB,
+ srcP3ssA, srcP3ssB,
+ sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
+
+ vector unsigned char sum, dst1, dst2, vdst, fsum, rsum, fdst1, fdst2,
+ srcP3a, srcP3b, srcP3;
+
+ POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
+
+ for (i = 0 ; i < 16 ; i++) {
+ srcP3a = vec_ld(0, srcbis += srcStride);
+ srcP3b = vec_ld(16, srcbis);
+ srcP3 = vec_perm(srcP3a, srcP3b, perm);
+ srcP3ssA = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP3);
+ srcP3ssB = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP3);
+// srcbis += srcStride;
+
+ sum1A = vec_adds(srcP0ssA, srcP1ssA);
+ sum1B = vec_adds(srcP0ssB, srcP1ssB);
+ sum2A = vec_adds(srcM1ssA, srcP2ssA);
+ sum2B = vec_adds(srcM1ssB, srcP2ssB);
+ sum3A = vec_adds(srcM2ssA, srcP3ssA);
+ sum3B = vec_adds(srcM2ssB, srcP3ssB);
+
+ srcM2ssA = srcM1ssA;
+ srcM2ssB = srcM1ssB;
+ srcM1ssA = srcP0ssA;
+ srcM1ssB = srcP0ssB;
+ srcP0ssA = srcP1ssA;
+ srcP0ssB = srcP1ssB;
+ srcP1ssA = srcP2ssA;
+ srcP1ssB = srcP2ssB;
+ srcP2ssA = srcP3ssA;
+ srcP2ssB = srcP3ssB;
+
+ pp1A = vec_mladd(sum1A, v20ss, v16ss);
+ pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+ pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+ pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+ pp3A = vec_add(sum3A, pp1A);
+ pp3B = vec_add(sum3B, pp1B);
+
+ psumA = vec_sub(pp3A, pp2A);
+ psumB = vec_sub(pp3B, pp2B);
+
+ sumA = vec_sra(psumA, v5us);
+ sumB = vec_sra(psumB, v5us);
+
+ sum = vec_packsu(sumA, sumB);
+
+ dst1 = vec_ld(0, dst);
+ dst2 = vec_ld(16, dst);
+ vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+ OP_U8_ALTIVEC(fsum, sum, vdst);
+
+ rsum = vec_perm(fsum, fsum, dstperm);
+ fdst1 = vec_sel(dst1, rsum, dstmask);
+ fdst2 = vec_sel(rsum, dst2, dstmask);
+
+ vec_st(fdst1, 0, dst);
+ vec_st(fdst2, 16, dst);
+
+ dst += dstStride;
+ }
+ POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
+}
+
+/* this code assumes stride % 16 == 0 *and* tmp is properly aligned */
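+/* Two passes: the horizontal 6-tap filter is stored unscaled into tmp
+ * (21 rows), then the vertical 6-tap filter is applied to tmp with +512
+ * rounding and >>10 before clipping to 8 bits.                            */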
+static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
+ POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+ register int i;
+ const vector signed int vzero = vec_splat_s32(0);
+ const vector unsigned char permM2 = vec_lvsl(-2, src);
+ const vector unsigned char permM1 = vec_lvsl(-1, src);
+ const vector unsigned char permP0 = vec_lvsl(+0, src);
+ const vector unsigned char permP1 = vec_lvsl(+1, src);
+ const vector unsigned char permP2 = vec_lvsl(+2, src);
+ const vector unsigned char permP3 = vec_lvsl(+3, src);
+ const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+ const vector unsigned int v10ui = vec_splat_u32(10);
+ const vector signed short v5ss = vec_splat_s16(5);
+ const vector signed short v1ss = vec_splat_s16(1);
+ const vector signed int v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
+ const vector unsigned int v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));
+
+ register int align = ((((unsigned long)src) - 2) % 16);
+
+ const vector unsigned char neg1 = (const vector unsigned char)
+ vec_splat_s8(-1);
+
+ vector signed short srcP0A, srcP0B, srcP1A, srcP1B,
+ srcP2A, srcP2B, srcP3A, srcP3B,
+ srcM1A, srcM1B, srcM2A, srcM2B,
+ sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
+ pp1A, pp1B, pp2A, pp2B, psumA, psumB;
+
+ const vector unsigned char dstperm = vec_lvsr(0, dst);
+
+ const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+
+ const vector unsigned char mperm = (const vector unsigned char)
+ AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
+ 0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
+ int16_t *tmpbis = tmp;
+
+ vector signed short tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
+ tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
+ tmpP2ssA, tmpP2ssB;
+
+ vector signed int pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
+ pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
+ pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
+ ssumAe, ssumAo, ssumBe, ssumBo;
+ vector unsigned char fsum, sumv, sum, dst1, dst2, vdst,
+ rsum, fdst1, fdst2;
+ vector signed short ssume, ssumo;
+
+ POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+ src -= (2 * srcStride);
+ for (i = 0 ; i < 21 ; i ++) {
+ vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
+ vector unsigned char srcR1 = vec_ld(-2, src);
+ vector unsigned char srcR2 = vec_ld(14, src);
+
+ switch (align) {
+ default: {
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = vec_perm(srcR1, srcR2, permP2);
+ srcP3 = vec_perm(srcR1, srcR2, permP3);
+ } break;
+ case 11: {
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = vec_perm(srcR1, srcR2, permP2);
+ srcP3 = srcR2;
+ } break;
+ case 12: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = vec_perm(srcR1, srcR2, permP1);
+ srcP2 = srcR2;
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 13: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = vec_perm(srcR1, srcR2, permP0);
+ srcP1 = srcR2;
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 14: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = vec_perm(srcR1, srcR2, permM1);
+ srcP0 = srcR2;
+ srcP1 = vec_perm(srcR2, srcR3, permP1);
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ case 15: {
+ vector unsigned char srcR3 = vec_ld(30, src);
+ srcM2 = vec_perm(srcR1, srcR2, permM2);
+ srcM1 = srcR2;
+ srcP0 = vec_perm(srcR2, srcR3, permP0);
+ srcP1 = vec_perm(srcR2, srcR3, permP1);
+ srcP2 = vec_perm(srcR2, srcR3, permP2);
+ srcP3 = vec_perm(srcR2, srcR3, permP3);
+ } break;
+ }
+
+ srcP0A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP0);
+ srcP0B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP0);
+ srcP1A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP1);
+ srcP1B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP1);
+
+ srcP2A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP2);
+ srcP2B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP2);
+ srcP3A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcP3);
+ srcP3B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcP3);
+
+ srcM1A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcM1);
+ srcM1B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcM1);
+ srcM2A = (vector signed short)
+ vec_mergeh((vector unsigned char)vzero, srcM2);
+ srcM2B = (vector signed short)
+ vec_mergel((vector unsigned char)vzero, srcM2);
+
+ sum1A = vec_adds(srcP0A, srcP1A);
+ sum1B = vec_adds(srcP0B, srcP1B);
+ sum2A = vec_adds(srcM1A, srcP2A);
+ sum2B = vec_adds(srcM1B, srcP2B);
+ sum3A = vec_adds(srcM2A, srcP3A);
+ sum3B = vec_adds(srcM2B, srcP3B);
+
+ pp1A = vec_mladd(sum1A, v20ss, sum3A);
+ pp1B = vec_mladd(sum1B, v20ss, sum3B);
+
+ pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+ pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+ psumA = vec_sub(pp1A, pp2A);
+ psumB = vec_sub(pp1B, pp2B);
+
+ vec_st(psumA, 0, tmp);
+ vec_st(psumB, 16, tmp);
+
+ src += srcStride;
+ tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
+ }
+
+ tmpM2ssA = vec_ld(0, tmpbis);
+ tmpM2ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+ tmpM1ssA = vec_ld(0, tmpbis);
+ tmpM1ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+ tmpP0ssA = vec_ld(0, tmpbis);
+ tmpP0ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+ tmpP1ssA = vec_ld(0, tmpbis);
+ tmpP1ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+ tmpP2ssA = vec_ld(0, tmpbis);
+ tmpP2ssB = vec_ld(16, tmpbis);
+ tmpbis += tmpStride;
+
+ for (i = 0 ; i < 16 ; i++) {
+ const vector signed short tmpP3ssA = vec_ld(0, tmpbis);
+ const vector signed short tmpP3ssB = vec_ld(16, tmpbis);
+
+ const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
+ const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
+ const vector signed short sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
+ const vector signed short sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
+ const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
+ const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
+
+ tmpbis += tmpStride;
+
+ tmpM2ssA = tmpM1ssA;
+ tmpM2ssB = tmpM1ssB;
+ tmpM1ssA = tmpP0ssA;
+ tmpM1ssB = tmpP0ssB;
+ tmpP0ssA = tmpP1ssA;
+ tmpP0ssB = tmpP1ssB;
+ tmpP1ssA = tmpP2ssA;
+ tmpP1ssB = tmpP2ssB;
+ tmpP2ssA = tmpP3ssA;
+ tmpP2ssB = tmpP3ssB;
+
+ pp1Ae = vec_mule(sum1A, v20ss);
+ pp1Ao = vec_mulo(sum1A, v20ss);
+ pp1Be = vec_mule(sum1B, v20ss);
+ pp1Bo = vec_mulo(sum1B, v20ss);
+
+ pp2Ae = vec_mule(sum2A, v5ss);
+ pp2Ao = vec_mulo(sum2A, v5ss);
+ pp2Be = vec_mule(sum2B, v5ss);
+ pp2Bo = vec_mulo(sum2B, v5ss);
+
+ pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
+ pp3Ao = vec_mulo(sum3A, v1ss);
+ pp3Be = vec_sra((vector signed int)sum3B, v16ui);
+ pp3Bo = vec_mulo(sum3B, v1ss);
+
+ pp1cAe = vec_add(pp1Ae, v512si);
+ pp1cAo = vec_add(pp1Ao, v512si);
+ pp1cBe = vec_add(pp1Be, v512si);
+ pp1cBo = vec_add(pp1Bo, v512si);
+
+ pp32Ae = vec_sub(pp3Ae, pp2Ae);
+ pp32Ao = vec_sub(pp3Ao, pp2Ao);
+ pp32Be = vec_sub(pp3Be, pp2Be);
+ pp32Bo = vec_sub(pp3Bo, pp2Bo);
+
+ sumAe = vec_add(pp1cAe, pp32Ae);
+ sumAo = vec_add(pp1cAo, pp32Ao);
+ sumBe = vec_add(pp1cBe, pp32Be);
+ sumBo = vec_add(pp1cBo, pp32Bo);
+
+ ssumAe = vec_sra(sumAe, v10ui);
+ ssumAo = vec_sra(sumAo, v10ui);
+ ssumBe = vec_sra(sumBe, v10ui);
+ ssumBo = vec_sra(sumBo, v10ui);
+
+ ssume = vec_packs(ssumAe, ssumBe);
+ ssumo = vec_packs(ssumAo, ssumBo);
+
+ sumv = vec_packsu(ssume, ssumo);
+ sum = vec_perm(sumv, sumv, mperm);
+
+ dst1 = vec_ld(0, dst);
+ dst2 = vec_ld(16, dst);
+ vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+
+ OP_U8_ALTIVEC(fsum, sum, vdst);
+
+ rsum = vec_perm(fsum, fsum, dstperm);
+ fdst1 = vec_sel(dst1, rsum, dstmask);
+ fdst2 = vec_sel(rsum, dst2, dstmask);
+
+ vec_st(fdst1, 0, dst);
+ vec_st(fdst2, 16, dst);
+
+ dst += dstStride;
+ }
+ POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+}
diff --git a/src/libffmpeg/libavcodec/ppc/mathops.h b/src/libffmpeg/libavcodec/ppc/mathops.h
new file mode 100644
index 000000000..6af23f246
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/mathops.h
@@ -0,0 +1,33 @@
+/*
+ * simple math operations
+ * Copyright (c) 2001, 2002 Fabrice Bellard.
+ * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#if defined(ARCH_POWERPC_405)
+/* signed 16x16 -> 32 multiply add accumulate */
+# define MAC16(rt, ra, rb) \
+ asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb));
+
+/* signed 16x16 -> 32 multiply */
+# define MUL16(ra, rb) \
+ ({ int __rt; \
+ asm ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); \
+ __rt; })
+#endif
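+
+/* Roughly equivalent portable C (assuming the 405 maclhw/mullhw semantics):
+ *   MAC16(rt, ra, rb)  ->  rt += (int16_t)(ra) * (int16_t)(rb);
+ *   MUL16(ra, rb)      ->  ((int16_t)(ra) * (int16_t)(rb))
+ */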
diff --git a/src/libffmpeg/libavcodec/ppc/snow_altivec.c b/src/libffmpeg/libavcodec/ppc/snow_altivec.c
new file mode 100644
index 000000000..b15672ffe
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/snow_altivec.c
@@ -0,0 +1,788 @@
+/*
+ * Altivec optimized snow DSP utils
+ * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+#include "dsputil_altivec.h"
+#include "../snow.h"
+
+#undef NDEBUG
+#include <assert.h>
+
+
+
+//FIXME remove this replication
+#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num)))
+
+static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line)
+{
+ int offset;
+ DWTELEM * buffer;
+
+// av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line);
+
+ assert(buf->data_stack_top >= 0);
+// assert(!buf->line[line]);
+ if (buf->line[line])
+ return buf->line[line];
+
+ offset = buf->line_width * line;
+ buffer = buf->data_stack[buf->data_stack_top];
+ buf->data_stack_top--;
+ buf->line[line] = buffer;
+
+// av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1);
+
+ return buffer;
+}
+
+
+//altivec code
+
+void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width)
+{
+ const int w2= (width+1)>>1;
+ DECLARE_ALIGNED_16(DWTELEM, temp[(width>>1)]);
+ const int w_l= (width>>1);
+ const int w_r= w2 - 1;
+ int i;
+ vector signed int t1, t2, x, y, tmp1, tmp2;
+ vector signed int *vbuf, *vtmp;
+ vector unsigned char align;
+
+
+
+ { // Lift 0
+ DWTELEM * const ref = b + w2 - 1;
+ DWTELEM b_0 = b[0];
+ vbuf = (vector signed int *)b;
+
+ tmp1 = vec_ld (0, ref);
+ align = vec_lvsl (0, ref);
+ tmp2 = vec_ld (15, ref);
+ t1= vec_perm(tmp1, tmp2, align);
+
+ i = 0;
+
+ for (i=0; i<w_l-15; i+=16) {
+#if 0
+ b[i+0] = b[i+0] - ((3 * (ref[i+0] + ref[i+1]) + 4) >> 3);
+ b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3);
+ b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3);
+ b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3);
+#else
+
+ tmp1 = vec_ld (0, ref+4+i);
+ tmp2 = vec_ld (15, ref+4+i);
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_add(vec_add(y,y),y);
+
+ tmp1 = vec_ld (0, ref+8+i);
+
+ y = vec_add(y, vec_splat_s32(4));
+ y = vec_sra(y, vec_splat_u32(3));
+
+ tmp2 = vec_ld (15, ref+8+i);
+
+ *vbuf = vec_sub(*vbuf, y);
+
+ t1=t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_add(vec_add(y,y),y);
+
+ tmp1 = vec_ld (0, ref+12+i);
+
+ y = vec_add(y, vec_splat_s32(4));
+ y = vec_sra(y, vec_splat_u32(3));
+
+ tmp2 = vec_ld (15, ref+12+i);
+
+ *vbuf = vec_sub(*vbuf, y);
+
+ t1=t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_add(vec_add(y,y),y);
+
+ tmp1 = vec_ld (0, ref+16+i);
+
+ y = vec_add(y, vec_splat_s32(4));
+ y = vec_sra(y, vec_splat_u32(3));
+
+ tmp2 = vec_ld (15, ref+16+i);
+
+ *vbuf = vec_sub(*vbuf, y);
+
+ t1=t2;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_add(vec_add(y,y),y);
+
+ vbuf++;
+
+ y = vec_add(y, vec_splat_s32(4));
+ y = vec_sra(y, vec_splat_u32(3));
+ *vbuf = vec_sub(*vbuf, y);
+
+ t1=t2;
+
+ vbuf++;
+
+#endif
+ }
+
+ snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
+ b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
+ }
+
+ { // Lift 1
+ DWTELEM * const dst = b+w2;
+
+ i = 0;
+ for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
+ dst[i] = dst[i] - (b[i] + b[i + 1]);
+ }
+
+ align = vec_lvsl(0, b+i);
+ tmp1 = vec_ld(0, b+i);
+ vbuf = (vector signed int*) (dst + i);
+ tmp2 = vec_ld(15, b+i);
+
+ t1 = vec_perm(tmp1, tmp2, align);
+
+ for (; i<w_r-3; i+=4) {
+
+#if 0
+ dst[i] = dst[i] - (b[i] + b[i + 1]);
+ dst[i+1] = dst[i+1] - (b[i+1] + b[i + 2]);
+ dst[i+2] = dst[i+2] - (b[i+2] + b[i + 3]);
+ dst[i+3] = dst[i+3] - (b[i+3] + b[i + 4]);
+#else
+
+ tmp1 = vec_ld(0, b+4+i);
+ tmp2 = vec_ld(15, b+4+i);
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1, vec_sld(t1,t2,4));
+ *vbuf = vec_sub (*vbuf, y);
+
+ vbuf++;
+
+ t1 = t2;
+
+#endif
+
+ }
+
+ snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
+ }
+
+ { // Lift 2
+ DWTELEM * const ref = b+w2 - 1;
+ DWTELEM b_0 = b[0];
+ vbuf= (vector signed int *) b;
+
+ tmp1 = vec_ld (0, ref);
+ align = vec_lvsl (0, ref);
+ tmp2 = vec_ld (15, ref);
+ t1= vec_perm(tmp1, tmp2, align);
+
+ i = 0;
+ for (; i<w_l-15; i+=16) {
+#if 0
+ b[i] = b[i] - (((8 -(ref[i] + ref[i+1])) - (b[i] <<2)) >> 4);
+ b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4);
+ b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4);
+ b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4);
+#else
+ tmp1 = vec_ld (0, ref+4+i);
+ tmp2 = vec_ld (15, ref+4+i);
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_sub(vec_splat_s32(8),y);
+
+ tmp1 = vec_ld (0, ref+8+i);
+
+ x = vec_sl(*vbuf,vec_splat_u32(2));
+ y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+ tmp2 = vec_ld (15, ref+8+i);
+
+ *vbuf = vec_sub( *vbuf, y);
+
+ t1 = t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_sub(vec_splat_s32(8),y);
+
+ tmp1 = vec_ld (0, ref+12+i);
+
+ x = vec_sl(*vbuf,vec_splat_u32(2));
+ y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+ tmp2 = vec_ld (15, ref+12+i);
+
+ *vbuf = vec_sub( *vbuf, y);
+
+ t1 = t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_sub(vec_splat_s32(8),y);
+
+ tmp1 = vec_ld (0, ref+16+i);
+
+ x = vec_sl(*vbuf,vec_splat_u32(2));
+ y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+
+ tmp2 = vec_ld (15, ref+16+i);
+
+ *vbuf = vec_sub( *vbuf, y);
+
+ t1 = t2;
+
+ vbuf++;
+
+ t2 = vec_perm(tmp1, tmp2, align);
+
+ y = vec_add(t1,vec_sld(t1,t2,4));
+ y = vec_sub(vec_splat_s32(8),y);
+
+ t1 = t2;
+
+ x = vec_sl(*vbuf,vec_splat_u32(2));
+ y = vec_sra(vec_sub(y,x),vec_splat_u32(4));
+ *vbuf = vec_sub( *vbuf, y);
+
+ vbuf++;
+
+#endif
+ }
+
+ snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
+ b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
+ }
+
+ { // Lift 3
+ DWTELEM * const src = b+w2;
+
+ vbuf = (vector signed int *)b;
+ vtmp = (vector signed int *)temp;
+
+ i = 0;
+ align = vec_lvsl(0, src);
+
+ for (; i<w_r-3; i+=4) {
+#if 0
+ temp[i] = src[i] - ((-3*(b[i] + b[i+1]))>>1);
+ temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1);
+ temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1);
+ temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1);
+#else
+ tmp1 = vec_ld(0,src+i);
+ t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4));
+ tmp2 = vec_ld(15,src+i);
+ t1 = vec_sub(vec_splat_s32(0),t1); //bad!
+ t1 = vec_add(t1,vec_add(t1,t1));
+ t2 = vec_perm(tmp1 ,tmp2 ,align);
+ t1 = vec_sra(t1,vec_splat_u32(1));
+ vbuf++;
+ *vtmp = vec_sub(t2,t1);
+ vtmp++;
+
+#endif
+
+ }
+
+ snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1);
+ }
+
+ {
+ //Interleave
+ int a;
+ vector signed int *t = (vector signed int *)temp,
+ *v = (vector signed int *)b;
+
+ snow_interleave_line_header(&i, width, b, temp);
+
+ for (; (i & 0xE) != 0xE; i-=2){
+ b[i+1] = temp[i>>1];
+ b[i] = b[i>>1];
+ }
+ for (i-=14; i>=0; i-=16){
+ a=i/4;
+
+ v[a+3]=vec_mergel(v[(a>>1)+1],t[(a>>1)+1]);
+ v[a+2]=vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]);
+ v[a+1]=vec_mergel(v[a>>1],t[a>>1]);
+ v[a]=vec_mergeh(v[a>>1],t[a>>1]);
+
+ }
+
+ }
+}
+
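+// Vertical lifting: processes four coefficients per iteration across the six
+// input lines b0..b5, then falls back to the scalar formulas for the
+// remaining width%4 columns.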
+void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width)
+{
+ int i, w4 = width/4;
+ vector signed int *v0, *v1,*v2,*v3,*v4,*v5;
+ vector signed int t1, t2;
+
+ v0=(vector signed int *)b0;
+ v1=(vector signed int *)b1;
+ v2=(vector signed int *)b2;
+ v3=(vector signed int *)b3;
+ v4=(vector signed int *)b4;
+ v5=(vector signed int *)b5;
+
+ for (i=0; i< w4;i++)
+ {
+
+ #if 0
+ b4[i] -= (3*(b3[i] + b5[i])+4)>>3;
+ b3[i] -= ((b2[i] + b4[i]));
+ b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4;
+ b1[i] += (3*(b0[i] + b2[i]))>>1;
+ #else
+ t1 = vec_add(v3[i], v5[i]);
+ t2 = vec_add(t1, vec_add(t1,t1));
+ t1 = vec_add(t2, vec_splat_s32(4));
+ v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3)));
+
+ v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i]));
+
+ t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i]));
+ t2 = vec_sl(v2[i], vec_splat_u32(2));
+ v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4)));
+ t1 = vec_add(v0[i], v2[i]);
+ t2 = vec_add(t1, vec_add(t1,t1));
+ v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1)));
+
+ #endif
+ }
+
+ for(i*=4; i < width; i++)
+ {
+ b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
+ b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
+ b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
+ b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
+ }
+}
+
+#define LOAD_BLOCKS \
+ tmp1 = vec_ld(0, &block[3][y*src_stride]);\
+ align = vec_lvsl(0, &block[3][y*src_stride]);\
+ tmp2 = vec_ld(15, &block[3][y*src_stride]);\
+\
+ b3 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, &block[2][y*src_stride]);\
+ align = vec_lvsl(0, &block[2][y*src_stride]);\
+ tmp2 = vec_ld(15, &block[2][y*src_stride]);\
+\
+ b2 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, &block[1][y*src_stride]);\
+ align = vec_lvsl(0, &block[1][y*src_stride]);\
+ tmp2 = vec_ld(15, &block[1][y*src_stride]);\
+\
+ b1 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, &block[0][y*src_stride]);\
+ align = vec_lvsl(0, &block[0][y*src_stride]);\
+ tmp2 = vec_ld(15, &block[0][y*src_stride]);\
+\
+ b0 = vec_perm(tmp1,tmp2,align);
+
+#define LOAD_OBMCS \
+ tmp1 = vec_ld(0, obmc1);\
+ align = vec_lvsl(0, obmc1);\
+ tmp2 = vec_ld(15, obmc1);\
+\
+ ob1 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, obmc2);\
+ align = vec_lvsl(0, obmc2);\
+ tmp2 = vec_ld(15, obmc2);\
+\
+ ob2 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, obmc3);\
+ align = vec_lvsl(0, obmc3);\
+ tmp2 = vec_ld(15, obmc3);\
+\
+ ob3 = vec_perm(tmp1,tmp2,align);\
+\
+ tmp1 = vec_ld(0, obmc4);\
+ align = vec_lvsl(0, obmc4);\
+ tmp2 = vec_ld(15, obmc4);\
+\
+ ob4 = vec_perm(tmp1,tmp2,align);
+
+/* interleave logic
+ * h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ]
+ * h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ]
+ * h <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ]
+ */
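+/* STEPS_0_1 pairs each OBMC weight with its block pixel via the merges above,
+ * so that vec_msum(ih, il, 0) accumulates the four weighted contributions of
+ * every output pixel into one 32-bit sum (results land in v[0] and v[1]).
+ */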
+
+#define STEPS_0_1\
+ h1 = (vector unsigned short)\
+ vec_mergeh(ob1, ob2);\
+\
+ h2 = (vector unsigned short)\
+ vec_mergeh(ob3, ob4);\
+\
+ ih = (vector unsigned char)\
+ vec_mergeh(h1,h2);\
+\
+ l1 = (vector unsigned short) vec_mergeh(b3, b2);\
+\
+ ih1 = (vector unsigned char) vec_mergel(h1, h2);\
+\
+ l2 = (vector unsigned short) vec_mergeh(b1, b0);\
+\
+ il = (vector unsigned char) vec_mergeh(l1, l2);\
+\
+ v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
+\
+ il1 = (vector unsigned char) vec_mergel(l1, l2);\
+\
+ v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
+
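+/* FINAL_STEP_SCALAR: per-pixel epilogue. In "add" mode the accumulated OBMC
+ * value is added to the slice-buffer line, rounded by FRAC_BITS, clipped to
+ * 0..255 and written to dst8; otherwise it is subtracted from the line.
+ */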
+#define FINAL_STEP_SCALAR\
+ for(x=0; x<b_w; x++)\
+ if(add){\
+ vbuf[x] += dst[x + src_x];\
+ vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
+ if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
+ dst8[x + y*src_stride] = vbuf[x];\
+ }else{\
+ dst[x + src_x] -= vbuf[x];\
+ }
+
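+// Accumulates one 8-pixel-wide OBMC block (16-wide obmc table) into the slice
+// buffer, one line at a time; used when src_x is not a multiple of 16, so the
+// final step stays scalar.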
+static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
+ const int obmc_stride,
+ uint8_t * * block, int b_w,
+ int b_h, int src_x, int src_y,
+ int src_stride, slice_buffer * sb,
+ int add, uint8_t * dst8)
+{
+ int y, x;
+ DWTELEM * dst;
+ vector unsigned short h1, h2, l1, l2;
+ vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+ vector unsigned char b0,b1,b2,b3;
+ vector unsigned char ob1,ob2,ob3,ob4;
+
+ DECLARE_ALIGNED_16(int, vbuf[16]);
+ vector signed int *v = (vector signed int *)vbuf, *d;
+
+ for(y=0; y<b_h; y++){
+ //FIXME ugly misuse of obmc_stride
+
+ uint8_t *obmc1= obmc + y*obmc_stride;
+ uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+ uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+ uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+ dst = slice_buffer_get_line(sb, src_y + y);
+ d = (vector signed int *)(dst + src_x);
+
+//FIXME I could avoid some loads!
+
+ // load blocks
+ LOAD_BLOCKS
+
+ // load obmcs
+ LOAD_OBMCS
+
+ // steps 0 1
+ STEPS_0_1
+
+ FINAL_STEP_SCALAR
+
+ }
+
+}
+
+#define STEPS_2_3\
+ h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
+\
+ h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
+\
+ ih = (vector unsigned char) vec_mergeh(h1,h2);\
+\
+ l1 = (vector unsigned short) vec_mergel(b3, b2);\
+\
+ l2 = (vector unsigned short) vec_mergel(b1, b0);\
+\
+ ih1 = (vector unsigned char) vec_mergel(h1,h2);\
+\
+ il = (vector unsigned char) vec_mergeh(l1,l2);\
+\
+ v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
+\
+ il1 = (vector unsigned char) vec_mergel(l1,l2);\
+\
+ v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
+
+
+static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
+ const int obmc_stride,
+ uint8_t * * block, int b_w,
+ int b_h, int src_x, int src_y,
+ int src_stride, slice_buffer * sb,
+ int add, uint8_t * dst8)
+{
+ int y, x;
+ DWTELEM * dst;
+ vector unsigned short h1, h2, l1, l2;
+ vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+ vector unsigned char b0,b1,b2,b3;
+ vector unsigned char ob1,ob2,ob3,ob4;
+ DECLARE_ALIGNED_16(int, vbuf[b_w]);
+ vector signed int *v = (vector signed int *)vbuf, *d;
+
+ for(y=0; y<b_h; y++){
+ //FIXME ugly misuse of obmc_stride
+
+ uint8_t *obmc1= obmc + y*obmc_stride;
+ uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+ uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+ uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+ dst = slice_buffer_get_line(sb, src_y + y);
+ d = (vector signed int *)(dst + src_x);
+
+ // load blocks
+ LOAD_BLOCKS
+
+ // load obmcs
+ LOAD_OBMCS
+
+ // steps 0 1 2 3
+ STEPS_0_1
+
+ STEPS_2_3
+
+ FINAL_STEP_SCALAR
+
+ }
+}
+
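+/* FINAL_STEP_VEC: vector counterpart of FINAL_STEP_SCALAR for the aligned
+ * case; it is intended to mirror the scalar rounding and 8-bit clamping using
+ * vector shifts, masks and vec_sel instead of per-element branches.
+ */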
+#define FINAL_STEP_VEC \
+\
+ if(add)\
+ {\
+ for(x=0; x<b_w/4; x++)\
+ {\
+ v[x] = vec_add(v[x], d[x]);\
+ v[x] = vec_sra(vec_add(v[x],\
+ vec_sl( vec_splat_s32(1),\
+ vec_splat_u32(7))),\
+ vec_splat_u32(8));\
+\
+ mask = (vector bool int) vec_sl((vector signed int)\
+ vec_cmpeq(v[x],v[x]),vec_splat_u32(8));\
+ mask = (vector bool int) vec_and(v[x],vec_nor(mask,mask));\
+\
+ mask = (vector bool int)\
+ vec_cmpeq((vector signed int)mask,\
+ (vector signed int)vec_splat_u32(0));\
+\
+ /* arithmetic shift right by 31 in three steps (vec_splat_u32() only covers 0..15) */\
+ vs = vec_sra(v[x],vec_splat_u32(8));\
+ vs = vec_sra(vs,vec_splat_u32(8));\
+ vs = vec_sra(vs,vec_splat_u32(15));\
+\
+ vs = vec_nor(vs,vs);\
+\
+ v[x]= vec_sel(v[x],vs,mask);\
+ }\
+\
+ for(x=0; x<b_w; x++)\
+ dst8[x + y*src_stride] = vbuf[x];\
+\
+ }\
+ else\
+ for(x=0; x<b_w/4; x++)\
+ d[x] = vec_sub(d[x], v[x]);
+
+static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
+ const int obmc_stride,
+ uint8_t * * block, int b_w,
+ int b_h, int src_x, int src_y,
+ int src_stride, slice_buffer * sb,
+ int add, uint8_t * dst8)
+{
+ int y, x;
+ DWTELEM * dst;
+ vector bool int mask;
+ vector signed int vs;
+ vector unsigned short h1, h2, l1, l2;
+ vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+ vector unsigned char b0,b1,b2,b3;
+ vector unsigned char ob1,ob2,ob3,ob4;
+
+ DECLARE_ALIGNED_16(int, vbuf[16]);
+ vector signed int *v = (vector signed int *)vbuf, *d;
+
+ for(y=0; y<b_h; y++){
+ //FIXME ugly misuse of obmc_stride
+
+ uint8_t *obmc1= obmc + y*obmc_stride;
+ uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+ uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+ uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+ dst = slice_buffer_get_line(sb, src_y + y);
+ d = (vector signed int *)(dst + src_x);
+
+//FIXME I could avoid some loads!
+
+ // load blocks
+ LOAD_BLOCKS
+
+ // load obmcs
+ LOAD_OBMCS
+
+ // steps 0 1
+ STEPS_0_1
+
+ FINAL_STEP_VEC
+
+ }
+
+}
+
+static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
+ const int obmc_stride,
+ uint8_t * * block, int b_w,
+ int b_h, int src_x, int src_y,
+ int src_stride, slice_buffer * sb,
+ int add, uint8_t * dst8)
+{
+ int y, x;
+ DWTELEM * dst;
+ vector bool int mask;
+ vector signed int vs;
+ vector unsigned short h1, h2, l1, l2;
+ vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+ vector unsigned char b0,b1,b2,b3;
+ vector unsigned char ob1,ob2,ob3,ob4;
+ DECLARE_ALIGNED_16(int, vbuf[b_w]);
+ vector signed int *v = (vector signed int *)vbuf, *d;
+
+ for(y=0; y<b_h; y++){
+ //FIXME ugly misuse of obmc_stride
+
+ uint8_t *obmc1= obmc + y*obmc_stride;
+ uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+ uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+ uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+ dst = slice_buffer_get_line(sb, src_y + y);
+ d = (vector signed int *)(dst + src_x);
+
+ // load blocks
+ LOAD_BLOCKS
+
+ // load obmcs
+ LOAD_OBMCS
+
+ // steps 0 1 2 3
+ STEPS_0_1
+
+ STEPS_2_3
+
+ FINAL_STEP_VEC
+
+ }
+}
+
+
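+// Dispatcher: picks the aligned ("_a_") variants when src_x is a multiple of
+// 16, and falls back to the generic C ff_snow_inner_add_yblock() for block
+// widths other than 8 and 16.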
+void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
+ uint8_t * * block, int b_w, int b_h,
+ int src_x, int src_y, int src_stride,
+ slice_buffer * sb, int add,
+ uint8_t * dst8)
+{
+ if (src_x&15) {
+ if (b_w == 16)
+ inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
+ b_w, b_h, src_x, src_y,
+ src_stride, sb, add, dst8);
+ else if (b_w == 8)
+ inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
+ b_w, b_h, src_x, src_y,
+ src_stride, sb, add, dst8);
+ else
+ ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
+ src_y, src_stride, sb, add, dst8);
+ } else {
+ if (b_w == 16)
+ inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
+ b_w, b_h, src_x, src_y,
+ src_stride, sb, add, dst8);
+ else if (b_w == 8)
+ inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
+ b_w, b_h, src_x, src_y,
+ src_stride, sb, add, dst8);
+ else
+ ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
+ src_y, src_stride, sb, add, dst8);
+ }
+}
+
+
+void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
+{
+ c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
+ c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
+ c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
+}
diff --git a/src/libffmpeg/libavcodec/ppc/types_altivec.h b/src/libffmpeg/libavcodec/ppc/types_altivec.h
new file mode 100644
index 000000000..f29026e04
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/types_altivec.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2006 Guillaume Poirier <gpoirier@mplayerhq.hu>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/***********************************************************************
+ * Vector types
+ **********************************************************************/
+#define vec_u8_t vector unsigned char
+#define vec_s8_t vector signed char
+#define vec_u16_t vector unsigned short
+#define vec_s16_t vector signed short
+#define vec_u32_t vector unsigned int
+#define vec_s32_t vector signed int
+
+/***********************************************************************
+ * Null vector
+ **********************************************************************/
+#define LOAD_ZERO const vec_u8_t zerov = vec_splat_u8( 0 )
+
+#define zero_u8v (vec_u8_t) zerov
+#define zero_s8v (vec_s8_t) zerov
+#define zero_u16v (vec_u16_t) zerov
+#define zero_s16v (vec_s16_t) zerov
+#define zero_u32v (vec_u32_t) zerov
+#define zero_s32v (vec_s32_t) zerov
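+
+/* Typical usage (illustrative sketch, not code from this header):
+ *   LOAD_ZERO;
+ *   vec_u8_t  pix = vec_ld(0, src);
+ *   vec_u16_t hi  = (vec_u16_t) vec_mergeh(zero_u8v, pix); // zero-extend bytes
+ */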
diff --git a/src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c b/src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c
new file mode 100644
index 000000000..114c9d41f
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c
@@ -0,0 +1,338 @@
+/*
+ * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
+ * Copyright (c) 2006 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#include "../dsputil.h"
+
+#include "gcc_fixes.h"
+
+#include "dsputil_altivec.h"
+
+// main steps of 8x8 transform
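+// (multiplications by the transform constants are decomposed into shifts and
+// adds, e.g. 12*x is computed as ((x<<2)<<1) + (x<<2) and 16*x as x<<4)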
+#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
+do { \
+ t0 = vec_sl(vec_add(s0, s4), vec_2); \
+ t0 = vec_add(vec_sl(t0, vec_1), t0); \
+ t0 = vec_add(t0, vec_rnd); \
+ t1 = vec_sl(vec_sub(s0, s4), vec_2); \
+ t1 = vec_add(vec_sl(t1, vec_1), t1); \
+ t1 = vec_add(t1, vec_rnd); \
+ t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
+ t2 = vec_add(t2, vec_sl(s2, vec_4)); \
+ t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
+ t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
+ t4 = vec_add(t0, t2); \
+ t5 = vec_add(t1, t3); \
+ t6 = vec_sub(t1, t3); \
+ t7 = vec_sub(t0, t2); \
+\
+ t0 = vec_sl(vec_add(s1, s3), vec_4); \
+ t0 = vec_add(t0, vec_sl(s5, vec_3)); \
+ t0 = vec_add(t0, vec_sl(s7, vec_2)); \
+ t0 = vec_add(t0, vec_sub(s5, s3)); \
+\
+ t1 = vec_sl(vec_sub(s1, s5), vec_4); \
+ t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
+ t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
+ t1 = vec_sub(t1, vec_add(s1, s7)); \
+\
+ t2 = vec_sl(vec_sub(s7, s3), vec_4); \
+ t2 = vec_add(t2, vec_sl(s1, vec_3)); \
+ t2 = vec_add(t2, vec_sl(s5, vec_2)); \
+ t2 = vec_add(t2, vec_sub(s1, s7)); \
+\
+ t3 = vec_sl(vec_sub(s5, s7), vec_4); \
+ t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
+ t3 = vec_add(t3, vec_sl(s1, vec_2)); \
+ t3 = vec_sub(t3, vec_add(s3, s5)); \
+\
+ s0 = vec_add(t4, t0); \
+ s1 = vec_add(t5, t1); \
+ s2 = vec_add(t6, t2); \
+ s3 = vec_add(t7, t3); \
+ s4 = vec_sub(t7, t3); \
+ s5 = vec_sub(t6, t2); \
+ s6 = vec_sub(t5, t1); \
+ s7 = vec_sub(t4, t0); \
+}while(0)
+
+#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
+do { \
+ s0 = vec_sra(s0, vec_3); \
+ s1 = vec_sra(s1, vec_3); \
+ s2 = vec_sra(s2, vec_3); \
+ s3 = vec_sra(s3, vec_3); \
+ s4 = vec_sra(s4, vec_3); \
+ s5 = vec_sra(s5, vec_3); \
+ s6 = vec_sra(s6, vec_3); \
+ s7 = vec_sra(s7, vec_3); \
+}while(0)
+
+#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
+do { \
+ s0 = vec_sra(s0, vec_7); \
+ s1 = vec_sra(s1, vec_7); \
+ s2 = vec_sra(s2, vec_7); \
+ s3 = vec_sra(s3, vec_7); \
+ s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
+ s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
+ s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
+ s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
+}while(0)
+
+/* main steps of 4x4 transform */
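+/* (the constants 17, 22 and 10 are likewise built from shifts and adds,
+ *  e.g. 17*x == (x<<4) + x) */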
+#define STEP4(s0, s1, s2, s3, vec_rnd) \
+do { \
+ t1 = vec_add(vec_sl(s0, vec_4), s0); \
+ t1 = vec_add(t1, vec_rnd); \
+ t2 = vec_add(vec_sl(s2, vec_4), s2); \
+ t0 = vec_add(t1, t2); \
+ t1 = vec_sub(t1, t2); \
+ t3 = vec_sl(vec_sub(s3, s1), vec_1); \
+ t3 = vec_add(t3, vec_sl(t3, vec_2)); \
+ t2 = vec_add(t3, vec_sl(s1, vec_5)); \
+ t3 = vec_add(t3, vec_sl(s3, vec_3)); \
+ t3 = vec_add(t3, vec_sl(s3, vec_2)); \
+ s0 = vec_add(t0, t2); \
+ s1 = vec_sub(t1, t3); \
+ s2 = vec_add(t1, t3); \
+ s3 = vec_sub(t0, t2); \
+}while (0)
+
+#define SHIFT_HOR4(s0, s1, s2, s3) \
+ s0 = vec_sra(s0, vec_3); \
+ s1 = vec_sra(s1, vec_3); \
+ s2 = vec_sra(s2, vec_3); \
+ s3 = vec_sra(s3, vec_3);
+
+#define SHIFT_VERT4(s0, s1, s2, s3) \
+ s0 = vec_sra(s0, vec_7); \
+ s1 = vec_sra(s1, vec_7); \
+ s2 = vec_sra(s2, vec_7); \
+ s3 = vec_sra(s3, vec_7);
+
+/** Do inverse transform on 8x8 block
+*/
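+// Row pass first: TRANSPOSE8, then STEP8 with rounding constant 4 and >>3
+// (SHIFT_HOR8); after a second transpose, the column pass uses rounding
+// constant 64 and >>7 (SHIFT_VERT8, which adds an extra 1 to s4..s7).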
+static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
+{
+ vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
+ vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
+ vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
+ vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
+ const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
+ const vector unsigned int vec_7 = vec_splat_u32(7);
+ const vector unsigned int vec_5 = vec_splat_u32(5);
+ const vector unsigned int vec_4 = vec_splat_u32(4);
+ const vector signed int vec_4s = vec_splat_s32(4);
+ const vector unsigned int vec_3 = vec_splat_u32(3);
+ const vector unsigned int vec_2 = vec_splat_u32(2);
+ const vector signed int vec_1s = vec_splat_s32(1);
+ const vector unsigned int vec_1 = vec_splat_u32(1);
+
+
+ src0 = vec_ld( 0, block);
+ src1 = vec_ld( 16, block);
+ src2 = vec_ld( 32, block);
+ src3 = vec_ld( 48, block);
+ src4 = vec_ld( 64, block);
+ src5 = vec_ld( 80, block);
+ src6 = vec_ld( 96, block);
+ src7 = vec_ld(112, block);
+
+ TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
+ s0 = vec_unpackl(src0);
+ s1 = vec_unpackl(src1);
+ s2 = vec_unpackl(src2);
+ s3 = vec_unpackl(src3);
+ s4 = vec_unpackl(src4);
+ s5 = vec_unpackl(src5);
+ s6 = vec_unpackl(src6);
+ s7 = vec_unpackl(src7);
+ s8 = vec_unpackh(src0);
+ s9 = vec_unpackh(src1);
+ sA = vec_unpackh(src2);
+ sB = vec_unpackh(src3);
+ sC = vec_unpackh(src4);
+ sD = vec_unpackh(src5);
+ sE = vec_unpackh(src6);
+ sF = vec_unpackh(src7);
+ STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
+ SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
+ STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
+ SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
+ src0 = vec_pack(s8, s0);
+ src1 = vec_pack(s9, s1);
+ src2 = vec_pack(sA, s2);
+ src3 = vec_pack(sB, s3);
+ src4 = vec_pack(sC, s4);
+ src5 = vec_pack(sD, s5);
+ src6 = vec_pack(sE, s6);
+ src7 = vec_pack(sF, s7);
+ TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
+
+ s0 = vec_unpackl(src0);
+ s1 = vec_unpackl(src1);
+ s2 = vec_unpackl(src2);
+ s3 = vec_unpackl(src3);
+ s4 = vec_unpackl(src4);
+ s5 = vec_unpackl(src5);
+ s6 = vec_unpackl(src6);
+ s7 = vec_unpackl(src7);
+ s8 = vec_unpackh(src0);
+ s9 = vec_unpackh(src1);
+ sA = vec_unpackh(src2);
+ sB = vec_unpackh(src3);
+ sC = vec_unpackh(src4);
+ sD = vec_unpackh(src5);
+ sE = vec_unpackh(src6);
+ sF = vec_unpackh(src7);
+ STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
+ SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
+ STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
+ SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
+ src0 = vec_pack(s8, s0);
+ src1 = vec_pack(s9, s1);
+ src2 = vec_pack(sA, s2);
+ src3 = vec_pack(sB, s3);
+ src4 = vec_pack(sC, s4);
+ src5 = vec_pack(sD, s5);
+ src6 = vec_pack(sE, s6);
+ src7 = vec_pack(sF, s7);
+
+ vec_st(src0, 0, block);
+ vec_st(src1, 16, block);
+ vec_st(src2, 32, block);
+ vec_st(src3, 48, block);
+ vec_st(src4, 64, block);
+ vec_st(src5, 80, block);
+ vec_st(src6, 96, block);
+ vec_st(src7,112, block);
+}
+
+/** Do inverse transform on 8x4 part of block
+*/
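+// Reuses the 8-point row pass of the 8x8 transform, then applies the 4-point
+// column transform (STEP4 with rounding 64, >>7) to the upper (n==0) or
+// lower (n!=0) four rows of the block.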
+static void vc1_inv_trans_8x4_altivec(DCTELEM block[64], int n)
+{
+ vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
+ vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
+ vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
+ vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
+ const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
+ const vector unsigned int vec_7 = vec_splat_u32(7);
+ const vector unsigned int vec_5 = vec_splat_u32(5);
+ const vector unsigned int vec_4 = vec_splat_u32(4);
+ const vector signed int vec_4s = vec_splat_s32(4);
+ const vector unsigned int vec_3 = vec_splat_u32(3);
+ const vector unsigned int vec_2 = vec_splat_u32(2);
+ const vector unsigned int vec_1 = vec_splat_u32(1);
+
+ src0 = vec_ld( 0, block);
+ src1 = vec_ld( 16, block);
+ src2 = vec_ld( 32, block);
+ src3 = vec_ld( 48, block);
+ src4 = vec_ld( 64, block);
+ src5 = vec_ld( 80, block);
+ src6 = vec_ld( 96, block);
+ src7 = vec_ld(112, block);
+
+ TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
+ s0 = vec_unpackl(src0);
+ s1 = vec_unpackl(src1);
+ s2 = vec_unpackl(src2);
+ s3 = vec_unpackl(src3);
+ s4 = vec_unpackl(src4);
+ s5 = vec_unpackl(src5);
+ s6 = vec_unpackl(src6);
+ s7 = vec_unpackl(src7);
+ s8 = vec_unpackh(src0);
+ s9 = vec_unpackh(src1);
+ sA = vec_unpackh(src2);
+ sB = vec_unpackh(src3);
+ sC = vec_unpackh(src4);
+ sD = vec_unpackh(src5);
+ sE = vec_unpackh(src6);
+ sF = vec_unpackh(src7);
+ STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
+ SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
+ STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
+ SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
+ src0 = vec_pack(s8, s0);
+ src1 = vec_pack(s9, s1);
+ src2 = vec_pack(sA, s2);
+ src3 = vec_pack(sB, s3);
+ src4 = vec_pack(sC, s4);
+ src5 = vec_pack(sD, s5);
+ src6 = vec_pack(sE, s6);
+ src7 = vec_pack(sF, s7);
+ TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
+
+ if(!n){ // upper half of block
+ s0 = vec_unpackh(src0);
+ s1 = vec_unpackh(src1);
+ s2 = vec_unpackh(src2);
+ s3 = vec_unpackh(src3);
+ s8 = vec_unpackl(src0);
+ s9 = vec_unpackl(src1);
+ sA = vec_unpackl(src2);
+ sB = vec_unpackl(src3);
+ STEP4(s0, s1, s2, s3, vec_64);
+ SHIFT_VERT4(s0, s1, s2, s3);
+ STEP4(s8, s9, sA, sB, vec_64);
+ SHIFT_VERT4(s8, s9, sA, sB);
+ src0 = vec_pack(s0, s8);
+ src1 = vec_pack(s1, s9);
+ src2 = vec_pack(s2, sA);
+ src3 = vec_pack(s3, sB);
+
+ vec_st(src0, 0, block);
+ vec_st(src1, 16, block);
+ vec_st(src2, 32, block);
+ vec_st(src3, 48, block);
+ } else { //lower half of block
+ s0 = vec_unpackh(src4);
+ s1 = vec_unpackh(src5);
+ s2 = vec_unpackh(src6);
+ s3 = vec_unpackh(src7);
+ s8 = vec_unpackl(src4);
+ s9 = vec_unpackl(src5);
+ sA = vec_unpackl(src6);
+ sB = vec_unpackl(src7);
+ STEP4(s0, s1, s2, s3, vec_64);
+ SHIFT_VERT4(s0, s1, s2, s3);
+ STEP4(s8, s9, sA, sB, vec_64);
+ SHIFT_VERT4(s8, s9, sA, sB);
+ src4 = vec_pack(s0, s8);
+ src5 = vec_pack(s1, s9);
+ src6 = vec_pack(s2, sA);
+ src7 = vec_pack(s3, sB);
+
+ vec_st(src4, 64, block);
+ vec_st(src5, 80, block);
+ vec_st(src6, 96, block);
+ vec_st(src7,112, block);
+ }
+}
+
+
+void vc1dsp_init_altivec(DSPContext* dsp, AVCodecContext *avctx) {
+ dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
+ dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
+}