author    Mike Melanson <mike@multimedia.cx>    2006-08-02 07:02:37 +0000
committer Mike Melanson <mike@multimedia.cx>    2006-08-02 07:02:37 +0000
commit    5f24c84bddd59e2d1744408f2c3694050d7d3d3d (patch)
tree      044d31ff791ca6d0d60b5624bd31554f7da53876 /src/libffmpeg/libavcodec/ppc
parent    feaf0357aaa9f80562900ad9c50b9adf44d9b795 (diff)
sync to FFmpeg 51.11.0
CVS patchset: 8146
CVS date: 2006/08/02 07:02:37
Diffstat (limited to 'src/libffmpeg/libavcodec/ppc')
 src/libffmpeg/libavcodec/ppc/dsputil_altivec.c               (-rw-r--r--) | 206
 src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c          (-rwxr-xr-x) | 117
 src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c (-rwxr-xr-x) | 576
 src/libffmpeg/libavcodec/ppc/dsputil_ppc.c                   (-rw-r--r--) |  18
 src/libffmpeg/libavcodec/ppc/gcc_fixes.h                     (-rw-r--r--) |   9

5 files changed, 557 insertions(+), 369 deletions(-)
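
Editor's note: two mechanical changes run through the whole diff below. Block-scope
variable declarations are hoisted above the first statement of each block (gcc 3.x in
C90 mode rejects mid-block declarations, which is what had kept the hadamard functions
Darwin-only), and the hard-coded asm ("vN") register pins are replaced by the REG_v
macro added to gcc_fixes.h. A minimal sketch of the combined pattern follows; the
sum_diff8 helper is hypothetical (not part of the patch), assumes 16-byte-aligned
inputs, and needs an AltiVec-enabled compiler (-maltivec):

    #include <altivec.h>

    /* REG_v as introduced in gcc_fixes.h below: request a specific AltiVec
     * register where the compiler handles explicit pinning, expand to
     * nothing on gcc < 4, which choked on it outside Darwin. */
    #if (__GNUC__ < 4)
    # define REG_v(a)
    #else
    # define REG_v(a) asm ( #a )
    #endif

    int sum_diff8(const signed char *a, const signed char *b)
    {
        /* C90 style: every declaration before the first statement, as the
         * patch enforces throughout the AltiVec DSP functions. */
        register vector signed char va REG_v(v0);
        register vector signed char vb REG_v(v1);
        vector signed char vd;
        vector signed int vsum;
        int res[4] __attribute__((aligned(16)));

        va = vec_ld(0, a);                       /* 16 bytes from each input */
        vb = vec_ld(0, b);
        vd = vec_sub(va, vb);                    /* element-wise difference  */
        vsum = vec_sum4s(vd, vec_splat_s32(0));  /* partial sums of 4 bytes  */
        vsum = vec_sums(vsum, vec_splat_s32(0)); /* fold to a single total   */
        vec_st(vsum, 0, (vector signed int *)res);
        return res[3];                           /* total lands in element 3 */
    }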
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
index 31464fb7a..81a32c9e3 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c
@@ -1308,13 +1308,12 @@ POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
 #endif /* ALTIVEC_USE_REFERENCE_C_CODE */
 }
 
-#ifdef CONFIG_DARWIN
 int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
 POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
     int sum;
-POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
     register const_vector unsigned char vzero = (const_vector unsigned char)vec_splat_u8(0);
     register vector signed short temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
+POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
     {
     register const_vector signed short vprod1 = (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
     register const_vector signed short vprod2 = (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
@@ -1339,6 +1338,8 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
     { \
     register vector unsigned char src1, src2, srcO; \
     register vector unsigned char dst1, dst2, dstO; \
+    register vector signed short srcV, dstV; \
+    register vector signed short but0, but1, but2, op1, op2, op3; \
     src1 = vec_ld(stride * i, src); \
     if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8) \
       src2 = vec_ld((stride * i) + 16, src); \
@@ -1349,17 +1350,19 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
     dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
     /* promote the unsigned chars to signed shorts */ \
     /* we're in the 8x8 function, we only care for the first 8 */ \
-    register vector signed short srcV = \
-      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \
-    register vector signed short dstV = \
-      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \
+    srcV = \
+      (vector signed short)vec_mergeh((vector signed char)vzero, \
+      (vector signed char)srcO); \
+    dstV = \
+      (vector signed short)vec_mergeh((vector signed char)vzero, \
+      (vector signed char)dstO); \
     /* substractions inside the first butterfly */ \
-    register vector signed short but0 = vec_sub(srcV, dstV); \
-    register vector signed short op1 = vec_perm(but0, but0, perm1); \
-    register vector signed short but1 = vec_mladd(but0, vprod1, op1); \
-    register vector signed short op2 = vec_perm(but1, but1, perm2); \
-    register vector signed short but2 = vec_mladd(but1, vprod2, op2); \
-    register vector signed short op3 = vec_perm(but2, but2, perm3); \
+    but0 = vec_sub(srcV, dstV); \
+    op1 = vec_perm(but0, but0, perm1); \
+    but1 = vec_mladd(but0, vprod1, op1); \
+    op2 = vec_perm(but1, but1, perm2); \
+    but2 = vec_mladd(but1, vprod2, op2); \
+    op3 = vec_perm(but2, but2, perm3); \
     res = vec_mladd(but2, vprod3, op3); \
     }
     ONEITERBUTTERFLY(0, temp0);
@@ -1442,39 +1445,39 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1);
 static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
     int sum;
     register vector signed short
-        temp0 asm ("v0"),
-        temp1 asm ("v1"),
-        temp2 asm ("v2"),
-        temp3 asm ("v3"),
-        temp4 asm ("v4"),
-        temp5 asm ("v5"),
-        temp6 asm ("v6"),
-        temp7 asm ("v7");
+        temp0 REG_v(v0),
+        temp1 REG_v(v1),
+        temp2 REG_v(v2),
+        temp3 REG_v(v3),
+        temp4 REG_v(v4),
+        temp5 REG_v(v5),
+        temp6 REG_v(v6),
+        temp7 REG_v(v7);
     register vector signed short
-        temp0S asm ("v8"),
-        temp1S asm ("v9"),
-        temp2S asm ("v10"),
-        temp3S asm ("v11"),
-        temp4S asm ("v12"),
-        temp5S asm ("v13"),
-        temp6S asm ("v14"),
-        temp7S asm ("v15");
-    register const_vector unsigned char vzero asm ("v31")= (const_vector unsigned char)vec_splat_u8(0);
+        temp0S REG_v(v8),
+        temp1S REG_v(v9),
+        temp2S REG_v(v10),
+        temp3S REG_v(v11),
+        temp4S REG_v(v12),
+        temp5S REG_v(v13),
+        temp6S REG_v(v14),
+        temp7S REG_v(v15);
+    register const_vector unsigned char vzero REG_v(v31)= (const_vector unsigned char)vec_splat_u8(0);
     {
-    register const_vector signed short vprod1 asm ("v16")= (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
-    register const_vector signed short vprod2 asm ("v17")= (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
-    register const_vector signed short vprod3 asm ("v18")= (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
-    register const_vector unsigned char perm1 asm ("v19")= (const_vector unsigned char)
+    register const_vector signed short vprod1 REG_v(v16)= (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
+    register const_vector signed short vprod2 REG_v(v17)= (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
+    register const_vector signed short vprod3 REG_v(v18)= (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
+    register const_vector unsigned char perm1 REG_v(v19)= (const_vector unsigned char)
       AVV(0x02, 0x03, 0x00, 0x01,
           0x06, 0x07, 0x04, 0x05,
           0x0A, 0x0B, 0x08, 0x09,
           0x0E, 0x0F, 0x0C, 0x0D);
-    register const_vector unsigned char perm2 asm ("v20")= (const_vector unsigned char)
+    register const_vector unsigned char perm2 REG_v(v20)= (const_vector unsigned char)
      AVV(0x04, 0x05, 0x06, 0x07,
          0x00, 0x01, 0x02, 0x03,
          0x0C, 0x0D, 0x0E, 0x0F,
          0x08, 0x09, 0x0A, 0x0B);
-    register const_vector unsigned char perm3 asm ("v21")= (const_vector unsigned char)
+    register const_vector unsigned char perm3 REG_v(v21)= (const_vector unsigned char)
       AVV(0x08, 0x09, 0x0A, 0x0B,
           0x0C, 0x0D, 0x0E, 0x0F,
           0x00, 0x01, 0x02, 0x03,
@@ -1482,37 +1485,63 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
 #define ONEITERBUTTERFLY(i, res1, res2) \
     { \
-    register vector unsigned char src1 asm ("v22"), src2 asm ("v23"); \
-    register vector unsigned char dst1 asm ("v24"), dst2 asm ("v25"); \
+    register vector unsigned char src1 REG_v(v22), \
+                                  src2 REG_v(v23), \
+                                  dst1 REG_v(v24), \
+                                  dst2 REG_v(v25), \
+                                  srcO REG_v(v22), \
+                                  dstO REG_v(v23); \
+    \
+    register vector signed short srcV REG_v(v24), \
+                                 dstV REG_v(v25), \
+                                 srcW REG_v(v26), \
+                                 dstW REG_v(v27), \
+                                 but0 REG_v(v28), \
+                                 but0S REG_v(v29), \
+                                 op1 REG_v(v30), \
+                                 but1 REG_v(v22), \
+                                 op1S REG_v(v23), \
+                                 but1S REG_v(v24), \
+                                 op2 REG_v(v25), \
+                                 but2 REG_v(v26), \
+                                 op2S REG_v(v27), \
+                                 but2S REG_v(v28), \
+                                 op3 REG_v(v29), \
+                                 op3S REG_v(v30); \
+    \
     src1 = vec_ld(stride * i, src); \
     src2 = vec_ld((stride * i) + 16, src); \
-    register vector unsigned char srcO asm ("v22") = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
+    srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
    dst1 = vec_ld(stride * i, dst); \
    dst2 = vec_ld((stride * i) + 16, dst); \
-    register vector unsigned char dstO asm ("v23") = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+    dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
     /* promote the unsigned chars to signed shorts */ \
-    register vector signed short srcV asm ("v24") = \
-      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \
-    register vector signed short dstV asm ("v25") = \
-      (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \
-    register vector signed short srcW asm ("v26") = \
-      (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)srcO); \
-    register vector signed short dstW asm ("v27") = \
-      (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)dstO); \
+    srcV = \
+      (vector signed short)vec_mergeh((vector signed char)vzero, \
+      (vector signed char)srcO); \
+    dstV = \
+      (vector signed short)vec_mergeh((vector signed char)vzero, \
+      (vector signed char)dstO); \
+    srcW = \
+      (vector signed short)vec_mergel((vector signed char)vzero, \
+      (vector signed char)srcO); \
+    dstW = \
+      (vector signed short)vec_mergel((vector signed char)vzero, \
+      (vector signed char)dstO); \
     /* substractions inside the first butterfly */ \
-    register vector signed short but0 asm ("v28") = vec_sub(srcV, dstV); \
-    register vector signed short but0S asm ("v29") = vec_sub(srcW, dstW); \
-    register vector signed short op1 asm ("v30") = vec_perm(but0, but0, perm1); \
-    register vector signed short but1 asm ("v22") = vec_mladd(but0, vprod1, op1); \
-    register vector signed short op1S asm ("v23") = vec_perm(but0S, but0S, perm1); \
-    register vector signed short but1S asm ("v24") = vec_mladd(but0S, vprod1, op1S); \
-    register vector signed short op2 asm ("v25") = vec_perm(but1, but1, perm2); \
-    register vector signed short but2 asm ("v26") = vec_mladd(but1, vprod2, op2); \
-    register vector signed short op2S asm ("v27") = vec_perm(but1S, but1S, perm2); \
-    register vector signed short but2S asm ("v28") = vec_mladd(but1S, vprod2, op2S); \
-    register vector signed short op3 asm ("v29") = vec_perm(but2, but2, perm3); \
+    but0 = vec_sub(srcV, dstV); \
+    but0S = vec_sub(srcW, dstW); \
+    op1 = vec_perm(but0, but0, perm1); \
+    but1 = vec_mladd(but0, vprod1, op1); \
+    op1S = vec_perm(but0S, but0S, perm1); \
+    but1S = vec_mladd(but0S, vprod1, op1S); \
+    op2 = vec_perm(but1, but1, perm2); \
+    but2 = vec_mladd(but1, vprod2, op2); \
+    op2S = vec_perm(but1S, but1S, perm2); \
+    but2S = vec_mladd(but1S, vprod2, op2S); \
+    op3 = vec_perm(but2, but2, perm3); \
     res1 = vec_mladd(but2, vprod3, op3); \
-    register vector signed short op3S asm ("v30") = vec_perm(but2S, but2S, perm3); \
+    op3S = vec_perm(but2S, but2S, perm3); \
     res2 = vec_mladd(but2S, vprod3, op3S); \
     }
     ONEITERBUTTERFLY(0, temp0, temp0S);
@@ -1527,6 +1556,12 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
 #undef ONEITERBUTTERFLY
     {
     register vector signed int vsum;
+    register vector signed short line0S, line1S, line2S, line3S, line4S,
+                                 line5S, line6S, line7S, line0BS,line2BS,
+                                 line1BS,line3BS,line4BS,line6BS,line5BS,
+                                 line7BS,line0CS,line4CS,line1CS,line5CS,
+                                 line2CS,line6CS,line3CS,line7CS;
+
     register vector signed short line0 = vec_add(temp0, temp1);
     register vector signed short line1 = vec_sub(temp0, temp1);
     register vector signed short line2 = vec_add(temp2, temp3);
@@ -1563,32 +1598,32 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
     vsum = vec_sum4s(vec_abs(line6C), vsum);
     vsum = vec_sum4s(vec_abs(line7C), vsum);
 
-    register vector signed short line0S = vec_add(temp0S, temp1S);
-    register vector signed short line1S = vec_sub(temp0S, temp1S);
-    register vector signed short line2S = vec_add(temp2S, temp3S);
-    register vector signed short line3S = vec_sub(temp2S, temp3S);
-    register vector signed short line4S = vec_add(temp4S, temp5S);
-    register vector signed short line5S = vec_sub(temp4S, temp5S);
-    register vector signed short line6S = vec_add(temp6S, temp7S);
-    register vector signed short line7S = vec_sub(temp6S, temp7S);
-
-    register vector signed short line0BS = vec_add(line0S, line2S);
-    register vector signed short line2BS = vec_sub(line0S, line2S);
-    register vector signed short line1BS = vec_add(line1S, line3S);
-    register vector signed short line3BS = vec_sub(line1S, line3S);
-    register vector signed short line4BS = vec_add(line4S, line6S);
-    register vector signed short line6BS = vec_sub(line4S, line6S);
-    register vector signed short line5BS = vec_add(line5S, line7S);
-    register vector signed short line7BS = vec_sub(line5S, line7S);
-
-    register vector signed short line0CS = vec_add(line0BS, line4BS);
-    register vector signed short line4CS = vec_sub(line0BS, line4BS);
-    register vector signed short line1CS = vec_add(line1BS, line5BS);
-    register vector signed short line5CS = vec_sub(line1BS, line5BS);
-    register vector signed short line2CS = vec_add(line2BS, line6BS);
-    register vector signed short line6CS = vec_sub(line2BS, line6BS);
-    register vector signed short line3CS = vec_add(line3BS, line7BS);
-    register vector signed short line7CS = vec_sub(line3BS, line7BS);
+    line0S = vec_add(temp0S, temp1S);
+    line1S = vec_sub(temp0S, temp1S);
+    line2S = vec_add(temp2S, temp3S);
+    line3S = vec_sub(temp2S, temp3S);
+    line4S = vec_add(temp4S, temp5S);
+    line5S = vec_sub(temp4S, temp5S);
+    line6S = vec_add(temp6S, temp7S);
+    line7S = vec_sub(temp6S, temp7S);
+
+    line0BS = vec_add(line0S, line2S);
+    line2BS = vec_sub(line0S, line2S);
+    line1BS = vec_add(line1S, line3S);
+    line3BS = vec_sub(line1S, line3S);
+    line4BS = vec_add(line4S, line6S);
+    line6BS = vec_sub(line4S, line6S);
+    line5BS = vec_add(line5S, line7S);
+    line7BS = vec_sub(line5S, line7S);
+
+    line0CS = vec_add(line0BS, line4BS);
+    line4CS = vec_sub(line0BS, line4BS);
+    line1CS = vec_add(line1BS, line5BS);
+    line5CS = vec_sub(line1BS, line5BS);
+    line2CS = vec_add(line2BS, line6BS);
+    line6CS = vec_sub(line2BS, line6BS);
+    line3CS = vec_add(line3BS, line7BS);
+    line7CS = vec_sub(line3BS, line7BS);
 
     vsum = vec_sum4s(vec_abs(line0CS), vsum);
     vsum = vec_sum4s(vec_abs(line1CS), vsum);
@@ -1618,7 +1653,6 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
 POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
     return score;
 }
-#endif //CONFIG_DARWIN
 
 int has_altivec(void)
 {
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c
index b9fef005e..14391e60c 100755
--- a/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c
@@ -188,44 +188,97 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint
     OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
 }\
 
+static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
+                                            const uint8_t * src2, int dst_stride,
+                                            int src_stride1, int h)
+{
+    int i;
+    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;
 
-/* from dsputil.c */
-static inline void put_pixels8_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
-    int i;
-    for (i = 0; i < h; i++) {
-        uint32_t a, b;
-        a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l);
-        b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l);
-        *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(a, b);
-        a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l);
-        b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l);
-        *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(a, b);
-    }
-}
-static inline void avg_pixels8_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
-    int i;
-    for (i = 0; i < h; i++) {
-        uint32_t a, b;
-        a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l);
-        b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l);
-        *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride]), rnd_avg32(a, b));
-        a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l);
-        b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l);
-        *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride + 4]), rnd_avg32(a, b));
-    }
-}
-static inline void put_pixels16_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
-    put_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);
-    put_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h);
-}
-static inline void avg_pixels16_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) {
-    avg_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h);
-    avg_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h);
+    mask_ = vec_lvsl(0, src2);
+
+    for (i = 0; i < h; i++) {
+
+        tmp1 = vec_ld(i * src_stride1, src1);
+        mask = vec_lvsl(i * src_stride1, src1);
+        tmp2 = vec_ld(i * src_stride1 + 15, src1);
+
+        a = vec_perm(tmp1, tmp2, mask);
+
+        tmp1 = vec_ld(i * 16, src2);
+        tmp2 = vec_ld(i * 16 + 15, src2);
+
+        b = vec_perm(tmp1, tmp2, mask_);
+
+        tmp1 = vec_ld(0, dst);
+        mask = vec_lvsl(0, dst);
+        tmp2 = vec_ld(15, dst);
+
+        d = vec_avg(a, b);
+
+        edges = vec_perm(tmp2, tmp1, mask);
+
+        align = vec_lvsr(0, dst);
+
+        tmp1 = vec_perm(edges, d, align);
+        tmp2 = vec_perm(d, edges, align);
+
+        vec_st(tmp2, 15, dst);
+        vec_st(tmp1, 0 , dst);
+
+        dst += dst_stride;
+    }
+}
+
+static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
+                                            const uint8_t * src2, int dst_stride,
+                                            int src_stride1, int h)
+{
+    int i;
+    vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+
+    mask_ = vec_lvsl(0, src2);
+
+    for (i = 0; i < h; i++) {
+
+        tmp1 = vec_ld(i * src_stride1, src1);
+        mask = vec_lvsl(i * src_stride1, src1);
+        tmp2 = vec_ld(i * src_stride1 + 15, src1);
+
+        a = vec_perm(tmp1, tmp2, mask);
+
+        tmp1 = vec_ld(i * 16, src2);
+        tmp2 = vec_ld(i * 16 + 15, src2);
+
+        b = vec_perm(tmp1, tmp2, mask_);
+
+        tmp1 = vec_ld(0, dst);
+        mask = vec_lvsl(0, dst);
+        tmp2 = vec_ld(15, dst);
+
+        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
+
+        edges = vec_perm(tmp2, tmp1, mask);
+
+        align = vec_lvsr(0, dst);
+
+        tmp1 = vec_perm(edges, d, align);
+        tmp2 = vec_perm(d, edges, align);
+
+        vec_st(tmp2, 15, dst);
+        vec_st(tmp1, 0 , dst);
+
+        dst += dst_stride;
+    }
 }
 
-/* UNIMPLEMENTED YET !! */
+/* Implemented but could be faster
 #define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
 #define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
+ */
 
-H264_MC(put_, 16, altivec)
- H264_MC(avg_, 16, altivec)
+  H264_MC(put_, 16, altivec)
+  H264_MC(avg_, 16, altivec)
 
 void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c
index 7f46ccf14..37f4de58f 100755
--- a/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c
@@ -19,70 +19,76 @@
 /* this code assume that stride % 16 == 0 */
 void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
   POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
-POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
-    signed int ABCD[4] __attribute__((aligned(16)));
+    signed int ABCD[4] __attribute__((aligned(16))) =
+                        {((8 - x) * (8 - y)),
+                          ((x) * (8 - y)),
+                          ((8 - x) * (y)),
+                          ((x) * (y))};
     register int i;
-    ABCD[0] = ((8 - x) * (8 - y));
-    ABCD[1] = ((x) * (8 - y));
-    ABCD[2] = ((8 - x) * (y));
-    ABCD[3] = ((x) * (y));
+    vector unsigned char fperm;
     const vector signed int vABCD = vec_ld(0, ABCD);
     const vector signed short vA = vec_splat((vector signed short)vABCD, 1);
     const vector signed short vB = vec_splat((vector signed short)vABCD, 3);
     const vector signed short vC = vec_splat((vector signed short)vABCD, 5);
     const vector signed short vD = vec_splat((vector signed short)vABCD, 7);
     const vector signed int vzero = vec_splat_s32(0);
-    const vector signed short v32ss = (const vector signed short)AVV(32);
+    const vector signed short v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
     const vector unsigned short v6us = vec_splat_u16(6);
+    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
 
-    vector unsigned char fperm;
+    vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1;
+    vector unsigned char vsrc0uc, vsrc1uc;
+    vector signed short vsrc0ssH, vsrc1ssH;
+    vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc;
+    vector signed short vsrc2ssH, vsrc3ssH, psum;
+    vector unsigned char vdst, ppsum, vfdst, fsum;
+
+    POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
 
     if (((unsigned long)dst) % 16 == 0) {
-        fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
-                                          0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F);
+        fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13,
+                                          0x14, 0x15, 0x16, 0x17,
+                                          0x08, 0x09, 0x0A, 0x0B,
+                                          0x0C, 0x0D, 0x0E, 0x0F);
     } else {
-        fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
-                                          0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);
+        fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03,
+                                          0x04, 0x05, 0x06, 0x07,
+                                          0x18, 0x19, 0x1A, 0x1B,
+                                          0x1C, 0x1D, 0x1E, 0x1F);
     }
 
-    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
-    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
-
-    vector unsigned char vsrcAuc;
-    vector unsigned char vsrcBuc;
-    vector unsigned char vsrcperm0;
-    vector unsigned char vsrcperm1;
     vsrcAuc = vec_ld(0, src);
+
     if (loadSecond)
         vsrcBuc = vec_ld(16, src);
     vsrcperm0 = vec_lvsl(0, src);
     vsrcperm1 = vec_lvsl(1, src);
 
-    vector unsigned char vsrc0uc;
-    vector unsigned char vsrc1uc;
     vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
     if (reallyBadAlign)
         vsrc1uc = vsrcBuc;
     else
         vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
 
-    vector signed short vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc0uc);
-    vector signed short vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc1uc);
+    vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                               (vector unsigned char)vsrc0uc);
+    vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                               (vector unsigned char)vsrc1uc);
 
     if (!loadSecond) {// -> !reallyBadAlign
         for (i = 0 ; i < h ; i++) {
-            vector unsigned char vsrcCuc;
+
+
             vsrcCuc = vec_ld(stride + 0, src);
-            vector unsigned char vsrc2uc;
-            vector unsigned char vsrc3uc;
             vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
             vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
 
-            vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
-            vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
-
-            vector signed short psum;
+            vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                                       (vector unsigned char)vsrc2uc);
+            vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                                       (vector unsigned char)vsrc3uc);
 
             psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
             psum = vec_mladd(vB, vsrc1ssH, psum);
@@ -91,11 +97,9 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
             psum = vec_add(v32ss, psum);
             psum = vec_sra(psum, v6us);
 
-            vector unsigned char vdst = vec_ld(0, dst);
-            vector unsigned char ppsum = (vector unsigned char)vec_packsu(psum, psum);
-
-            vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
-            vector unsigned char fsum;
+            vdst = vec_ld(0, dst);
+            ppsum = (vector unsigned char)vec_packsu(psum, psum);
+            vfdst = vec_perm(vdst, ppsum, fperm);
 
             OP_U8_ALTIVEC(fsum, vfdst, vdst);
 
@@ -108,24 +112,21 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
             src += stride;
         }
     } else {
-        for (i = 0 ; i < h ; i++) {
-            vector unsigned char vsrcCuc;
         vector unsigned char vsrcDuc;
+        for (i = 0 ; i < h ; i++) {
             vsrcCuc = vec_ld(stride + 0, src);
             vsrcDuc = vec_ld(stride + 16, src);
 
-            vector unsigned char vsrc2uc;
-            vector unsigned char vsrc3uc;
             vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
             if (reallyBadAlign)
                 vsrc3uc = vsrcDuc;
             else
                 vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
 
-            vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc);
-            vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc);
-
-            vector signed short psum;
+            vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                                       (vector unsigned char)vsrc2uc);
+            vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero,
+                                                       (vector unsigned char)vsrc3uc);
 
             psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
             psum = vec_mladd(vB, vsrc1ssH, psum);
@@ -134,11 +135,9 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
             psum = vec_add(v32ss, psum);
             psum = vec_sr(psum, v6us);
 
-            vector unsigned char vdst = vec_ld(0, dst);
-            vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum);
-
-            vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm);
-            vector unsigned char fsum;
+            vdst = vec_ld(0, dst);
+            ppsum = (vector unsigned char)vec_pack(psum, psum);
+            vfdst = vec_perm(vdst, ppsum, fperm);
 
             OP_U8_ALTIVEC(fsum, vfdst, vdst);
 
@@ -157,7 +156,6 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in
 /* this code assume stride % 16 == 0 */
 static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
   POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1);
-  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
   register int i;
 
   const vector signed int vzero = vec_splat_s32(0);
@@ -167,18 +165,35 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, i
   const vector unsigned char permP1 = vec_lvsl(+1, src);
   const vector unsigned char permP2 = vec_lvsl(+2, src);
   const vector unsigned char permP3 = vec_lvsl(+3, src);
-  const vector signed short v20ss = (const vector signed short)AVV(20);
-  const vector unsigned short v5us = vec_splat_u16(5);
   const vector signed short v5ss = vec_splat_s16(5);
-  const vector signed short v16ss = (const vector signed short)AVV(16);
+  const vector unsigned short v5us = vec_splat_u16(5);
+  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
+  const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
   const vector unsigned char dstperm = vec_lvsr(0, dst);
-  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
-  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+
+  const vector unsigned char neg1 =
+    (const vector unsigned char) vec_splat_s8(-1);
+
+  const vector unsigned char dstmask =
+    vec_perm((const vector unsigned char)vzero,
+             neg1, dstperm);
+
+  vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
 
   register int align = ((((unsigned long)src) - 2) % 16);
 
+  vector signed short srcP0A, srcP0B, srcP1A, srcP1B,
+                      srcP2A, srcP2B, srcP3A, srcP3B,
+                      srcM1A, srcM1B, srcM2A, srcM2B,
+                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
+                      pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
+                      psumA, psumB, sumA, sumB;
+
+  vector unsigned char sum, dst1, dst2, vdst, fsum,
+                       rsum, fdst1, fdst2;
+
+  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
+
   for (i = 0 ; i < 16 ; i ++) {
-    vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
-
     vector unsigned char srcR1 = vec_ld(-2, src);
     vector unsigned char srcR2 = vec_ld(14, src);
@@ -237,55 +252,66 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, i
       }
       break;
     }
-    const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
-    const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
-    const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
-    const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
-
-    const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
-    const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
-    const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
-    const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
-
-    const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
-    const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
-    const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
-    const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
-
-    const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
-    const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
-    const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
-    const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
-    const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
-    const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
-
-    const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
-    const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);
-
-    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
-    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
-
-    const vector signed short pp3A = vec_add(sum3A, pp1A);
-    const vector signed short pp3B = vec_add(sum3B, pp1B);
-
-    const vector signed short psumA = vec_sub(pp3A, pp2A);
-    const vector signed short psumB = vec_sub(pp3B, pp2B);
+    srcP0A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP0);
+    srcP0B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP0);
+    srcP1A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP1);
+    srcP1B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP1);
+
+    srcP2A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP2);
+    srcP2B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP2);
+    srcP3A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP3);
+    srcP3B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP3);
+
+    srcM1A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcM1);
+    srcM1B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcM1);
+    srcM2A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcM2);
+    srcM2B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcM2);
+
+    sum1A = vec_adds(srcP0A, srcP1A);
+    sum1B = vec_adds(srcP0B, srcP1B);
+    sum2A = vec_adds(srcM1A, srcP2A);
+    sum2B = vec_adds(srcM1B, srcP2B);
+    sum3A = vec_adds(srcM2A, srcP3A);
+    sum3B = vec_adds(srcM2B, srcP3B);
+
+    pp1A = vec_mladd(sum1A, v20ss, v16ss);
+    pp1B = vec_mladd(sum1B, v20ss, v16ss);
+
+    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+    pp3A = vec_add(sum3A, pp1A);
+    pp3B = vec_add(sum3B, pp1B);
+
+    psumA = vec_sub(pp3A, pp2A);
+    psumB = vec_sub(pp3B, pp2B);
+
+    sumA = vec_sra(psumA, v5us);
+    sumB = vec_sra(psumB, v5us);
+
+    sum = vec_packsu(sumA, sumB);
+
+    dst1 = vec_ld(0, dst);
+    dst2 = vec_ld(16, dst);
+    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
 
-    const vector signed short sumA = vec_sra(psumA, v5us);
-    const vector signed short sumB = vec_sra(psumB, v5us);
-
-    const vector unsigned char sum = vec_packsu(sumA, sumB);
-
-    const vector unsigned char dst1 = vec_ld(0, dst);
-    const vector unsigned char dst2 = vec_ld(16, dst);
-    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
-
-    vector unsigned char fsum;
     OP_U8_ALTIVEC(fsum, sum, vdst);
 
-    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
-    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
-    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);
+    rsum = vec_perm(fsum, fsum, dstperm);
+    fdst1 = vec_sel(dst1, rsum, dstmask);
+    fdst2 = vec_sel(rsum, dst2, dstmask);
 
     vec_st(fdst1, 0, dst);
     vec_st(fdst2, 16, dst);
@@ -299,16 +325,15 @@ POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1);
 /* this code assume stride % 16 == 0 */
 static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
   POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1);
-  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
 
   register int i;
 
   const vector signed int vzero = vec_splat_s32(0);
   const vector unsigned char perm = vec_lvsl(0, src);
-  const vector signed short v20ss = (const vector signed short)AVV(20);
+  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
   const vector unsigned short v5us = vec_splat_u16(5);
   const vector signed short v5ss = vec_splat_s16(5);
-  const vector signed short v16ss = (const vector signed short)AVV(16);
+  const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
   const vector unsigned char dstperm = vec_lvsr(0, dst);
   const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
   const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
@@ -318,49 +343,71 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, i
   const vector unsigned char srcM2a = vec_ld(0, srcbis);
   const vector unsigned char srcM2b = vec_ld(16, srcbis);
   const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm);
-  srcbis += srcStride;
-  const vector unsigned char srcM1a = vec_ld(0, srcbis);
+//  srcbis += srcStride;
+  const vector unsigned char srcM1a = vec_ld(0, srcbis += srcStride);
   const vector unsigned char srcM1b = vec_ld(16, srcbis);
   const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm);
-  srcbis += srcStride;
-  const vector unsigned char srcP0a = vec_ld(0, srcbis);
+//  srcbis += srcStride;
+  const vector unsigned char srcP0a = vec_ld(0, srcbis += srcStride);
   const vector unsigned char srcP0b = vec_ld(16, srcbis);
   const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm);
-  srcbis += srcStride;
-  const vector unsigned char srcP1a = vec_ld(0, srcbis);
+//  srcbis += srcStride;
  const vector unsigned char srcP1a = vec_ld(0, srcbis += srcStride);
   const vector unsigned char srcP1b = vec_ld(16, srcbis);
   const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm);
-  srcbis += srcStride;
-  const vector unsigned char srcP2a = vec_ld(0, srcbis);
+//  srcbis += srcStride;
+  const vector unsigned char srcP2a = vec_ld(0, srcbis += srcStride);
   const vector unsigned char srcP2b = vec_ld(16, srcbis);
   const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm);
-  srcbis += srcStride;
-
-  vector signed short srcM2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
-  vector signed short srcM2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
-  vector signed short srcM1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
-  vector signed short srcM1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
-  vector signed short srcP0ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
-  vector signed short srcP0ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
-  vector signed short srcP1ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
-  vector signed short srcP1ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
-  vector signed short srcP2ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
-  vector signed short srcP2ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
+//  srcbis += srcStride;
+
+  vector signed short srcM2ssA = (vector signed short)
+                                    vec_mergeh((vector unsigned char)vzero, srcM2);
+  vector signed short srcM2ssB = (vector signed short)
+                                    vec_mergel((vector unsigned char)vzero, srcM2);
+  vector signed short srcM1ssA = (vector signed short)
+                                    vec_mergeh((vector unsigned char)vzero, srcM1);
+  vector signed short srcM1ssB = (vector signed short)
+                                    vec_mergel((vector unsigned char)vzero, srcM1);
+  vector signed short srcP0ssA = (vector signed short)
+                                    vec_mergeh((vector unsigned char)vzero, srcP0);
+  vector signed short srcP0ssB = (vector signed short)
+                                    vec_mergel((vector unsigned char)vzero, srcP0);
+  vector signed short srcP1ssA = (vector signed short)
+                                    vec_mergeh((vector unsigned char)vzero, srcP1);
+  vector signed short srcP1ssB = (vector signed short)
+                                    vec_mergel((vector unsigned char)vzero, srcP1);
+  vector signed short srcP2ssA = (vector signed short)
+                                    vec_mergeh((vector unsigned char)vzero, srcP2);
+  vector signed short srcP2ssB = (vector signed short)
+                                    vec_mergel((vector unsigned char)vzero, srcP2);
+
+  vector signed short pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
+                      psumA, psumB, sumA, sumB,
+                      srcP3ssA, srcP3ssB,
+                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
+
+  vector unsigned char sum, dst1, dst2, vdst, fsum, rsum, fdst1, fdst2,
+                       srcP3a, srcP3b, srcP3;
+
+  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1);
 
   for (i = 0 ; i < 16 ; i++) {
-    const vector unsigned char srcP3a = vec_ld(0, srcbis);
-    const vector unsigned char srcP3b = vec_ld(16, srcbis);
-    const vector unsigned char srcP3 = vec_perm(srcP3a, srcP3b, perm);
-    const vector signed short srcP3ssA = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
-    const vector signed short srcP3ssB = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
-    srcbis += srcStride;
-
-    const vector signed short sum1A = vec_adds(srcP0ssA, srcP1ssA);
-    const vector signed short sum1B = vec_adds(srcP0ssB, srcP1ssB);
-    const vector signed short sum2A = vec_adds(srcM1ssA, srcP2ssA);
-    const vector signed short sum2B = vec_adds(srcM1ssB, srcP2ssB);
-    const vector signed short sum3A = vec_adds(srcM2ssA, srcP3ssA);
-    const vector signed short sum3B = vec_adds(srcM2ssB, srcP3ssB);
+    srcP3a = vec_ld(0, srcbis += srcStride);
+    srcP3b = vec_ld(16, srcbis);
+    srcP3 = vec_perm(srcP3a, srcP3b, perm);
+    srcP3ssA = (vector signed short)
+                  vec_mergeh((vector unsigned char)vzero, srcP3);
+    srcP3ssB = (vector signed short)
+                  vec_mergel((vector unsigned char)vzero, srcP3);
+//    srcbis += srcStride;
+
+    sum1A = vec_adds(srcP0ssA, srcP1ssA);
+    sum1B = vec_adds(srcP0ssB, srcP1ssB);
+    sum2A = vec_adds(srcM1ssA, srcP2ssA);
+    sum2B = vec_adds(srcM1ssB, srcP2ssB);
+    sum3A = vec_adds(srcM2ssA, srcP3ssA);
+    sum3B = vec_adds(srcM2ssB, srcP3ssB);
 
     srcM2ssA = srcM1ssA;
     srcM2ssB = srcM1ssB;
@@ -373,33 +420,32 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, i
     srcP2ssA = srcP3ssA;
     srcP2ssB = srcP3ssB;
 
-    const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss);
-    const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss);
+    pp1A = vec_mladd(sum1A, v20ss, v16ss);
+    pp1B = vec_mladd(sum1B, v20ss, v16ss);
 
-    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
-    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
 
-    const vector signed short pp3A = vec_add(sum3A, pp1A);
-    const vector signed short pp3B = vec_add(sum3B, pp1B);
+    pp3A = vec_add(sum3A, pp1A);
+    pp3B = vec_add(sum3B, pp1B);
 
-    const vector signed short psumA = vec_sub(pp3A, pp2A);
-    const vector signed short psumB = vec_sub(pp3B, pp2B);
+    psumA = vec_sub(pp3A, pp2A);
+    psumB = vec_sub(pp3B, pp2B);
 
-    const vector signed short sumA = vec_sra(psumA, v5us);
-    const vector signed short sumB = vec_sra(psumB, v5us);
+    sumA = vec_sra(psumA, v5us);
+    sumB = vec_sra(psumB, v5us);
 
-    const vector unsigned char sum = vec_packsu(sumA, sumB);
+    sum = vec_packsu(sumA, sumB);
 
-    const vector unsigned char dst1 = vec_ld(0, dst);
-    const vector unsigned char dst2 = vec_ld(16, dst);
-    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+    dst1 = vec_ld(0, dst);
+    dst2 = vec_ld(16, dst);
+    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
 
-    vector unsigned char fsum;
     OP_U8_ALTIVEC(fsum, sum, vdst);
 
-    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
-    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
-    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);
+    rsum = vec_perm(fsum, fsum, dstperm);
+    fdst1 = vec_sel(dst1, rsum, dstmask);
+    fdst2 = vec_sel(rsum, dst2, dstmask);
 
     vec_st(fdst1, 0, dst);
     vec_st(fdst2, 16, dst);
@@ -412,7 +458,6 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, i
 /* this code assume stride % 16 == 0 *and* tmp is properly aligned */
 static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
   POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1);
-  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
   register int i;
   const vector signed int vzero = vec_splat_s32(0);
   const vector unsigned char permM2 = vec_lvsl(-2, src);
@@ -421,17 +466,47 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
  const vector unsigned char permM1 = vec_lvsl(-1, src);
   const vector unsigned char permP0 = vec_lvsl(+0, src);
   const vector unsigned char permP1 = vec_lvsl(+1, src);
   const vector unsigned char permP2 = vec_lvsl(+2, src);
   const vector unsigned char permP3 = vec_lvsl(+3, src);
-  const vector signed short v20ss = (const vector signed short)AVV(20);
+  const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
   const vector unsigned int v10ui = vec_splat_u32(10);
   const vector signed short v5ss = vec_splat_s16(5);
   const vector signed short v1ss = vec_splat_s16(1);
-  const vector signed int v512si = (const vector signed int)AVV(512);
-  const vector unsigned int v16ui = (const vector unsigned int)AVV(16);
+  const vector signed int v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
+  const vector unsigned int v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));
 
   register int align = ((((unsigned long)src) - 2) % 16);
 
-  src -= (2 * srcStride);
+  const vector unsigned char neg1 = (const vector unsigned char)
+                                        vec_splat_s8(-1);
+
+  vector signed short srcP0A, srcP0B, srcP1A, srcP1B,
+                      srcP2A, srcP2B, srcP3A, srcP3B,
+                      srcM1A, srcM1B, srcM2A, srcM2B,
+                      sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
+                      pp1A, pp1B, pp2A, pp2B, psumA, psumB;
+
+  const vector unsigned char dstperm = vec_lvsr(0, dst);
+
+  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
+
+  const vector unsigned char mperm = (const vector unsigned char)
+    AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
+        0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
+  int16_t *tmpbis = tmp;
+
+  vector signed short tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
+                      tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
+                      tmpP2ssA, tmpP2ssB;
+
+  vector signed int pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
+                    pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
+                    pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
+                    ssumAe, ssumAo, ssumBe, ssumBo;
+  vector unsigned char fsum, sumv, sum, dst1, dst2, vdst,
+                       rsum, fdst1, fdst2;
+  vector signed short ssume, ssumo;
 
+  POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1);
+  src -= (2 * srcStride);
   for (i = 0 ; i < 21 ; i ++) {
     vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
     vector unsigned char srcR1 = vec_ld(-2, src);
@@ -492,36 +567,48 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
      }
      break;
     }
-    const vector signed short srcP0A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP0);
-    const vector signed short srcP0B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP0);
-    const vector signed short srcP1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP1);
-    const vector signed short srcP1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP1);
-
-    const vector signed short srcP2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP2);
-    const vector signed short srcP2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP2);
-    const vector signed short srcP3A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcP3);
-    const vector signed short srcP3B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcP3);
-
-    const vector signed short srcM1A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM1);
-    const vector signed short srcM1B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM1);
-    const vector signed short srcM2A = (vector signed short)vec_mergeh((vector unsigned char)vzero, srcM2);
-    const vector signed short srcM2B = (vector signed short)vec_mergel((vector unsigned char)vzero, srcM2);
-
-    const vector signed short sum1A = vec_adds(srcP0A, srcP1A);
-    const vector signed short sum1B = vec_adds(srcP0B, srcP1B);
-    const vector signed short sum2A = vec_adds(srcM1A, srcP2A);
-    const vector signed short sum2B = vec_adds(srcM1B, srcP2B);
-    const vector signed short sum3A = vec_adds(srcM2A, srcP3A);
-    const vector signed short sum3B = vec_adds(srcM2B, srcP3B);
-
-    const vector signed short pp1A = vec_mladd(sum1A, v20ss, sum3A);
-    const vector signed short pp1B = vec_mladd(sum1B, v20ss, sum3B);
-
-    const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
-    const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
-
-    const vector signed short psumA = vec_sub(pp1A, pp2A);
-    const vector signed short psumB = vec_sub(pp1B, pp2B);
+    srcP0A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP0);
+    srcP0B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP0);
+    srcP1A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP1);
+    srcP1B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP1);
+
+    srcP2A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP2);
+    srcP2B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP2);
+    srcP3A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcP3);
+    srcP3B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcP3);
+
+    srcM1A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcM1);
+    srcM1B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcM1);
+    srcM2A = (vector signed short)
+                vec_mergeh((vector unsigned char)vzero, srcM2);
+    srcM2B = (vector signed short)
+                vec_mergel((vector unsigned char)vzero, srcM2);
+
+    sum1A = vec_adds(srcP0A, srcP1A);
+    sum1B = vec_adds(srcP0B, srcP1B);
+    sum2A = vec_adds(srcM1A, srcP2A);
+    sum2B = vec_adds(srcM1B, srcP2B);
+    sum3A = vec_adds(srcM2A, srcP3A);
+    sum3B = vec_adds(srcM2B, srcP3B);
+
+    pp1A = vec_mladd(sum1A, v20ss, sum3A);
+    pp1B = vec_mladd(sum1B, v20ss, sum3B);
+
+    pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero);
+    pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero);
+
+    psumA = vec_sub(pp1A, pp2A);
+    psumB = vec_sub(pp1B, pp2B);
 
     vec_st(psumA, 0, tmp);
     vec_st(psumB, 16, tmp);
@@ -530,35 +617,25 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
     tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */
   }
 
-  const vector unsigned char dstperm = vec_lvsr(0, dst);
-  const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1);
-  const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm);
-  const vector unsigned char mperm = (const vector unsigned char)
-    AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
-        0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
-
-  int16_t *tmpbis = tmp - (tmpStride * 21);
-
-  vector signed short tmpM2ssA = vec_ld(0, tmpbis);
-  vector signed short tmpM2ssB = vec_ld(16, tmpbis);
+  tmpM2ssA = vec_ld(0, tmpbis);
+  tmpM2ssB = vec_ld(16, tmpbis);
   tmpbis += tmpStride;
-  vector signed short tmpM1ssA = vec_ld(0, tmpbis);
-  vector signed short tmpM1ssB = vec_ld(16, tmpbis);
+  tmpM1ssA = vec_ld(0, tmpbis);
+  tmpM1ssB = vec_ld(16, tmpbis);
   tmpbis += tmpStride;
-  vector signed short tmpP0ssA = vec_ld(0, tmpbis);
-  vector signed short tmpP0ssB = vec_ld(16, tmpbis);
+  tmpP0ssA = vec_ld(0, tmpbis);
+  tmpP0ssB = vec_ld(16, tmpbis);
   tmpbis += tmpStride;
-  vector signed short tmpP1ssA = vec_ld(0, tmpbis);
-  vector signed short tmpP1ssB = vec_ld(16, tmpbis);
+  tmpP1ssA = vec_ld(0, tmpbis);
+  tmpP1ssB = vec_ld(16, tmpbis);
   tmpbis += tmpStride;
-  vector signed short tmpP2ssA = vec_ld(0, tmpbis);
-  vector signed short tmpP2ssB = vec_ld(16, tmpbis);
+  tmpP2ssA = vec_ld(0, tmpbis);
+  tmpP2ssB = vec_ld(16, tmpbis);
   tmpbis += tmpStride;
 
   for (i = 0 ; i < 16 ; i++) {
     const vector signed short tmpP3ssA = vec_ld(0, tmpbis);
     const vector signed short tmpP3ssB = vec_ld(16, tmpbis);
-    tmpbis += tmpStride;
 
     const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
     const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
@@ -567,6 +644,8 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
     const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
     const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
 
+    tmpbis += tmpStride;
+
     tmpM2ssA = tmpM1ssA;
     tmpM2ssB = tmpM1ssB;
     tmpM1ssA = tmpP0ssA;
@@ -578,57 +657,56 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
     tmpP2ssA = tmpP3ssA;
     tmpP2ssB = tmpP3ssB;
 
-    const vector signed int pp1Ae = vec_mule(sum1A, v20ss);
-    const vector signed int pp1Ao = vec_mulo(sum1A, v20ss);
-    const vector signed int pp1Be = vec_mule(sum1B, v20ss);
-    const vector signed int pp1Bo = vec_mulo(sum1B, v20ss);
+    pp1Ae = vec_mule(sum1A, v20ss);
+    pp1Ao = vec_mulo(sum1A, v20ss);
+    pp1Be = vec_mule(sum1B, v20ss);
+    pp1Bo = vec_mulo(sum1B, v20ss);
 
-    const vector signed int pp2Ae = vec_mule(sum2A, v5ss);
-    const vector signed int pp2Ao = vec_mulo(sum2A, v5ss);
-    const vector signed int pp2Be = vec_mule(sum2B, v5ss);
-    const vector signed int pp2Bo = vec_mulo(sum2B, v5ss);
+    pp2Ae = vec_mule(sum2A, v5ss);
+    pp2Ao = vec_mulo(sum2A, v5ss);
+    pp2Be = vec_mule(sum2B, v5ss);
+    pp2Bo = vec_mulo(sum2B, v5ss);
 
-    const vector signed int pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
-    const vector signed int pp3Ao = vec_mulo(sum3A, v1ss);
-    const vector signed int pp3Be = vec_sra((vector signed int)sum3B, v16ui);
-    const vector signed int pp3Bo = vec_mulo(sum3B, v1ss);
+    pp3Ae = vec_sra((vector signed int)sum3A, v16ui);
+    pp3Ao = vec_mulo(sum3A, v1ss);
+    pp3Be = vec_sra((vector signed int)sum3B, v16ui);
+    pp3Bo = vec_mulo(sum3B, v1ss);
 
-    const vector signed int pp1cAe = vec_add(pp1Ae, v512si);
-    const vector signed int pp1cAo = vec_add(pp1Ao, v512si);
-    const vector signed int pp1cBe = vec_add(pp1Be, v512si);
-    const vector signed int pp1cBo = vec_add(pp1Bo, v512si);
+    pp1cAe = vec_add(pp1Ae, v512si);
+    pp1cAo = vec_add(pp1Ao, v512si);
+    pp1cBe = vec_add(pp1Be, v512si);
+    pp1cBo = vec_add(pp1Bo, v512si);
 
-    const vector signed int pp32Ae = vec_sub(pp3Ae, pp2Ae);
-    const vector signed int pp32Ao = vec_sub(pp3Ao, pp2Ao);
-    const vector signed int pp32Be = vec_sub(pp3Be, pp2Be);
-    const vector signed int pp32Bo = vec_sub(pp3Bo, pp2Bo);
+    pp32Ae = vec_sub(pp3Ae, pp2Ae);
+    pp32Ao = vec_sub(pp3Ao, pp2Ao);
+    pp32Be = vec_sub(pp3Be, pp2Be);
+    pp32Bo = vec_sub(pp3Bo, pp2Bo);
 
-    const vector signed int sumAe = vec_add(pp1cAe, pp32Ae);
-    const vector signed int sumAo = vec_add(pp1cAo, pp32Ao);
-    const vector signed int sumBe = vec_add(pp1cBe, pp32Be);
-    const vector signed int sumBo = vec_add(pp1cBo, pp32Bo);
+    sumAe = vec_add(pp1cAe, pp32Ae);
+    sumAo = vec_add(pp1cAo, pp32Ao);
+    sumBe = vec_add(pp1cBe, pp32Be);
+    sumBo = vec_add(pp1cBo, pp32Bo);
 
-    const vector signed int ssumAe = vec_sra(sumAe, v10ui);
-    const vector signed int ssumAo = vec_sra(sumAo, v10ui);
-    const vector signed int ssumBe = vec_sra(sumBe, v10ui);
-    const vector signed int ssumBo = vec_sra(sumBo, v10ui);
+    ssumAe = vec_sra(sumAe, v10ui);
+    ssumAo = vec_sra(sumAo, v10ui);
+    ssumBe = vec_sra(sumBe, v10ui);
+    ssumBo = vec_sra(sumBo, v10ui);
 
-    const vector signed short ssume = vec_packs(ssumAe, ssumBe);
-    const vector signed short ssumo = vec_packs(ssumAo, ssumBo);
+    ssume = vec_packs(ssumAe, ssumBe);
+    ssumo = vec_packs(ssumAo, ssumBo);
 
-    const vector unsigned char sumv = vec_packsu(ssume, ssumo);
-    const vector unsigned char sum = vec_perm(sumv, sumv, mperm);
+    sumv = vec_packsu(ssume, ssumo);
+    sum = vec_perm(sumv, sumv, mperm);
 
-    const vector unsigned char dst1 = vec_ld(0, dst);
-    const vector unsigned char dst2 = vec_ld(16, dst);
-    const vector unsigned char vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
+    dst1 = vec_ld(0, dst);
+    dst2 = vec_ld(16, dst);
+    vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst));
 
-    vector unsigned char fsum;
     OP_U8_ALTIVEC(fsum, sum, vdst);
 
-    const vector unsigned char rsum = vec_perm(fsum, fsum, dstperm);
-    const vector unsigned char fdst1 = vec_sel(dst1, rsum, dstmask);
-    const vector unsigned char fdst2 = vec_sel(rsum, dst2, dstmask);
+    rsum = vec_perm(fsum, fsum, dstperm);
+    fdst1 = vec_sel(dst1, rsum, dstmask);
+    fdst2 = vec_sel(rsum, dst2, dstmask);
 
     vec_st(fdst1, 0, dst);
     vec_st(fdst2, 16, dst);
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
index d5f55b80f..b63c8dd84 100644
--- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
+++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c
@@ -30,6 +30,17 @@ extern void fdct_altivec(int16_t *block);
 extern void idct_put_altivec(uint8_t *dest, int line_size, int16_t *block);
 extern void idct_add_altivec(uint8_t *dest, int line_size, int16_t *block);
 
+extern void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width);
+extern void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1,
+                                                DWTELEM *b2, DWTELEM *b3,
+                                                DWTELEM *b4, DWTELEM *b5,
+                                                int width);
+extern void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
+                                             uint8_t * * block, int b_w, int b_h,
+                                             int src_x, int src_y, int src_stride,
+                                             slice_buffer * sb, int add,
+                                             uint8_t * dst8);
+
 int mm_flags = 0;
 
 int mm_support(void)
@@ -292,10 +303,13 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
 
         c->gmc1 = gmc1_altivec;
 
-#ifdef CONFIG_DARWIN // ATM gcc-3.3 and gcc-3.4 fail to compile these in linux...
         c->hadamard8_diff[0] = hadamard8_diff16_altivec;
         c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
-#endif
+
+
+        c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
+        c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
+        c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
 
 #ifdef CONFIG_ENCODERS
         if (avctx->dct_algo == FF_DCT_AUTO ||
diff --git a/src/libffmpeg/libavcodec/ppc/gcc_fixes.h b/src/libffmpeg/libavcodec/ppc/gcc_fixes.h
index 288fdf834..943905bc5 100644
--- a/src/libffmpeg/libavcodec/ppc/gcc_fixes.h
+++ b/src/libffmpeg/libavcodec/ppc/gcc_fixes.h
@@ -17,8 +17,17 @@
 # else
 # define AVV
 # endif
+#define REG_v(a) asm ( #a )
 #else
+
 #define AVV(x...) {x}
+
+#if (__GNUC__ < 4)
+# define REG_v(a)
+#else
+# define REG_v(a) asm ( #a )
+#endif
+
 #if (__GNUC__ * 100 + __GNUC_MINOR__ < 303)
 
 /* This code was provided to me by Bartosch Pixa
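
Editor's note: one more pattern worth calling out from the template file. The vector
literal constants built with AVV (a compiler-specific extension papered over in
gcc_fixes.h) are replaced by splat-and-shift sequences, which every AltiVec compiler
accepts and which keep constant generation entirely in registers. vec_splat_s16 and
friends only take a 5-bit immediate (-16..15), so values like 20, 32 and 512 are
derived with a shift. A hedged sketch of the arithmetic, reusing the names from the
patch (the make_lowpass_constants wrapper itself is hypothetical):

    #include <altivec.h>

    /* The constants as the patch now builds them; each comment gives the
     * value and the AVV literal it replaces. */
    void make_lowpass_constants(void)
    {
        /* 20 = 5 << 2, was (const vector signed short)AVV(20) */
        const vector signed short v20ss = vec_sl(vec_splat_s16(5), vec_splat_u16(2));
        /* 16 = 1 << 4, was (const vector signed short)AVV(16) */
        const vector signed short v16ss = vec_sl(vec_splat_s16(1), vec_splat_u16(4));
        /* 32 = 1 << 5, was (const vector signed short)AVV(32) */
        const vector signed short v32ss = vec_sl(vec_splat_s16(1), vec_splat_u16(5));
        /* 512 = 1 << 9 on 32-bit elements, was (const vector signed int)AVV(512) */
        const vector signed int v512si = vec_sl(vec_splat_s32(1), vec_splat_u32(9));

        (void)v20ss; (void)v16ss; (void)v32ss; (void)v512si;
    }

The shift amounts are splatted the same way, so none of this touches memory, whereas
an AVV literal could be materialized by older gcc versions as a load from the
constant pool.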