From e6d1a700e06ba8a985c29ba212b359c9e33bcc55 Mon Sep 17 00:00:00 2001 From: Miguel Freitas Date: Mon, 4 Dec 2006 22:25:13 +0000 Subject: trying an updated ffmpeg version (51.25.0) CVS patchset: 8405 CVS date: 2006/12/04 22:25:13 --- src/libffmpeg/libavcodec/ppc/dsputil_altivec.c | 612 ++++++++--------------- src/libffmpeg/libavcodec/ppc/dsputil_altivec.h | 77 +-- src/libffmpeg/libavcodec/ppc/dsputil_ppc.c | 130 ++--- src/libffmpeg/libavcodec/ppc/dsputil_ppc.h | 21 +- src/libffmpeg/libavcodec/ppc/fdct_altivec.c | 15 +- src/libffmpeg/libavcodec/ppc/fft_altivec.c | 95 +--- src/libffmpeg/libavcodec/ppc/gcc_fixes.h | 16 + src/libffmpeg/libavcodec/ppc/gmc_altivec.c | 38 +- src/libffmpeg/libavcodec/ppc/idct_altivec.c | 24 +- src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c | 58 +-- src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c | 14 +- 11 files changed, 346 insertions(+), 754 deletions(-) (limited to 'src/libffmpeg/libavcodec/ppc') diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c index 81a32c9e3..6f48893a4 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c @@ -3,18 +3,20 @@ * Copyright (c) 2002 Dieter Shirley * Copyright (c) 2003-2004 Romain Dolbeau * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -616,61 +618,28 @@ void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1, } void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) { -#ifdef ALTIVEC_USE_REFERENCE_C_CODE - int i; - for(i=0; i+7l))) - - ((((*((uint32_t *) (block))) ^ - ((((const struct unaligned_32 *) (pixels))-> - l))) & 0xFEFEFEFEUL) >> 1)); - *((uint32_t *) (block + 4)) = - (((*((uint32_t *) (block + 4))) | - ((((const struct unaligned_32 *) (pixels + 4))->l))) - - ((((*((uint32_t *) (block + 4))) ^ - ((((const struct unaligned_32 *) (pixels + - 4))-> - l))) & 0xFEFEFEFEUL) >> 1)); - pixels += line_size; - block += line_size; - } -POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1); - -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv; int i; @@ -830,52 +755,12 @@ POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1); } POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1); - -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } /* next one assumes that ((line_size % 8) == 0) */ void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h) { POWERPC_PERF_DECLARE(altivec_put_pixels8_xy2_num, 1); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE - int j; -POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1); - for (j = 0; j < 2; j++) { - int i; - const uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - const uint32_t b = - (((const struct unaligned_32 *) (pixels + 1))->l); - uint32_t l0 = - (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL; - uint32_t h0 = - ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - uint32_t l1, h1; - pixels += line_size; - for (i = 0; i < h; i += 2) { - uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l); - l1 = (a & 0x03030303UL) + (b & 0x03030303UL); - h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); - pixels += line_size; - block += line_size; - a = (((const struct unaligned_32 *) (pixels))->l); - b = (((const struct unaligned_32 *) (pixels + 1))->l); - l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL; - h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); - pixels += line_size; - block += line_size; - } pixels += 4 - line_size * (h + 1); - block += 4 - line_size * h; - } - -POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1); - -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ register int i; register vector unsigned char pixelsv1, pixelsv2, @@ -946,51 +831,12 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1); } POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1); -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } /* next one assumes that ((line_size % 8) == 0) */ void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h) { POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels8_xy2_num, 1); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE - int j; -POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); - for (j = 0; j < 2; j++) { - int i; - const uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - const uint32_t b = - 
(((const struct unaligned_32 *) (pixels + 1))->l); - uint32_t l0 = - (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL; - uint32_t h0 = - ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - uint32_t l1, h1; - pixels += line_size; - for (i = 0; i < h; i += 2) { - uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l); - l1 = (a & 0x03030303UL) + (b & 0x03030303UL); - h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); - pixels += line_size; - block += line_size; - a = (((const struct unaligned_32 *) (pixels))->l); - b = (((const struct unaligned_32 *) (pixels + 1))->l); - l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL; - h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); - pixels += line_size; - block += line_size; - } pixels += 4 - line_size * (h + 1); - block += 4 - line_size * h; - } - -POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); - -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ register int i; register vector unsigned char pixelsv1, pixelsv2, @@ -1062,51 +908,12 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); } POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } /* next one assumes that ((line_size % 16) == 0) */ void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h) { POWERPC_PERF_DECLARE(altivec_put_pixels16_xy2_num, 1); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE - int j; -POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1); - for (j = 0; j < 4; j++) { - int i; - const uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - const uint32_t b = - (((const struct unaligned_32 *) (pixels + 1))->l); - uint32_t l0 = - (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL; - uint32_t h0 = - ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - uint32_t l1, h1; - pixels += line_size; - for (i = 0; i < h; i += 2) { - uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l); - l1 = (a & 0x03030303UL) + (b & 0x03030303UL); - h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); - pixels += line_size; - block += line_size; - a = (((const struct unaligned_32 *) (pixels))->l); - b = (((const struct unaligned_32 *) (pixels + 1))->l); - l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL; - h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); - pixels += line_size; - block += line_size; - } pixels += 4 - line_size * (h + 1); - block += 4 - line_size * h; - } - -POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1); - -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ register int i; register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4; @@ -1183,51 +990,12 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1); } POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1); -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } /* next one assumes that ((line_size % 16) == 0) */ void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h) { POWERPC_PERF_DECLARE(altivec_put_no_rnd_pixels16_xy2_num, 1); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE - int 
j; -POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); - for (j = 0; j < 4; j++) { - int i; - const uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - const uint32_t b = - (((const struct unaligned_32 *) (pixels + 1))->l); - uint32_t l0 = - (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL; - uint32_t h0 = - ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - uint32_t l1, h1; - pixels += line_size; - for (i = 0; i < h; i += 2) { - uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l); - l1 = (a & 0x03030303UL) + (b & 0x03030303UL); - h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); - pixels += line_size; - block += line_size; - a = (((const struct unaligned_32 *) (pixels))->l); - b = (((const struct unaligned_32 *) (pixels + 1))->l); - l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x01010101UL; - h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = - h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL); - pixels += line_size; - block += line_size; - } pixels += 4 - line_size * (h + 1); - block += 4 - line_size * h; - } - -POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); - -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ register int i; register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4; @@ -1305,34 +1073,32 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); } POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){ POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1); - int sum; - register const_vector unsigned char vzero = (const_vector unsigned char)vec_splat_u8(0); - register vector signed short temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7; + int sum; + register const_vector unsigned char vzero = + (const_vector unsigned char)vec_splat_u8(0); + register vector signed short temp0, temp1, temp2, temp3, temp4, + temp5, temp6, temp7; POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1); { - register const_vector signed short vprod1 = (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1); - register const_vector signed short vprod2 = (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1); - register const_vector signed short vprod3 = (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1); + register const_vector signed short vprod1 =(const_vector signed short) + AVV( 1,-1, 1,-1, 1,-1, 1,-1); + register const_vector signed short vprod2 =(const_vector signed short) + AVV( 1, 1,-1,-1, 1, 1,-1,-1); + register const_vector signed short vprod3 =(const_vector signed short) + AVV( 1, 1, 1, 1,-1,-1,-1,-1); register const_vector unsigned char perm1 = (const_vector unsigned char) - AVV(0x02, 0x03, 0x00, 0x01, - 0x06, 0x07, 0x04, 0x05, - 0x0A, 0x0B, 0x08, 0x09, - 0x0E, 0x0F, 0x0C, 0x0D); + AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05, + 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D); register const_vector unsigned char perm2 = (const_vector unsigned char) - AVV(0x04, 0x05, 0x06, 0x07, - 0x00, 0x01, 0x02, 0x03, - 0x0C, 0x0D, 0x0E, 0x0F, - 0x08, 0x09, 0x0A, 0x0B); + AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, + 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B); register const_vector unsigned char perm3 = (const_vector unsigned char) - AVV(0x08, 0x09, 0x0A, 
0x0B, - 0x0C, 0x0D, 0x0E, 0x0F, - 0x00, 0x01, 0x02, 0x03, - 0x04, 0x05, 0x06, 0x07); + AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07); #define ONEITERBUTTERFLY(i, res) \ { \ @@ -1443,45 +1209,46 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1); */ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) { - int sum; - register vector signed short - temp0 REG_v(v0), - temp1 REG_v(v1), - temp2 REG_v(v2), - temp3 REG_v(v3), - temp4 REG_v(v4), - temp5 REG_v(v5), - temp6 REG_v(v6), - temp7 REG_v(v7); - register vector signed short - temp0S REG_v(v8), - temp1S REG_v(v9), - temp2S REG_v(v10), - temp3S REG_v(v11), - temp4S REG_v(v12), - temp5S REG_v(v13), - temp6S REG_v(v14), - temp7S REG_v(v15); - register const_vector unsigned char vzero REG_v(v31)= (const_vector unsigned char)vec_splat_u8(0); + int sum; + register vector signed short + temp0 REG_v(v0), + temp1 REG_v(v1), + temp2 REG_v(v2), + temp3 REG_v(v3), + temp4 REG_v(v4), + temp5 REG_v(v5), + temp6 REG_v(v6), + temp7 REG_v(v7); + register vector signed short + temp0S REG_v(v8), + temp1S REG_v(v9), + temp2S REG_v(v10), + temp3S REG_v(v11), + temp4S REG_v(v12), + temp5S REG_v(v13), + temp6S REG_v(v14), + temp7S REG_v(v15); + register const_vector unsigned char vzero REG_v(v31)= + (const_vector unsigned char)vec_splat_u8(0); { - register const_vector signed short vprod1 REG_v(v16)= (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1); - register const_vector signed short vprod2 REG_v(v17)= (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1); - register const_vector signed short vprod3 REG_v(v18)= (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1); - register const_vector unsigned char perm1 REG_v(v19)= (const_vector unsigned char) - AVV(0x02, 0x03, 0x00, 0x01, - 0x06, 0x07, 0x04, 0x05, - 0x0A, 0x0B, 0x08, 0x09, - 0x0E, 0x0F, 0x0C, 0x0D); - register const_vector unsigned char perm2 REG_v(v20)= (const_vector unsigned char) - AVV(0x04, 0x05, 0x06, 0x07, - 0x00, 0x01, 0x02, 0x03, - 0x0C, 0x0D, 0x0E, 0x0F, - 0x08, 0x09, 0x0A, 0x0B); - register const_vector unsigned char perm3 REG_v(v21)= (const_vector unsigned char) - AVV(0x08, 0x09, 0x0A, 0x0B, - 0x0C, 0x0D, 0x0E, 0x0F, - 0x00, 0x01, 0x02, 0x03, - 0x04, 0x05, 0x06, 0x07); + register const_vector signed short vprod1 REG_v(v16)= + (const_vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1); + register const_vector signed short vprod2 REG_v(v17)= + (const_vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1); + register const_vector signed short vprod3 REG_v(v18)= + (const_vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1); + register const_vector unsigned char perm1 REG_v(v19)= + (const_vector unsigned char) + AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05, + 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D); + register const_vector unsigned char perm2 REG_v(v20)= + (const_vector unsigned char) + AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03, + 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B); + register const_vector unsigned char perm3 REG_v(v21)= + (const_vector unsigned char) + AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07); #define ONEITERBUTTERFLY(i, res1, res2) \ { \ @@ -1642,27 +1409,27 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){ 
POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1); - int score; + int score; POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1); - score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8); - if (h==16) { - dst += 8*stride; - src += 8*stride; - score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8); - } + score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8); + if (h==16) { + dst += 8*stride; + src += 8*stride; + score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8); + } POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1); - return score; + return score; } int has_altivec(void) { #ifdef __AMIGAOS4__ - ULONG result = 0; - extern struct ExecIFace *IExec; + ULONG result = 0; + extern struct ExecIFace *IExec; - IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE); - if (result == VECTORTYPE_ALTIVEC) return 1; - return 0; + IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE); + if (result == VECTORTYPE_ALTIVEC) return 1; + return 0; #else /* __AMIGAOS4__ */ #ifdef CONFIG_DARWIN @@ -1698,112 +1465,127 @@ int has_altivec(void) #endif /* __AMIGAOS4__ */ } +static void vorbis_inverse_coupling_altivec(float *mag, float *ang, + int blocksize) +{ + int i; + vector float m, a; + vector bool int t0, t1; + const vector unsigned int v_31 = //XXX + vec_add(vec_add(vec_splat_u32(15),vec_splat_u32(15)),vec_splat_u32(1)); + for(i=0; il); - const uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l); - uint32_t l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL; - uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - uint32_t l1, h1; - pixels += line_size; - for (i = 0; i < h; i += 2) { - uint32_t a = (((const struct unaligned_32 *) (pixels))->l); - uint32_t b = (((const struct unaligned_32 *) (pixels + 1))->l); - l1 = (a & 0x03030303UL) + (b & 0x03030303UL); - h1 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); - pixels += line_size; - block += line_size; - a = (((const struct unaligned_32 *) (pixels))->l); - b = (((const struct unaligned_32 *) (pixels + 1))->l); - l0 = (a & 0x03030303UL) + (b & 0x03030303UL) + 0x02020202UL; - h0 = ((a & 0xFCFCFCFCUL) >> 2) + ((b & 0xFCFCFCFCUL) >> 2); - *((uint32_t *) block) = rnd_avg32(*((uint32_t *) block), h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL)); - pixels += line_size; - block += line_size; - } pixels += 4 - line_size * (h + 1); - block += 4 - line_size * h; - } -POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1); -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ - register int i; - register vector unsigned char - pixelsv1, pixelsv2, - pixelsavg; - register vector unsigned char - blockv, temp1, temp2, blocktemp; - register vector unsigned short - pixelssum1, pixelssum2, temp3; - register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0); - register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2); - - temp1 = vec_ld(0, pixels); - temp2 = vec_ld(16, pixels); - pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels)); - if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) - { - pixelsv2 = temp2; - } - else - { - pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels)); - } - pixelsv1 = vec_mergeh(vczero, pixelsv1); - pixelsv2 = vec_mergeh(vczero, pixelsv2); - pixelssum1 = vec_add((vector unsigned short)pixelsv1, - (vector unsigned short)pixelsv2); - pixelssum1 = vec_add(pixelssum1, vctwo); + register int i; + register 
vector unsigned char pixelsv1, pixelsv2, pixelsavg; + register vector unsigned char blockv, temp1, temp2, blocktemp; + register vector unsigned short pixelssum1, pixelssum2, temp3; + + register const_vector unsigned char vczero = (const_vector unsigned char) + vec_splat_u8(0); + register const_vector unsigned short vctwo = (const_vector unsigned short) + vec_splat_u16(2); + + temp1 = vec_ld(0, pixels); + temp2 = vec_ld(16, pixels); + pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels)); + if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) { + pixelsv2 = temp2; + } else { + pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels)); + } + pixelsv1 = vec_mergeh(vczero, pixelsv1); + pixelsv2 = vec_mergeh(vczero, pixelsv2); + pixelssum1 = vec_add((vector unsigned short)pixelsv1, + (vector unsigned short)pixelsv2); + pixelssum1 = vec_add(pixelssum1, vctwo); POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1); - for (i = 0; i < h ; i++) { - int rightside = ((unsigned long)block & 0x0000000F); - blockv = vec_ld(0, block); + for (i = 0; i < h ; i++) { + int rightside = ((unsigned long)block & 0x0000000F); + blockv = vec_ld(0, block); + + temp1 = vec_ld(line_size, pixels); + temp2 = vec_ld(line_size + 16, pixels); + pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels)); + if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) + { + pixelsv2 = temp2; + } else { + pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels)); + } + + pixelsv1 = vec_mergeh(vczero, pixelsv1); + pixelsv2 = vec_mergeh(vczero, pixelsv2); + pixelssum2 = vec_add((vector unsigned short)pixelsv1, + (vector unsigned short)pixelsv2); + temp3 = vec_add(pixelssum1, pixelssum2); + temp3 = vec_sra(temp3, vctwo); + pixelssum1 = vec_add(pixelssum2, vctwo); + pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero); + + if (rightside) { + blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1)); + } else { + blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3)); + } + + blockv = vec_avg(blocktemp, blockv); + vec_st(blockv, 0, block); - temp1 = vec_ld(line_size, pixels); - temp2 = vec_ld(line_size + 16, pixels); - pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels)); - if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) - { - pixelsv2 = temp2; - } - else - { - pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels)); - } - - pixelsv1 = vec_mergeh(vczero, pixelsv1); - pixelsv2 = vec_mergeh(vczero, pixelsv2); - pixelssum2 = vec_add((vector unsigned short)pixelsv1, - (vector unsigned short)pixelsv2); - temp3 = vec_add(pixelssum1, pixelssum2); - temp3 = vec_sra(temp3, vctwo); - pixelssum1 = vec_add(pixelssum2, vctwo); - pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero); - - if (rightside) - { - blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1)); - } - else - { - blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3)); - } - - blockv = vec_avg(blocktemp, blockv); - vec_st(blockv, 0, block); - - block += line_size; - pixels += line_size; - } + block += line_size; + pixels += line_size; + } POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1); -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ +} + +void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx) +{ + c->pix_abs[0][1] = sad16_x2_altivec; + c->pix_abs[0][2] = sad16_y2_altivec; + c->pix_abs[0][3] = sad16_xy2_altivec; + c->pix_abs[0][0] = sad16_altivec; + c->pix_abs[1][0] = sad8_altivec; + c->sad[0]= sad16_altivec; + c->sad[1]= sad8_altivec; + c->pix_norm1 = 
pix_norm1_altivec; + c->sse[1]= sse8_altivec; + c->sse[0]= sse16_altivec; + c->pix_sum = pix_sum_altivec; + c->diff_pixels = diff_pixels_altivec; + c->get_pixels = get_pixels_altivec; + c->add_bytes= add_bytes_altivec; + c->put_pixels_tab[0][0] = put_pixels16_altivec; + /* the two functions do the same thing, so use the same code */ + c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec; + c->avg_pixels_tab[0][0] = avg_pixels16_altivec; + c->avg_pixels_tab[1][0] = avg_pixels8_altivec; + c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec; + c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec; + c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec; + c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec; + c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec; + + c->hadamard8_diff[0] = hadamard8_diff16_altivec; + c->hadamard8_diff[1] = hadamard8_diff8x8_altivec; +#ifdef CONFIG_VORBIS_DECODER + c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec; +#endif } diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h index ac54817d0..560d778bb 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h +++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h @@ -3,18 +3,20 @@ * Copyright (c) 2002 Dieter Shirley * Copyright (c) 2003-2004 Romain Dolbeau * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -25,34 +27,11 @@ #ifdef HAVE_ALTIVEC -extern int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); -extern int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); -extern int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); -extern int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); -extern int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); -extern int pix_norm1_altivec(uint8_t *pix, int line_size); -extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); -extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h); -extern int pix_sum_altivec(uint8_t * pix, int line_size); -extern void diff_pixels_altivec(DCTELEM* block, const uint8_t* s1, const uint8_t* s2, int stride); -extern void get_pixels_altivec(DCTELEM* block, const uint8_t * pixels, int line_size); +extern int has_altivec(void); -extern void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w); -extern void put_pixels_clamped_altivec(const DCTELEM *block, uint8_t *restrict pixels, int line_size); -extern void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); -extern void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); -extern void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h); -extern void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); -extern void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); -extern void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h); -extern void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, int line_size, int h); -extern int hadamard8_diff8x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h); -extern int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h); -extern void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); +void put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); -extern void gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder); - -extern int has_altivec(void); +void avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, int line_size, int h); // used to build registers permutation vectors (vcprm) // the 's' are for words in the _s_econd vector @@ -88,10 +67,40 @@ extern int has_altivec(void); #define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d} #endif -#else /* HAVE_ALTIVEC */ -#ifdef ALTIVEC_USE_REFERENCE_C_CODE -#error "I can't use ALTIVEC_USE_REFERENCE_C_CODE if I don't use HAVE_ALTIVEC" -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ +// Transpose 8x8 matrix of 16-bit elements (in-place) +#define TRANSPOSE8(a,b,c,d,e,f,g,h) \ +do { \ + vector signed short A1, B1, C1, D1, E1, F1, G1, H1; \ + vector signed short A2, B2, C2, D2, E2, F2, G2, H2; \ + \ + A1 = vec_mergeh (a, e); \ + B1 = vec_mergel (a, e); \ + C1 = vec_mergeh (b, f); \ + 
D1 = vec_mergel (b, f); \ + E1 = vec_mergeh (c, g); \ + F1 = vec_mergel (c, g); \ + G1 = vec_mergeh (d, h); \ + H1 = vec_mergel (d, h); \ + \ + A2 = vec_mergeh (A1, E1); \ + B2 = vec_mergel (A1, E1); \ + C2 = vec_mergeh (B1, F1); \ + D2 = vec_mergel (B1, F1); \ + E2 = vec_mergeh (C1, G1); \ + F2 = vec_mergel (C1, G1); \ + G2 = vec_mergeh (D1, H1); \ + H2 = vec_mergel (D1, H1); \ + \ + a = vec_mergeh (A2, E2); \ + b = vec_mergel (A2, E2); \ + c = vec_mergeh (B2, F2); \ + d = vec_mergel (B2, F2); \ + e = vec_mergeh (C2, G2); \ + f = vec_mergel (C2, G2); \ + g = vec_mergeh (D2, H2); \ + h = vec_mergel (D2, H2); \ +} while (0) + #endif /* HAVE_ALTIVEC */ #endif /* _DSPUTIL_ALTIVEC_ */ diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c index b63c8dd84..9169eaef0 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c @@ -3,18 +3,20 @@ * Copyright (c) 2002 Dieter Shirley * Copyright (c) 2003-2004 Romain Dolbeau * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -24,22 +26,21 @@ #ifdef HAVE_ALTIVEC #include "dsputil_altivec.h" -#endif extern void fdct_altivec(int16_t *block); +extern void gmc1_altivec(uint8_t *dst, uint8_t *src, int stride, int h, + int x16, int y16, int rounder); extern void idct_put_altivec(uint8_t *dest, int line_size, int16_t *block); extern void idct_add_altivec(uint8_t *dest, int line_size, int16_t *block); -extern void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width); -extern void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, - DWTELEM *b2, DWTELEM *b3, - DWTELEM *b4, DWTELEM *b5, - int width); -extern void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride, - uint8_t * * block, int b_w, int b_h, - int src_x, int src_y, int src_stride, - slice_buffer * sb, int add, - uint8_t * dst8); +void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx); + +void dsputil_init_altivec(DSPContext* c, AVCodecContext *avctx); +void vc1dsp_init_altivec(DSPContext* c, AVCodecContext *avctx); +void snow_init_altivec(DSPContext* c, AVCodecContext *avctx); +void float_init_altivec(DSPContext* c, AVCodecContext *avctx); + +#endif int mm_flags = 0; @@ -100,7 +101,7 @@ void powerpc_display_perf_report(void) { if (perfdata[j][i][powerpc_data_num] != (unsigned long long)0) av_log(NULL, AV_LOG_INFO, - " Function \"%s\" (pmc%d):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n", + " Function \"%s\" (pmc%d):\n\tmin: %"PRIu64"\n\tmax: %"PRIu64"\n\tavg: %1.2lf (%"PRIu64")\n", perfname[i], 
j+1, perfdata[j][i][powerpc_data_min], @@ -174,7 +175,7 @@ POWERPC_PERF_STOP_COUNT(powerpc_clear_blocks_dcbz32, 1); /* same as above, when dcbzl clear a whole 128B cache line i.e. the PPC970 aka G5 */ -#ifndef NO_DCBZL +#ifdef HAVE_DCBZL void clear_blocks_dcbz128_ppc(DCTELEM *blocks) { POWERPC_PERF_DECLARE(powerpc_clear_blocks_dcbz128, 1); @@ -204,7 +205,7 @@ void clear_blocks_dcbz128_ppc(DCTELEM *blocks) } #endif -#ifndef NO_DCBZL +#ifdef HAVE_DCBZL /* check dcbz report how many bytes are set to 0 by dcbz */ /* update 24/06/2003 : replace dcbz by dcbzl to get the intended effect (Apple "fixed" dcbz) @@ -248,69 +249,43 @@ long check_dcbzl_effect(void) } #endif - -void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx); +static void prefetch_ppc(void *mem, int stride, int h) +{ + register const uint8_t *p = mem; + do { + asm volatile ("dcbt 0,%0" : : "r" (p)); + p+= stride; + } while(--h); +} void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) { // Common optimizations whether Altivec is available or not - - switch (check_dcbzl_effect()) { - case 32: - c->clear_blocks = clear_blocks_dcbz32_ppc; - break; - case 128: - c->clear_blocks = clear_blocks_dcbz128_ppc; - break; - default: - break; - } + c->prefetch = prefetch_ppc; + switch (check_dcbzl_effect()) { + case 32: + c->clear_blocks = clear_blocks_dcbz32_ppc; + break; + case 128: + c->clear_blocks = clear_blocks_dcbz128_ppc; + break; + default: + break; + } #ifdef HAVE_ALTIVEC - dsputil_h264_init_ppc(c, avctx); + if(ENABLE_H264_DECODER) dsputil_h264_init_ppc(c, avctx); if (has_altivec()) { mm_flags |= MM_ALTIVEC; - // Altivec specific optimisations - c->pix_abs[0][1] = sad16_x2_altivec; - c->pix_abs[0][2] = sad16_y2_altivec; - c->pix_abs[0][3] = sad16_xy2_altivec; - c->pix_abs[0][0] = sad16_altivec; - c->pix_abs[1][0] = sad8_altivec; - c->sad[0]= sad16_altivec; - c->sad[1]= sad8_altivec; - c->pix_norm1 = pix_norm1_altivec; - c->sse[1]= sse8_altivec; - c->sse[0]= sse16_altivec; - c->pix_sum = pix_sum_altivec; - c->diff_pixels = diff_pixels_altivec; - c->get_pixels = get_pixels_altivec; -// next one disabled as it's untested. 
-#if 0 - c->add_bytes= add_bytes_altivec; -#endif /* 0 */ - c->put_pixels_tab[0][0] = put_pixels16_altivec; - /* the two functions do the same thing, so use the same code */ - c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec; - c->avg_pixels_tab[0][0] = avg_pixels16_altivec; - c->avg_pixels_tab[1][0] = avg_pixels8_altivec; - c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec; - c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec; - c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec; - c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec; - c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec; - + dsputil_init_altivec(c, avctx); + if(ENABLE_SNOW_DECODER) snow_init_altivec(c, avctx); + if(ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER) + vc1dsp_init_altivec(c, avctx); + float_init_altivec(c, avctx); c->gmc1 = gmc1_altivec; - c->hadamard8_diff[0] = hadamard8_diff16_altivec; - c->hadamard8_diff[1] = hadamard8_diff8x8_altivec; - - - c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec; - c->vertical_compose97i = ff_snow_vertical_compose97i_altivec; - c->inner_add_yblock = ff_snow_inner_add_yblock_altivec; - #ifdef CONFIG_ENCODERS if (avctx->dct_algo == FF_DCT_AUTO || avctx->dct_algo == FF_DCT_ALTIVEC) @@ -319,20 +294,16 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) } #endif //CONFIG_ENCODERS - if (avctx->lowres==0) - { + if (avctx->lowres==0) + { if ((avctx->idct_algo == FF_IDCT_AUTO) || (avctx->idct_algo == FF_IDCT_ALTIVEC)) { c->idct_put = idct_put_altivec; c->idct_add = idct_add_altivec; -#ifndef ALTIVEC_USE_REFERENCE_C_CODE c->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ - c->idct_permutation_type = FF_NO_IDCT_PERM; -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } - } + } #ifdef POWERPC_PERFORMANCE_REPORT { @@ -349,11 +320,6 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) } } #endif /* POWERPC_PERFORMANCE_REPORT */ - } else -#endif /* HAVE_ALTIVEC */ - { - // Non-AltiVec PPC optimisations - - // ... pending ... } +#endif /* HAVE_ALTIVEC */ } diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h index 966ffa71a..ab2b05780 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h +++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h @@ -1,35 +1,26 @@ /* * Copyright (c) 2003-2004 Romain Dolbeau * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _DSPUTIL_PPC_ #define _DSPUTIL_PPC_ -#ifdef CONFIG_DARWIN -/* The Apple assembler shipped w/ gcc-3.3 knows about DCBZL, previous assemblers don't - We assume here that the Darwin GCC is from Apple.... */ -#if (__GNUC__ * 100 + __GNUC_MINOR__ < 303) -#define NO_DCBZL -#endif -#else /* CONFIG_DARWIN */ -/* I don't think any non-Apple assembler knows about DCBZL */ -#define NO_DCBZL -#endif /* CONFIG_DARWIN */ - #ifdef POWERPC_PERFORMANCE_REPORT void powerpc_display_perf_report(void); /* the 604* have 2, the G3* have 4, the G4s have 6, diff --git a/src/libffmpeg/libavcodec/ppc/fdct_altivec.c b/src/libffmpeg/libavcodec/ppc/fdct_altivec.c index f5778c24e..2418c32bb 100644 --- a/src/libffmpeg/libavcodec/ppc/fdct_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/fdct_altivec.c @@ -2,18 +2,20 @@ * AltiVec optimized library for the FFMPEG Multimedia System * Copyright (C) 2003 James Klicman * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -196,12 +198,6 @@ static vector float fdctconsts[3] = { void fdct_altivec(int16_t *block) { POWERPC_PERF_DECLARE(altivec_fdct, 1); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE -POWERPC_PERF_START_COUNT(altivec_fdct, 1); - void ff_jpeg_fdct_islow(int16_t *block); - ff_jpeg_fdct_islow(block); -POWERPC_PERF_STOP_COUNT(altivec_fdct, 1); -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ vector signed short *bp; vector float *cp; vector float b00, b10, b20, b30, b40, b50, b60, b70; @@ -492,7 +488,6 @@ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1); /* }}} */ POWERPC_PERF_STOP_COUNT(altivec_fdct, 1); -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } /* vim:set foldmethod=marker foldlevel=0: */ diff --git a/src/libffmpeg/libavcodec/ppc/fft_altivec.c b/src/libffmpeg/libavcodec/ppc/fft_altivec.c index f4ea78359..384a774ff 100644 --- a/src/libffmpeg/libavcodec/ppc/fft_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/fft_altivec.c @@ -4,18 +4,20 @@ * Copyright (c) 2003 Romain Dolbeau * Based on code Copyright (c) 2002 Fabrice Bellard. * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. 
+ * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "../dsputil.h" @@ -63,88 +65,7 @@ void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z) { POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE - int ln = s->nbits; - int j, np, np2; - int nblocks, nloops; - register FFTComplex *p, *q; - FFTComplex *exptab = s->exptab; - int l; - FFTSample tmp_re, tmp_im; - -POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); - - np = 1 << ln; - - /* pass 0 */ - - p=&z[0]; - j=(np >> 1); - do { - BF(p[0].re, p[0].im, p[1].re, p[1].im, - p[0].re, p[0].im, p[1].re, p[1].im); - p+=2; - } while (--j != 0); - - /* pass 1 */ - - - p=&z[0]; - j=np >> 2; - if (s->inverse) { - do { - BF(p[0].re, p[0].im, p[2].re, p[2].im, - p[0].re, p[0].im, p[2].re, p[2].im); - BF(p[1].re, p[1].im, p[3].re, p[3].im, - p[1].re, p[1].im, -p[3].im, p[3].re); - p+=4; - } while (--j != 0); - } else { - do { - BF(p[0].re, p[0].im, p[2].re, p[2].im, - p[0].re, p[0].im, p[2].re, p[2].im); - BF(p[1].re, p[1].im, p[3].re, p[3].im, - p[1].re, p[1].im, p[3].im, -p[3].re); - p+=4; - } while (--j != 0); - } - /* pass 2 .. ln-1 */ - - nblocks = np >> 3; - nloops = 1 << 2; - np2 = np >> 1; - do { - p = z; - q = z + nloops; - for (j = 0; j < nblocks; ++j) { - BF(p->re, p->im, q->re, q->im, - p->re, p->im, q->re, q->im); - - p++; - q++; - for(l = nblocks; l < np2; l += nblocks) { - CMUL(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im); - BF(p->re, p->im, q->re, q->im, - p->re, p->im, tmp_re, tmp_im); - p++; - q++; - } - - p += nloops; - q += nloops; - } - nblocks = nblocks >> 1; - nloops = nloops << 1; - } while (nblocks != 0); - -POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6); - -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ -#ifdef CONFIG_DARWIN - register const vector float vczero = (const vector float)(0.); -#else - register const vector float vczero = (const vector float){0.,0.,0.,0.}; -#endif + register const vector float vczero = (const vector float)vec_splat_u32(0.); int ln = s->nbits; int j, np, np2; @@ -242,6 +163,4 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); } while (nblocks != 0); POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6); - -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } diff --git a/src/libffmpeg/libavcodec/ppc/gcc_fixes.h b/src/libffmpeg/libavcodec/ppc/gcc_fixes.h index 943905bc5..5a4a55188 100644 --- a/src/libffmpeg/libavcodec/ppc/gcc_fixes.h +++ b/src/libffmpeg/libavcodec/ppc/gcc_fixes.h @@ -2,6 +2,22 @@ * gcc fixes for altivec. * Used to workaround broken gcc (FSF gcc-3 pre gcc-3.3) * and to stay somewhat compatible with Darwin. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _GCC_FIXES_ diff --git a/src/libffmpeg/libavcodec/ppc/gmc_altivec.c b/src/libffmpeg/libavcodec/ppc/gmc_altivec.c index 04978d825..42c936bb3 100644 --- a/src/libffmpeg/libavcodec/ppc/gmc_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/gmc_altivec.c @@ -3,18 +3,20 @@ * AltiVec-enabled * Copyright (c) 2003 Romain Dolbeau * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -32,32 +34,6 @@ void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int stride, int h, int x16, int y16, int rounder) { POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE - const int A=(16-x16)*(16-y16); - const int B=( x16)*(16-y16); - const int C=(16-x16)*( y16); - const int D=( x16)*( y16); - int i; - -POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); - - for(i=0; i>8; - dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8; - dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8; - dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8; - dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8; - dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8; - dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8; - dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8; - dst+= stride; - src+= stride; - } - -POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND); - -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ const unsigned short __attribute__ ((aligned(16))) rounder_a[8] = {rounder, rounder, rounder, rounder, rounder, rounder, rounder, rounder}; @@ -167,6 +143,4 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); } POWERPC_PERF_STOP_COUNT(altivec_gmc1_num, GMC1_PERF_COND); - -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } diff --git a/src/libffmpeg/libavcodec/ppc/idct_altivec.c b/src/libffmpeg/libavcodec/ppc/idct_altivec.c index 93d63cfd3..cee46fc25 100644 --- a/src/libffmpeg/libavcodec/ppc/idct_altivec.c +++ 
b/src/libffmpeg/libavcodec/ppc/idct_altivec.c @@ -1,18 +1,20 @@ /* * Copyright (c) 2001 Michel Lespinasse * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * */ @@ -167,12 +169,6 @@ static const_vector_s16_t constants[5] = { void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block) { POWERPC_PERF_DECLARE(altivec_idct_put_num, 1); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE -POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1); - void simple_idct_put(uint8_t *dest, int line_size, int16_t *block); - simple_idct_put(dest, stride, (int16_t*)block); -POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1); -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ vector_u8_t tmp; #ifdef POWERPC_PERFORMANCE_REPORT @@ -195,18 +191,11 @@ POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1); COPY (dest, vx7) POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1); -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block) { POWERPC_PERF_DECLARE(altivec_idct_add_num, 1); -#ifdef ALTIVEC_USE_REFERENCE_C_CODE -POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1); - void simple_idct_add(uint8_t *dest, int line_size, int16_t *block); - simple_idct_add(dest, stride, (int16_t*)block); -POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1); -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ vector_u8_t tmp; vector_s16_t tmp2, tmp3; vector_u8_t perm0; @@ -244,6 +233,5 @@ POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1); ADD (dest, vx7, perm1) POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1); -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c b/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c index 7a771a8ec..3822cb20e 100644 --- a/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c @@ -4,18 +4,20 @@ * dct_unquantize_h263_altivec: * Copyright (c) 2003 Romain Dolbeau * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -50,39 +52,6 @@ do { \ d = vec_mergel(_trans_acl, _trans_bdl); \ } while (0) -#define TRANSPOSE8(a,b,c,d,e,f,g,h) \ -do { \ - __typeof__(a) _A1, _B1, _C1, _D1, _E1, _F1, _G1, _H1; \ - __typeof__(a) _A2, _B2, _C2, _D2, _E2, _F2, _G2, _H2; \ - \ - _A1 = vec_mergeh (a, e); \ - _B1 = vec_mergel (a, e); \ - _C1 = vec_mergeh (b, f); \ - _D1 = vec_mergel (b, f); \ - _E1 = vec_mergeh (c, g); \ - _F1 = vec_mergel (c, g); \ - _G1 = vec_mergeh (d, h); \ - _H1 = vec_mergel (d, h); \ - \ - _A2 = vec_mergeh (_A1, _E1); \ - _B2 = vec_mergel (_A1, _E1); \ - _C2 = vec_mergeh (_B1, _F1); \ - _D2 = vec_mergel (_B1, _F1); \ - _E2 = vec_mergeh (_C1, _G1); \ - _F2 = vec_mergel (_C1, _G1); \ - _G2 = vec_mergeh (_D1, _H1); \ - _H2 = vec_mergel (_D1, _H1); \ - \ - a = vec_mergeh (_A2, _E2); \ - b = vec_mergel (_A2, _E2); \ - c = vec_mergeh (_B2, _F2); \ - d = vec_mergel (_B2, _F2); \ - e = vec_mergeh (_C2, _G2); \ - f = vec_mergel (_C2, _G2); \ - g = vec_mergeh (_D2, _H2); \ - h = vec_mergel (_D2, _H2); \ -} while (0) - // Loads a four-byte value (int or float) from the target address // into every element in the target vector. Only works if the @@ -552,19 +521,6 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1); nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; } -#ifdef ALTIVEC_USE_REFERENCE_C_CODE - for(;i<=nCoeffs;i++) { - level = block[i]; - if (level) { - if (level < 0) { - level = level * qmul - qadd; - } else { - level = level * qmul + qadd; - } - block[i] = level; - } - } -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ { register const_vector signed short vczero = (const_vector signed short)vec_splat_s16(0); short __attribute__ ((aligned(16))) qmul8[] = @@ -643,7 +599,5 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1); block[0] = backup_0; } } -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ - POWERPC_PERF_STOP_COUNT(altivec_dct_unquantize_h263_num, nCoeffs == 63); } diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c b/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c index b391b4294..c5e822f77 100644 --- a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c +++ b/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c @@ -1,18 +1,20 @@ /* * Copyright (c) 2002 Dieter Shirley * - * This library is free software; you can redistribute it and/or + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. + * version 2.1 of the License, or (at your option) any later version. * - * This library is distributed in the hope that it will be useful, + * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software + * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ @@ -46,11 +48,7 @@ void MPV_common_init_ppc(MpegEncContext *s) { s->dsp.idct_put = idct_put_altivec; s->dsp.idct_add = idct_add_altivec; -#ifndef ALTIVEC_USE_REFERENCE_C_CODE s->dsp.idct_permutation_type = FF_TRANSPOSE_IDCT_PERM; -#else /* ALTIVEC_USE_REFERENCE_C_CODE */ - s->dsp.idct_permutation_type = FF_NO_IDCT_PERM; -#endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } } -- cgit v1.2.3 From 6e8ff6e5c232de4b8235626af31ab85345120a93 Mon Sep 17 00:00:00 2001 From: Miguel Freitas Date: Sat, 13 Jan 2007 21:19:52 +0000 Subject: * ffmpeg update to 51.28.0 * Work around buggy ffmpeg codecs that don't release their DR1 frames. * Fix several segfaults and freezing problems with H264 streams that use a lot of reference frames (e.g. 15). * Initial support for enabling/disabling ffmpeg codecs. Codecs may be disabled in groups via --disable-ffmpeg-uncommon-codecs/--disable-ffmpeg-popular-codecs. Think of "uncommon" codecs as those people would never want to play on their PDAs (removing them saves memory). Note: currently both uncommon and popular codecs are _built_ but disabled; that is, the build system still needs some improvements to really save memory. Warning: a non-autoconf guru is playing with the build system, so breakage is likely. CVS patchset: 8499 CVS date: 2007/01/13 21:19:52 --- src/libffmpeg/libavcodec/ppc/Makefile.am | 12 +- src/libffmpeg/libavcodec/ppc/float_altivec.c | 194 +++++ src/libffmpeg/libavcodec/ppc/h264_altivec.c | 565 +++++++++++++++ .../libavcodec/ppc/h264_template_altivec.c | 719 +++++++++++++++++++ src/libffmpeg/libavcodec/ppc/mathops.h | 33 + src/libffmpeg/libavcodec/ppc/snow_altivec.c | 788 +++++++++++++++++++++ src/libffmpeg/libavcodec/ppc/types_altivec.h | 41 ++ src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c | 338 +++++++++ 8 files changed, 2685 insertions(+), 5 deletions(-) create mode 100644 src/libffmpeg/libavcodec/ppc/float_altivec.c create mode 100644 src/libffmpeg/libavcodec/ppc/h264_altivec.c create mode 100644 src/libffmpeg/libavcodec/ppc/h264_template_altivec.c create mode 100644 src/libffmpeg/libavcodec/ppc/mathops.h create mode 100644 src/libffmpeg/libavcodec/ppc/snow_altivec.c create mode 100644 src/libffmpeg/libavcodec/ppc/types_altivec.h create mode 100644 src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c (limited to 'src/libffmpeg/libavcodec/ppc') diff --git a/src/libffmpeg/libavcodec/ppc/Makefile.am b/src/libffmpeg/libavcodec/ppc/Makefile.am index 00e796f6d..d52cc481e 100644 --- a/src/libffmpeg/libavcodec/ppc/Makefile.am +++ b/src/libffmpeg/libavcodec/ppc/Makefile.am @@ -12,14 +12,17 @@ noinst_LTLIBRARIES = libavcodec_ppc.la libavcodec_ppc_src = dsputil_altivec.c \ dsputil_ppc.c \ - dsputil_h264_altivec.c \ - dsputil_h264_template_altivec.c \ + h264_altivec.c \ + h264_template_altivec.c \ fdct_altivec.c \ fft_altivec.c \ + float_altivec.c \ idct_altivec.c \ gmc_altivec.c \ mpegvideo_altivec.c \ - mpegvideo_ppc.c + mpegvideo_ppc.c \ + snow_altivec.c \ + vc1dsp_altivec.c libavcodec_ppc_dummy = libavcodec_ppc_dummy.c EXTRA_DIST = $(libavcodec_ppc_src) $(libavcodec_ppc_dummy) @@ -28,7 +31,6 @@ EXTRA_DIST = $(libavcodec_ppc_src) $(libavcodec_ppc_dummy) #ppc_modules = $(libavcodec_ppc_src) #endif - libavcodec_ppc_la_SOURCES = $(ppc_modules) $(libavcodec_ppc_dummy) -noinst_HEADERS = dsputil_altivec.h 
dsputil_ppc.h gcc_fixes.h +noinst_HEADERS = dsputil_altivec.h dsputil_ppc.h gcc_fixes.h mathops.h types_altivec.h diff --git a/src/libffmpeg/libavcodec/ppc/float_altivec.c b/src/libffmpeg/libavcodec/ppc/float_altivec.c new file mode 100644 index 000000000..c6e43dec2 --- /dev/null +++ b/src/libffmpeg/libavcodec/ppc/float_altivec.c @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2006 Luca Barbato + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "../dsputil.h" + +#include "gcc_fixes.h" + +#include "dsputil_altivec.h" + +static void vector_fmul_altivec(float *dst, const float *src, int len) +{ + int i; + vector float d0, d1, s, zero = (vector float)vec_splat_u32(0); + for(i=0; ivector_fmul = vector_fmul_altivec; + c->vector_fmul_reverse = vector_fmul_reverse_altivec; + c->vector_fmul_add_add = vector_fmul_add_add_altivec; + if(!(avctx->flags & CODEC_FLAG_BITEXACT)) + c->float_to_int16 = float_to_int16_altivec; +} diff --git a/src/libffmpeg/libavcodec/ppc/h264_altivec.c b/src/libffmpeg/libavcodec/ppc/h264_altivec.c new file mode 100644 index 000000000..bac620e82 --- /dev/null +++ b/src/libffmpeg/libavcodec/ppc/h264_altivec.c @@ -0,0 +1,565 @@ +/* + * Copyright (c) 2004 Romain Dolbeau + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "../dsputil.h" + +#include "gcc_fixes.h" + +#include "dsputil_altivec.h" +#include "types_altivec.h" + +#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s +#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s) + +#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC +#define PREFIX_h264_chroma_mc8_altivec put_h264_chroma_mc8_altivec +#define PREFIX_h264_chroma_mc8_num altivec_put_h264_chroma_mc8_num +#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec +#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num +#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec +#define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num +#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec +#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num +#include "h264_template_altivec.c" +#undef OP_U8_ALTIVEC +#undef PREFIX_h264_chroma_mc8_altivec +#undef PREFIX_h264_chroma_mc8_num +#undef PREFIX_h264_qpel16_h_lowpass_altivec +#undef PREFIX_h264_qpel16_h_lowpass_num +#undef PREFIX_h264_qpel16_v_lowpass_altivec +#undef PREFIX_h264_qpel16_v_lowpass_num +#undef PREFIX_h264_qpel16_hv_lowpass_altivec +#undef PREFIX_h264_qpel16_hv_lowpass_num + +#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC +#define PREFIX_h264_chroma_mc8_altivec avg_h264_chroma_mc8_altivec +#define PREFIX_h264_chroma_mc8_num altivec_avg_h264_chroma_mc8_num +#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec +#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num +#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec +#define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num +#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec +#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num +#include "h264_template_altivec.c" +#undef OP_U8_ALTIVEC +#undef PREFIX_h264_chroma_mc8_altivec +#undef PREFIX_h264_chroma_mc8_num +#undef PREFIX_h264_qpel16_h_lowpass_altivec +#undef PREFIX_h264_qpel16_h_lowpass_num +#undef PREFIX_h264_qpel16_v_lowpass_altivec +#undef PREFIX_h264_qpel16_v_lowpass_num +#undef PREFIX_h264_qpel16_hv_lowpass_altivec +#undef PREFIX_h264_qpel16_hv_lowpass_num + +#define H264_MC(OPNAME, SIZE, CODETYPE) \ +static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \ + DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\ + put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\ + put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ + OPNAME ## pixels 
## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\ + put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\ + put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\ + put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ + put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\ + put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ + put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\ + put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ + put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\ + put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ + put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\ + OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\ + put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ + put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, 
SIZE, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\ + put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ + put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\ + put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\ + put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\ +}\ +\ +static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ + DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\ + DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\ + put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\ + put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\ + OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\ +}\ + +/* this code assume that stride % 16 == 0 */ +void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) { + signed int ABCD[4] __attribute__((aligned(16))) = + {((8 - x) * (8 - y)), + ((x) * (8 - y)), + ((8 - x) * (y)), + ((x) * (y))}; + register int i; + vector unsigned char fperm; + const vector signed int vABCD = vec_ld(0, ABCD); + const vector signed short vA = vec_splat((vector signed short)vABCD, 1); + const vector signed short vB = vec_splat((vector signed short)vABCD, 3); + const vector signed short vC = vec_splat((vector signed short)vABCD, 5); + const vector signed short vD = vec_splat((vector signed short)vABCD, 7); + const vector signed int vzero = vec_splat_s32(0); + const vector signed short v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4)); + const vector unsigned short v6us = vec_splat_u16(6); + register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1; + register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 
1 : 0; + + vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1; + vector unsigned char vsrc0uc, vsrc1uc; + vector signed short vsrc0ssH, vsrc1ssH; + vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc; + vector signed short vsrc2ssH, vsrc3ssH, psum; + vector unsigned char vdst, ppsum, fsum; + + if (((unsigned long)dst) % 16 == 0) { + fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, + 0x08, 0x09, 0x0A, 0x0B, + 0x0C, 0x0D, 0x0E, 0x0F); + } else { + fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, + 0x18, 0x19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F); + } + + vsrcAuc = vec_ld(0, src); + + if (loadSecond) + vsrcBuc = vec_ld(16, src); + vsrcperm0 = vec_lvsl(0, src); + vsrcperm1 = vec_lvsl(1, src); + + vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0); + if (reallyBadAlign) + vsrc1uc = vsrcBuc; + else + vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1); + + vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc0uc); + vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc1uc); + + if (!loadSecond) {// -> !reallyBadAlign + for (i = 0 ; i < h ; i++) { + + + vsrcCuc = vec_ld(stride + 0, src); + + vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0); + vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1); + + vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc2uc); + vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc3uc); + + psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0)); + psum = vec_mladd(vB, vsrc1ssH, psum); + psum = vec_mladd(vC, vsrc2ssH, psum); + psum = vec_mladd(vD, vsrc3ssH, psum); + psum = vec_add(v28ss, psum); + psum = vec_sra(psum, v6us); + + vdst = vec_ld(0, dst); + ppsum = (vector unsigned char)vec_packsu(psum, psum); + fsum = vec_perm(vdst, ppsum, fperm); + + vec_st(fsum, 0, dst); + + vsrc0ssH = vsrc2ssH; + vsrc1ssH = vsrc3ssH; + + dst += stride; + src += stride; + } + } else { + vector unsigned char vsrcDuc; + for (i = 0 ; i < h ; i++) { + vsrcCuc = vec_ld(stride + 0, src); + vsrcDuc = vec_ld(stride + 16, src); + + vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0); + if (reallyBadAlign) + vsrc3uc = vsrcDuc; + else + vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1); + + vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc2uc); + vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc3uc); + + psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0)); + psum = vec_mladd(vB, vsrc1ssH, psum); + psum = vec_mladd(vC, vsrc2ssH, psum); + psum = vec_mladd(vD, vsrc3ssH, psum); + psum = vec_add(v28ss, psum); + psum = vec_sr(psum, v6us); + + vdst = vec_ld(0, dst); + ppsum = (vector unsigned char)vec_pack(psum, psum); + fsum = vec_perm(vdst, ppsum, fperm); + + vec_st(fsum, 0, dst); + + vsrc0ssH = vsrc2ssH; + vsrc1ssH = vsrc3ssH; + + dst += stride; + src += stride; + } + } +} + +static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1, + const uint8_t * src2, int dst_stride, + int src_stride1, int h) +{ + int i; + vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align; + + mask_ = vec_lvsl(0, src2); + + for (i = 0; i < h; i++) { + + tmp1 = vec_ld(i * src_stride1, src1); + mask = vec_lvsl(i * src_stride1, src1); + tmp2 = vec_ld(i * src_stride1 + 15, src1); + + a = vec_perm(tmp1, tmp2, mask); + + tmp1 = vec_ld(i * 16, src2); + tmp2 = 
vec_ld(i * 16 + 15, src2); + + b = vec_perm(tmp1, tmp2, mask_); + + tmp1 = vec_ld(0, dst); + mask = vec_lvsl(0, dst); + tmp2 = vec_ld(15, dst); + + d = vec_avg(a, b); + + edges = vec_perm(tmp2, tmp1, mask); + + align = vec_lvsr(0, dst); + + tmp2 = vec_perm(d, edges, align); + tmp1 = vec_perm(edges, d, align); + + vec_st(tmp2, 15, dst); + vec_st(tmp1, 0 , dst); + + dst += dst_stride; + } +} + +static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1, + const uint8_t * src2, int dst_stride, + int src_stride1, int h) +{ + int i; + vector unsigned char a, b, d, tmp1, tmp2, mask, mask_, edges, align; + + mask_ = vec_lvsl(0, src2); + + for (i = 0; i < h; i++) { + + tmp1 = vec_ld(i * src_stride1, src1); + mask = vec_lvsl(i * src_stride1, src1); + tmp2 = vec_ld(i * src_stride1 + 15, src1); + + a = vec_perm(tmp1, tmp2, mask); + + tmp1 = vec_ld(i * 16, src2); + tmp2 = vec_ld(i * 16 + 15, src2); + + b = vec_perm(tmp1, tmp2, mask_); + + tmp1 = vec_ld(0, dst); + mask = vec_lvsl(0, dst); + tmp2 = vec_ld(15, dst); + + d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b)); + + edges = vec_perm(tmp2, tmp1, mask); + + align = vec_lvsr(0, dst); + + tmp2 = vec_perm(d, edges, align); + tmp1 = vec_perm(edges, d, align); + + vec_st(tmp2, 15, dst); + vec_st(tmp1, 0 , dst); + + dst += dst_stride; + } +} + +/* Implemented but could be faster +#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h) +#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h) + */ + + H264_MC(put_, 16, altivec) + H264_MC(avg_, 16, altivec) + + +/**************************************************************************** + * IDCT transform: + ****************************************************************************/ + +#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\ + /* a0 = SRC(0) + SRC(4); */ \ + vec_s16_t a0v = vec_add(s0, s4); \ + /* a2 = SRC(0) - SRC(4); */ \ + vec_s16_t a2v = vec_sub(s0, s4); \ + /* a4 = (SRC(2)>>1) - SRC(6); */ \ + vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \ + /* a6 = (SRC(6)>>1) + SRC(2); */ \ + vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \ + /* b0 = a0 + a6; */ \ + vec_s16_t b0v = vec_add(a0v, a6v); \ + /* b2 = a2 + a4; */ \ + vec_s16_t b2v = vec_add(a2v, a4v); \ + /* b4 = a2 - a4; */ \ + vec_s16_t b4v = vec_sub(a2v, a4v); \ + /* b6 = a0 - a6; */ \ + vec_s16_t b6v = vec_sub(a0v, a6v); \ + /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \ + /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \ + vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \ + /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \ + /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \ + vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\ + /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \ + /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \ + vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\ + /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \ + vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\ + /* b1 = (a7>>2) + a1; */ \ + vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \ + /* b3 = a3 + (a5>>2); */ \ + vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \ + /* b5 = (a3>>2) - a5; */ \ + vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \ + /* b7 = a7 - (a1>>2); */ \ + vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \ + /* DST(0, b0 + b7); */ \ + d0 = vec_add(b0v, b7v); \ + /* DST(1, 
b2 + b5); */ \ + d1 = vec_add(b2v, b5v); \ + /* DST(2, b4 + b3); */ \ + d2 = vec_add(b4v, b3v); \ + /* DST(3, b6 + b1); */ \ + d3 = vec_add(b6v, b1v); \ + /* DST(4, b6 - b1); */ \ + d4 = vec_sub(b6v, b1v); \ + /* DST(5, b4 - b3); */ \ + d5 = vec_sub(b4v, b3v); \ + /* DST(6, b2 - b5); */ \ + d6 = vec_sub(b2v, b5v); \ + /* DST(7, b0 - b7); */ \ + d7 = vec_sub(b0v, b7v); \ +} + +#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \ + /* unaligned load */ \ + vec_u8_t hv = vec_ld( 0, dest ); \ + vec_u8_t lv = vec_ld( 7, dest ); \ + vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv ); \ + vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \ + vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv); \ + vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \ + vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum); \ + vec_u8_t edgehv; \ + /* unaligned store */ \ + vec_u8_t bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\ + vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv ); \ + lv = vec_sel( lv, bodyv, edgelv ); \ + vec_st( lv, 7, dest ); \ + hv = vec_ld( 0, dest ); \ + edgehv = vec_perm( zero_u8v, sel, perm_stv ); \ + hv = vec_sel( hv, bodyv, edgehv ); \ + vec_st( hv, 0, dest ); \ + } + +void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) { + vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7; + vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7; + vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7; + + vec_u8_t perm_ldv = vec_lvsl(0, dst); + vec_u8_t perm_stv = vec_lvsr(8, dst); + + const vec_u16_t onev = vec_splat_u16(1); + const vec_u16_t twov = vec_splat_u16(2); + const vec_u16_t sixv = vec_splat_u16(6); + + const vec_u8_t sel = (vec_u8_t) AVV(0,0,0,0,0,0,0,0, + -1,-1,-1,-1,-1,-1,-1,-1); + LOAD_ZERO; + + dct[0] += 32; // rounding for the >>6 at the end + + s0 = vec_ld(0x00, (int16_t*)dct); + s1 = vec_ld(0x10, (int16_t*)dct); + s2 = vec_ld(0x20, (int16_t*)dct); + s3 = vec_ld(0x30, (int16_t*)dct); + s4 = vec_ld(0x40, (int16_t*)dct); + s5 = vec_ld(0x50, (int16_t*)dct); + s6 = vec_ld(0x60, (int16_t*)dct); + s7 = vec_ld(0x70, (int16_t*)dct); + + IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, + d0, d1, d2, d3, d4, d5, d6, d7); + + TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 ); + + IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7, + idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7); + + ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel); + ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel); + ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel); + ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel); + ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel); + ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel); + ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel); + ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel); +} + +void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) { + +#ifdef HAVE_ALTIVEC + if (has_altivec()) { + c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec; + c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec; + c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec; + c->h264_idct8_add = ff_h264_idct8_add_altivec; + +#define dspfunc(PFX, IDX, NUM) \ + c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \ + c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \ + c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## 
_mc20_altivec; \ + c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \ + c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \ + c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \ + c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \ + c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \ + c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \ + c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \ + c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \ + c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \ + c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \ + c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \ + c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \ + c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec + + dspfunc(put_h264_qpel, 0, 16); + dspfunc(avg_h264_qpel, 0, 16); +#undef dspfunc + + } else +#endif /* HAVE_ALTIVEC */ + { + // Non-AltiVec PPC optimisations + + // ... pending ... + } +} diff --git a/src/libffmpeg/libavcodec/ppc/h264_template_altivec.c b/src/libffmpeg/libavcodec/ppc/h264_template_altivec.c new file mode 100644 index 000000000..e8ad67f2f --- /dev/null +++ b/src/libffmpeg/libavcodec/ppc/h264_template_altivec.c @@ -0,0 +1,719 @@ +/* + * Copyright (c) 2004 Romain Dolbeau + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/* this code assume that stride % 16 == 0 */ +void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) { + POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1); + signed int ABCD[4] __attribute__((aligned(16))) = + {((8 - x) * (8 - y)), + ((x) * (8 - y)), + ((8 - x) * (y)), + ((x) * (y))}; + register int i; + vector unsigned char fperm; + const vector signed int vABCD = vec_ld(0, ABCD); + const vector signed short vA = vec_splat((vector signed short)vABCD, 1); + const vector signed short vB = vec_splat((vector signed short)vABCD, 3); + const vector signed short vC = vec_splat((vector signed short)vABCD, 5); + const vector signed short vD = vec_splat((vector signed short)vABCD, 7); + const vector signed int vzero = vec_splat_s32(0); + const vector signed short v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5)); + const vector unsigned short v6us = vec_splat_u16(6); + register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1; + register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 
1 : 0; + + vector unsigned char vsrcAuc, vsrcBuc, vsrcperm0, vsrcperm1; + vector unsigned char vsrc0uc, vsrc1uc; + vector signed short vsrc0ssH, vsrc1ssH; + vector unsigned char vsrcCuc, vsrc2uc, vsrc3uc; + vector signed short vsrc2ssH, vsrc3ssH, psum; + vector unsigned char vdst, ppsum, vfdst, fsum; + + POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1); + + if (((unsigned long)dst) % 16 == 0) { + fperm = (vector unsigned char)AVV(0x10, 0x11, 0x12, 0x13, + 0x14, 0x15, 0x16, 0x17, + 0x08, 0x09, 0x0A, 0x0B, + 0x0C, 0x0D, 0x0E, 0x0F); + } else { + fperm = (vector unsigned char)AVV(0x00, 0x01, 0x02, 0x03, + 0x04, 0x05, 0x06, 0x07, + 0x18, 0x19, 0x1A, 0x1B, + 0x1C, 0x1D, 0x1E, 0x1F); + } + + vsrcAuc = vec_ld(0, src); + + if (loadSecond) + vsrcBuc = vec_ld(16, src); + vsrcperm0 = vec_lvsl(0, src); + vsrcperm1 = vec_lvsl(1, src); + + vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0); + if (reallyBadAlign) + vsrc1uc = vsrcBuc; + else + vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1); + + vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc0uc); + vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc1uc); + + if (!loadSecond) {// -> !reallyBadAlign + for (i = 0 ; i < h ; i++) { + + + vsrcCuc = vec_ld(stride + 0, src); + + vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0); + vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1); + + vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc2uc); + vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc3uc); + + psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0)); + psum = vec_mladd(vB, vsrc1ssH, psum); + psum = vec_mladd(vC, vsrc2ssH, psum); + psum = vec_mladd(vD, vsrc3ssH, psum); + psum = vec_add(v32ss, psum); + psum = vec_sra(psum, v6us); + + vdst = vec_ld(0, dst); + ppsum = (vector unsigned char)vec_packsu(psum, psum); + vfdst = vec_perm(vdst, ppsum, fperm); + + OP_U8_ALTIVEC(fsum, vfdst, vdst); + + vec_st(fsum, 0, dst); + + vsrc0ssH = vsrc2ssH; + vsrc1ssH = vsrc3ssH; + + dst += stride; + src += stride; + } + } else { + vector unsigned char vsrcDuc; + for (i = 0 ; i < h ; i++) { + vsrcCuc = vec_ld(stride + 0, src); + vsrcDuc = vec_ld(stride + 16, src); + + vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0); + if (reallyBadAlign) + vsrc3uc = vsrcDuc; + else + vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1); + + vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc2uc); + vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, + (vector unsigned char)vsrc3uc); + + psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0)); + psum = vec_mladd(vB, vsrc1ssH, psum); + psum = vec_mladd(vC, vsrc2ssH, psum); + psum = vec_mladd(vD, vsrc3ssH, psum); + psum = vec_add(v32ss, psum); + psum = vec_sr(psum, v6us); + + vdst = vec_ld(0, dst); + ppsum = (vector unsigned char)vec_pack(psum, psum); + vfdst = vec_perm(vdst, ppsum, fperm); + + OP_U8_ALTIVEC(fsum, vfdst, vdst); + + vec_st(fsum, 0, dst); + + vsrc0ssH = vsrc2ssH; + vsrc1ssH = vsrc3ssH; + + dst += stride; + src += stride; + } + } + POWERPC_PERF_STOP_COUNT(PREFIX_h264_chroma_mc8_num, 1); +} + +/* this code assume stride % 16 == 0 */ +static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) { + POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1); + register int i; + + const vector signed int vzero = vec_splat_s32(0); + const vector 
unsigned char permM2 = vec_lvsl(-2, src); + const vector unsigned char permM1 = vec_lvsl(-1, src); + const vector unsigned char permP0 = vec_lvsl(+0, src); + const vector unsigned char permP1 = vec_lvsl(+1, src); + const vector unsigned char permP2 = vec_lvsl(+2, src); + const vector unsigned char permP3 = vec_lvsl(+3, src); + const vector signed short v5ss = vec_splat_s16(5); + const vector unsigned short v5us = vec_splat_u16(5); + const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2)); + const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4)); + const vector unsigned char dstperm = vec_lvsr(0, dst); + const vector unsigned char neg1 = + (const vector unsigned char) vec_splat_s8(-1); + + const vector unsigned char dstmask = + vec_perm((const vector unsigned char)vzero, + neg1, dstperm); + + vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3; + + register int align = ((((unsigned long)src) - 2) % 16); + + vector signed short srcP0A, srcP0B, srcP1A, srcP1B, + srcP2A, srcP2B, srcP3A, srcP3B, + srcM1A, srcM1B, srcM2A, srcM2B, + sum1A, sum1B, sum2A, sum2B, sum3A, sum3B, + pp1A, pp1B, pp2A, pp2B, pp3A, pp3B, + psumA, psumB, sumA, sumB; + + vector unsigned char sum, dst1, dst2, vdst, fsum, + rsum, fdst1, fdst2; + + POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1); + + for (i = 0 ; i < 16 ; i ++) { + vector unsigned char srcR1 = vec_ld(-2, src); + vector unsigned char srcR2 = vec_ld(14, src); + + switch (align) { + default: { + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = vec_perm(srcR1, srcR2, permP0); + srcP1 = vec_perm(srcR1, srcR2, permP1); + srcP2 = vec_perm(srcR1, srcR2, permP2); + srcP3 = vec_perm(srcR1, srcR2, permP3); + } break; + case 11: { + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = vec_perm(srcR1, srcR2, permP0); + srcP1 = vec_perm(srcR1, srcR2, permP1); + srcP2 = vec_perm(srcR1, srcR2, permP2); + srcP3 = srcR2; + } break; + case 12: { + vector unsigned char srcR3 = vec_ld(30, src); + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = vec_perm(srcR1, srcR2, permP0); + srcP1 = vec_perm(srcR1, srcR2, permP1); + srcP2 = srcR2; + srcP3 = vec_perm(srcR2, srcR3, permP3); + } break; + case 13: { + vector unsigned char srcR3 = vec_ld(30, src); + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = vec_perm(srcR1, srcR2, permP0); + srcP1 = srcR2; + srcP2 = vec_perm(srcR2, srcR3, permP2); + srcP3 = vec_perm(srcR2, srcR3, permP3); + } break; + case 14: { + vector unsigned char srcR3 = vec_ld(30, src); + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = srcR2; + srcP1 = vec_perm(srcR2, srcR3, permP1); + srcP2 = vec_perm(srcR2, srcR3, permP2); + srcP3 = vec_perm(srcR2, srcR3, permP3); + } break; + case 15: { + vector unsigned char srcR3 = vec_ld(30, src); + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = srcR2; + srcP0 = vec_perm(srcR2, srcR3, permP0); + srcP1 = vec_perm(srcR2, srcR3, permP1); + srcP2 = vec_perm(srcR2, srcR3, permP2); + srcP3 = vec_perm(srcR2, srcR3, permP3); + } break; + } + + srcP0A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP0); + srcP0B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP0); + srcP1A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP1); + srcP1B = (vector signed short) + vec_mergel((vector unsigned char)vzero, 
srcP1); + + srcP2A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP2); + srcP2B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP2); + srcP3A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP3); + srcP3B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP3); + + srcM1A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcM1); + srcM1B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcM1); + srcM2A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcM2); + srcM2B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcM2); + + sum1A = vec_adds(srcP0A, srcP1A); + sum1B = vec_adds(srcP0B, srcP1B); + sum2A = vec_adds(srcM1A, srcP2A); + sum2B = vec_adds(srcM1B, srcP2B); + sum3A = vec_adds(srcM2A, srcP3A); + sum3B = vec_adds(srcM2B, srcP3B); + + pp1A = vec_mladd(sum1A, v20ss, v16ss); + pp1B = vec_mladd(sum1B, v20ss, v16ss); + + pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero); + pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero); + + pp3A = vec_add(sum3A, pp1A); + pp3B = vec_add(sum3B, pp1B); + + psumA = vec_sub(pp3A, pp2A); + psumB = vec_sub(pp3B, pp2B); + + sumA = vec_sra(psumA, v5us); + sumB = vec_sra(psumB, v5us); + + sum = vec_packsu(sumA, sumB); + + dst1 = vec_ld(0, dst); + dst2 = vec_ld(16, dst); + vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst)); + + OP_U8_ALTIVEC(fsum, sum, vdst); + + rsum = vec_perm(fsum, fsum, dstperm); + fdst1 = vec_sel(dst1, rsum, dstmask); + fdst2 = vec_sel(rsum, dst2, dstmask); + + vec_st(fdst1, 0, dst); + vec_st(fdst2, 16, dst); + + src += srcStride; + dst += dstStride; + } +POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1); +} + +/* this code assume stride % 16 == 0 */ +static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) { + POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1); + + register int i; + + const vector signed int vzero = vec_splat_s32(0); + const vector unsigned char perm = vec_lvsl(0, src); + const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2)); + const vector unsigned short v5us = vec_splat_u16(5); + const vector signed short v5ss = vec_splat_s16(5); + const vector signed short v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4)); + const vector unsigned char dstperm = vec_lvsr(0, dst); + const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1); + const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm); + + uint8_t *srcbis = src - (srcStride * 2); + + const vector unsigned char srcM2a = vec_ld(0, srcbis); + const vector unsigned char srcM2b = vec_ld(16, srcbis); + const vector unsigned char srcM2 = vec_perm(srcM2a, srcM2b, perm); +// srcbis += srcStride; + const vector unsigned char srcM1a = vec_ld(0, srcbis += srcStride); + const vector unsigned char srcM1b = vec_ld(16, srcbis); + const vector unsigned char srcM1 = vec_perm(srcM1a, srcM1b, perm); +// srcbis += srcStride; + const vector unsigned char srcP0a = vec_ld(0, srcbis += srcStride); + const vector unsigned char srcP0b = vec_ld(16, srcbis); + const vector unsigned char srcP0 = vec_perm(srcP0a, srcP0b, perm); +// srcbis += srcStride; + const vector unsigned char srcP1a = vec_ld(0, srcbis += srcStride); + const vector unsigned char srcP1b = vec_ld(16, srcbis); + const vector unsigned char srcP1 = vec_perm(srcP1a, srcP1b, perm); +// srcbis += srcStride; + const vector unsigned char 
srcP2a = vec_ld(0, srcbis += srcStride); + const vector unsigned char srcP2b = vec_ld(16, srcbis); + const vector unsigned char srcP2 = vec_perm(srcP2a, srcP2b, perm); +// srcbis += srcStride; + + vector signed short srcM2ssA = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcM2); + vector signed short srcM2ssB = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcM2); + vector signed short srcM1ssA = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcM1); + vector signed short srcM1ssB = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcM1); + vector signed short srcP0ssA = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP0); + vector signed short srcP0ssB = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP0); + vector signed short srcP1ssA = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP1); + vector signed short srcP1ssB = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP1); + vector signed short srcP2ssA = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP2); + vector signed short srcP2ssB = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP2); + + vector signed short pp1A, pp1B, pp2A, pp2B, pp3A, pp3B, + psumA, psumB, sumA, sumB, + srcP3ssA, srcP3ssB, + sum1A, sum1B, sum2A, sum2B, sum3A, sum3B; + + vector unsigned char sum, dst1, dst2, vdst, fsum, rsum, fdst1, fdst2, + srcP3a, srcP3b, srcP3; + + POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1); + + for (i = 0 ; i < 16 ; i++) { + srcP3a = vec_ld(0, srcbis += srcStride); + srcP3b = vec_ld(16, srcbis); + srcP3 = vec_perm(srcP3a, srcP3b, perm); + srcP3ssA = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP3); + srcP3ssB = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP3); +// srcbis += srcStride; + + sum1A = vec_adds(srcP0ssA, srcP1ssA); + sum1B = vec_adds(srcP0ssB, srcP1ssB); + sum2A = vec_adds(srcM1ssA, srcP2ssA); + sum2B = vec_adds(srcM1ssB, srcP2ssB); + sum3A = vec_adds(srcM2ssA, srcP3ssA); + sum3B = vec_adds(srcM2ssB, srcP3ssB); + + srcM2ssA = srcM1ssA; + srcM2ssB = srcM1ssB; + srcM1ssA = srcP0ssA; + srcM1ssB = srcP0ssB; + srcP0ssA = srcP1ssA; + srcP0ssB = srcP1ssB; + srcP1ssA = srcP2ssA; + srcP1ssB = srcP2ssB; + srcP2ssA = srcP3ssA; + srcP2ssB = srcP3ssB; + + pp1A = vec_mladd(sum1A, v20ss, v16ss); + pp1B = vec_mladd(sum1B, v20ss, v16ss); + + pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero); + pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero); + + pp3A = vec_add(sum3A, pp1A); + pp3B = vec_add(sum3B, pp1B); + + psumA = vec_sub(pp3A, pp2A); + psumB = vec_sub(pp3B, pp2B); + + sumA = vec_sra(psumA, v5us); + sumB = vec_sra(psumB, v5us); + + sum = vec_packsu(sumA, sumB); + + dst1 = vec_ld(0, dst); + dst2 = vec_ld(16, dst); + vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst)); + + OP_U8_ALTIVEC(fsum, sum, vdst); + + rsum = vec_perm(fsum, fsum, dstperm); + fdst1 = vec_sel(dst1, rsum, dstmask); + fdst2 = vec_sel(rsum, dst2, dstmask); + + vec_st(fdst1, 0, dst); + vec_st(fdst2, 16, dst); + + dst += dstStride; + } + POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1); +} + +/* this code assume stride % 16 == 0 *and* tmp is properly aligned */ +static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) { + POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_hv_lowpass_num, 1); + register int i; + const vector signed int vzero = 
vec_splat_s32(0); + const vector unsigned char permM2 = vec_lvsl(-2, src); + const vector unsigned char permM1 = vec_lvsl(-1, src); + const vector unsigned char permP0 = vec_lvsl(+0, src); + const vector unsigned char permP1 = vec_lvsl(+1, src); + const vector unsigned char permP2 = vec_lvsl(+2, src); + const vector unsigned char permP3 = vec_lvsl(+3, src); + const vector signed short v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2)); + const vector unsigned int v10ui = vec_splat_u32(10); + const vector signed short v5ss = vec_splat_s16(5); + const vector signed short v1ss = vec_splat_s16(1); + const vector signed int v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9)); + const vector unsigned int v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4)); + + register int align = ((((unsigned long)src) - 2) % 16); + + const vector unsigned char neg1 = (const vector unsigned char) + vec_splat_s8(-1); + + vector signed short srcP0A, srcP0B, srcP1A, srcP1B, + srcP2A, srcP2B, srcP3A, srcP3B, + srcM1A, srcM1B, srcM2A, srcM2B, + sum1A, sum1B, sum2A, sum2B, sum3A, sum3B, + pp1A, pp1B, pp2A, pp2B, psumA, psumB; + + const vector unsigned char dstperm = vec_lvsr(0, dst); + + const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm); + + const vector unsigned char mperm = (const vector unsigned char) + AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B, + 0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F); + int16_t *tmpbis = tmp; + + vector signed short tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB, + tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB, + tmpP2ssA, tmpP2ssB; + + vector signed int pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo, + pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo, + pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo, + ssumAe, ssumAo, ssumBe, ssumBo; + vector unsigned char fsum, sumv, sum, dst1, dst2, vdst, + rsum, fdst1, fdst2; + vector signed short ssume, ssumo; + + POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1); + src -= (2 * srcStride); + for (i = 0 ; i < 21 ; i ++) { + vector unsigned char srcM2, srcM1, srcP0, srcP1, srcP2, srcP3; + vector unsigned char srcR1 = vec_ld(-2, src); + vector unsigned char srcR2 = vec_ld(14, src); + + switch (align) { + default: { + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = vec_perm(srcR1, srcR2, permP0); + srcP1 = vec_perm(srcR1, srcR2, permP1); + srcP2 = vec_perm(srcR1, srcR2, permP2); + srcP3 = vec_perm(srcR1, srcR2, permP3); + } break; + case 11: { + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = vec_perm(srcR1, srcR2, permP0); + srcP1 = vec_perm(srcR1, srcR2, permP1); + srcP2 = vec_perm(srcR1, srcR2, permP2); + srcP3 = srcR2; + } break; + case 12: { + vector unsigned char srcR3 = vec_ld(30, src); + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = vec_perm(srcR1, srcR2, permP0); + srcP1 = vec_perm(srcR1, srcR2, permP1); + srcP2 = srcR2; + srcP3 = vec_perm(srcR2, srcR3, permP3); + } break; + case 13: { + vector unsigned char srcR3 = vec_ld(30, src); + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = vec_perm(srcR1, srcR2, permP0); + srcP1 = srcR2; + srcP2 = vec_perm(srcR2, srcR3, permP2); + srcP3 = vec_perm(srcR2, srcR3, permP3); + } break; + case 14: { + vector unsigned char srcR3 = vec_ld(30, src); + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = vec_perm(srcR1, srcR2, permM1); + srcP0 = srcR2; + srcP1 = 
vec_perm(srcR2, srcR3, permP1); + srcP2 = vec_perm(srcR2, srcR3, permP2); + srcP3 = vec_perm(srcR2, srcR3, permP3); + } break; + case 15: { + vector unsigned char srcR3 = vec_ld(30, src); + srcM2 = vec_perm(srcR1, srcR2, permM2); + srcM1 = srcR2; + srcP0 = vec_perm(srcR2, srcR3, permP0); + srcP1 = vec_perm(srcR2, srcR3, permP1); + srcP2 = vec_perm(srcR2, srcR3, permP2); + srcP3 = vec_perm(srcR2, srcR3, permP3); + } break; + } + + srcP0A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP0); + srcP0B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP0); + srcP1A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP1); + srcP1B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP1); + + srcP2A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP2); + srcP2B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP2); + srcP3A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcP3); + srcP3B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcP3); + + srcM1A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcM1); + srcM1B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcM1); + srcM2A = (vector signed short) + vec_mergeh((vector unsigned char)vzero, srcM2); + srcM2B = (vector signed short) + vec_mergel((vector unsigned char)vzero, srcM2); + + sum1A = vec_adds(srcP0A, srcP1A); + sum1B = vec_adds(srcP0B, srcP1B); + sum2A = vec_adds(srcM1A, srcP2A); + sum2B = vec_adds(srcM1B, srcP2B); + sum3A = vec_adds(srcM2A, srcP3A); + sum3B = vec_adds(srcM2B, srcP3B); + + pp1A = vec_mladd(sum1A, v20ss, sum3A); + pp1B = vec_mladd(sum1B, v20ss, sum3B); + + pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero); + pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero); + + psumA = vec_sub(pp1A, pp2A); + psumB = vec_sub(pp1B, pp2B); + + vec_st(psumA, 0, tmp); + vec_st(psumB, 16, tmp); + + src += srcStride; + tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */ + } + + tmpM2ssA = vec_ld(0, tmpbis); + tmpM2ssB = vec_ld(16, tmpbis); + tmpbis += tmpStride; + tmpM1ssA = vec_ld(0, tmpbis); + tmpM1ssB = vec_ld(16, tmpbis); + tmpbis += tmpStride; + tmpP0ssA = vec_ld(0, tmpbis); + tmpP0ssB = vec_ld(16, tmpbis); + tmpbis += tmpStride; + tmpP1ssA = vec_ld(0, tmpbis); + tmpP1ssB = vec_ld(16, tmpbis); + tmpbis += tmpStride; + tmpP2ssA = vec_ld(0, tmpbis); + tmpP2ssB = vec_ld(16, tmpbis); + tmpbis += tmpStride; + + for (i = 0 ; i < 16 ; i++) { + const vector signed short tmpP3ssA = vec_ld(0, tmpbis); + const vector signed short tmpP3ssB = vec_ld(16, tmpbis); + + const vector signed short sum1A = vec_adds(tmpP0ssA, tmpP1ssA); + const vector signed short sum1B = vec_adds(tmpP0ssB, tmpP1ssB); + const vector signed short sum2A = vec_adds(tmpM1ssA, tmpP2ssA); + const vector signed short sum2B = vec_adds(tmpM1ssB, tmpP2ssB); + const vector signed short sum3A = vec_adds(tmpM2ssA, tmpP3ssA); + const vector signed short sum3B = vec_adds(tmpM2ssB, tmpP3ssB); + + tmpbis += tmpStride; + + tmpM2ssA = tmpM1ssA; + tmpM2ssB = tmpM1ssB; + tmpM1ssA = tmpP0ssA; + tmpM1ssB = tmpP0ssB; + tmpP0ssA = tmpP1ssA; + tmpP0ssB = tmpP1ssB; + tmpP1ssA = tmpP2ssA; + tmpP1ssB = tmpP2ssB; + tmpP2ssA = tmpP3ssA; + tmpP2ssB = tmpP3ssB; + + pp1Ae = vec_mule(sum1A, v20ss); + pp1Ao = vec_mulo(sum1A, v20ss); + pp1Be = vec_mule(sum1B, v20ss); + pp1Bo = vec_mulo(sum1B, v20ss); + + pp2Ae = vec_mule(sum2A, v5ss); + pp2Ao = vec_mulo(sum2A, v5ss); + pp2Be = vec_mule(sum2B, 
v5ss); + pp2Bo = vec_mulo(sum2B, v5ss); + + pp3Ae = vec_sra((vector signed int)sum3A, v16ui); + pp3Ao = vec_mulo(sum3A, v1ss); + pp3Be = vec_sra((vector signed int)sum3B, v16ui); + pp3Bo = vec_mulo(sum3B, v1ss); + + pp1cAe = vec_add(pp1Ae, v512si); + pp1cAo = vec_add(pp1Ao, v512si); + pp1cBe = vec_add(pp1Be, v512si); + pp1cBo = vec_add(pp1Bo, v512si); + + pp32Ae = vec_sub(pp3Ae, pp2Ae); + pp32Ao = vec_sub(pp3Ao, pp2Ao); + pp32Be = vec_sub(pp3Be, pp2Be); + pp32Bo = vec_sub(pp3Bo, pp2Bo); + + sumAe = vec_add(pp1cAe, pp32Ae); + sumAo = vec_add(pp1cAo, pp32Ao); + sumBe = vec_add(pp1cBe, pp32Be); + sumBo = vec_add(pp1cBo, pp32Bo); + + ssumAe = vec_sra(sumAe, v10ui); + ssumAo = vec_sra(sumAo, v10ui); + ssumBe = vec_sra(sumBe, v10ui); + ssumBo = vec_sra(sumBo, v10ui); + + ssume = vec_packs(ssumAe, ssumBe); + ssumo = vec_packs(ssumAo, ssumBo); + + sumv = vec_packsu(ssume, ssumo); + sum = vec_perm(sumv, sumv, mperm); + + dst1 = vec_ld(0, dst); + dst2 = vec_ld(16, dst); + vdst = vec_perm(dst1, dst2, vec_lvsl(0, dst)); + + OP_U8_ALTIVEC(fsum, sum, vdst); + + rsum = vec_perm(fsum, fsum, dstperm); + fdst1 = vec_sel(dst1, rsum, dstmask); + fdst2 = vec_sel(rsum, dst2, dstmask); + + vec_st(fdst1, 0, dst); + vec_st(fdst2, 16, dst); + + dst += dstStride; + } + POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_hv_lowpass_num, 1); +} diff --git a/src/libffmpeg/libavcodec/ppc/mathops.h b/src/libffmpeg/libavcodec/ppc/mathops.h new file mode 100644 index 000000000..6af23f246 --- /dev/null +++ b/src/libffmpeg/libavcodec/ppc/mathops.h @@ -0,0 +1,33 @@ +/* + * simple math operations + * Copyright (c) 2001, 2002 Fabrice Bellard. + * Copyright (c) 2006 Michael Niedermayer et al + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#if defined(ARCH_POWERPC_405) +/* signed 16x16 -> 32 multiply add accumulate */ +# define MAC16(rt, ra, rb) \ + asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); + +/* signed 16x16 -> 32 multiply */ +# define MUL16(ra, rb) \ + ({ int __rt; + asm ("mullhw %0, %1, %2" : "=r" (__rt) : "r" (ra), "r" (rb)); + __rt; }) +#endif diff --git a/src/libffmpeg/libavcodec/ppc/snow_altivec.c b/src/libffmpeg/libavcodec/ppc/snow_altivec.c new file mode 100644 index 000000000..b15672ffe --- /dev/null +++ b/src/libffmpeg/libavcodec/ppc/snow_altivec.c @@ -0,0 +1,788 @@ +/* + * Altivec optimized snow DSP utils + * Copyright (c) 2006 Luca Barbato + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + * + */ + +#include "../dsputil.h" + +#include "gcc_fixes.h" +#include "dsputil_altivec.h" +#include "../snow.h" + +#undef NDEBUG +#include + + + +//FIXME remove this replication +#define slice_buffer_get_line(slice_buf, line_num) ((slice_buf)->line[line_num] ? (slice_buf)->line[line_num] : slice_buffer_load_line((slice_buf), (line_num))) + +static DWTELEM * slice_buffer_load_line(slice_buffer * buf, int line) +{ + int offset; + DWTELEM * buffer; + +// av_log(NULL, AV_LOG_DEBUG, "Cache hit: %d\n", line); + + assert(buf->data_stack_top >= 0); +// assert(!buf->line[line]); + if (buf->line[line]) + return buf->line[line]; + + offset = buf->line_width * line; + buffer = buf->data_stack[buf->data_stack_top]; + buf->data_stack_top--; + buf->line[line] = buffer; + +// av_log(NULL, AV_LOG_DEBUG, "slice_buffer_load_line: line: %d remaining: %d\n", line, buf->data_stack_top + 1); + + return buffer; +} + + +//altivec code + +void ff_snow_horizontal_compose97i_altivec(DWTELEM *b, int width) +{ + const int w2= (width+1)>>1; + DECLARE_ALIGNED_16(DWTELEM, temp[(width>>1)]); + const int w_l= (width>>1); + const int w_r= w2 - 1; + int i; + vector signed int t1, t2, x, y, tmp1, tmp2; + vector signed int *vbuf, *vtmp; + vector unsigned char align; + + + + { // Lift 0 + DWTELEM * const ref = b + w2 - 1; + DWTELEM b_0 = b[0]; + vbuf = (vector signed int *)b; + + tmp1 = vec_ld (0, ref); + align = vec_lvsl (0, ref); + tmp2 = vec_ld (15, ref); + t1= vec_perm(tmp1, tmp2, align); + + i = 0; + + for (i=0; i> 3); + b[i+1] = b[i+1] - ((3 * (ref[i+1] + ref[i+2]) + 4) >> 3); + b[i+2] = b[i+2] - ((3 * (ref[i+2] + ref[i+3]) + 4) >> 3); + b[i+3] = b[i+3] - ((3 * (ref[i+3] + ref[i+4]) + 4) >> 3); +#else + + tmp1 = vec_ld (0, ref+4+i); + tmp2 = vec_ld (15, ref+4+i); + + t2 = vec_perm(tmp1, tmp2, align); + + y = vec_add(t1,vec_sld(t1,t2,4)); + y = vec_add(vec_add(y,y),y); + + tmp1 = vec_ld (0, ref+8+i); + + y = vec_add(y, vec_splat_s32(4)); + y = vec_sra(y, vec_splat_u32(3)); + + tmp2 = vec_ld (15, ref+8+i); + + *vbuf = vec_sub(*vbuf, y); + + t1=t2; + + vbuf++; + + t2 = vec_perm(tmp1, tmp2, align); + + y = vec_add(t1,vec_sld(t1,t2,4)); + y = vec_add(vec_add(y,y),y); + + tmp1 = vec_ld (0, ref+12+i); + + y = vec_add(y, vec_splat_s32(4)); + y = vec_sra(y, vec_splat_u32(3)); + + tmp2 = vec_ld (15, ref+12+i); + + *vbuf = vec_sub(*vbuf, y); + + t1=t2; + + vbuf++; + + t2 = vec_perm(tmp1, tmp2, align); + + y = vec_add(t1,vec_sld(t1,t2,4)); + y = vec_add(vec_add(y,y),y); + + tmp1 = vec_ld (0, ref+16+i); + + y = vec_add(y, vec_splat_s32(4)); + y = vec_sra(y, vec_splat_u32(3)); + + tmp2 = vec_ld (15, ref+16+i); + + *vbuf = vec_sub(*vbuf, y); + + t1=t2; + + t2 = vec_perm(tmp1, tmp2, align); + + y = vec_add(t1,vec_sld(t1,t2,4)); + y = vec_add(vec_add(y,y),y); + + vbuf++; + + y = vec_add(y, vec_splat_s32(4)); + y = vec_sra(y, vec_splat_u32(3)); + *vbuf = vec_sub(*vbuf, y); + + t1=t2; + + vbuf++; + +#endif + } + + snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); + b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); + } + + { // Lift 1 + 
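+        /* Lift 1 appears to be the W_CM/W_CO/W_CS stage of the inverse 9/7
+         * integer lifting: each coefficient has its two low-band neighbours
+         * folded back in, mirroring the scalar tail used by the vertical
+         * compose below, b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS. */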
DWTELEM * const dst = b+w2; + + i = 0; + for(; (((long)&dst[i]) & 0xF) && i> 4); + b[i+1] = b[i+1] - (((8 -(ref[i+1] + ref[i+2])) - (b[i+1]<<2)) >> 4); + b[i+2] = b[i+2] - (((8 -(ref[i+2] + ref[i+3])) - (b[i+2]<<2)) >> 4); + b[i+3] = b[i+3] - (((8 -(ref[i+3] + ref[i+4])) - (b[i+3]<<2)) >> 4); +#else + tmp1 = vec_ld (0, ref+4+i); + tmp2 = vec_ld (15, ref+4+i); + + t2 = vec_perm(tmp1, tmp2, align); + + y = vec_add(t1,vec_sld(t1,t2,4)); + y = vec_sub(vec_splat_s32(8),y); + + tmp1 = vec_ld (0, ref+8+i); + + x = vec_sl(*vbuf,vec_splat_u32(2)); + y = vec_sra(vec_sub(y,x),vec_splat_u32(4)); + + tmp2 = vec_ld (15, ref+8+i); + + *vbuf = vec_sub( *vbuf, y); + + t1 = t2; + + vbuf++; + + t2 = vec_perm(tmp1, tmp2, align); + + y = vec_add(t1,vec_sld(t1,t2,4)); + y = vec_sub(vec_splat_s32(8),y); + + tmp1 = vec_ld (0, ref+12+i); + + x = vec_sl(*vbuf,vec_splat_u32(2)); + y = vec_sra(vec_sub(y,x),vec_splat_u32(4)); + + tmp2 = vec_ld (15, ref+12+i); + + *vbuf = vec_sub( *vbuf, y); + + t1 = t2; + + vbuf++; + + t2 = vec_perm(tmp1, tmp2, align); + + y = vec_add(t1,vec_sld(t1,t2,4)); + y = vec_sub(vec_splat_s32(8),y); + + tmp1 = vec_ld (0, ref+16+i); + + x = vec_sl(*vbuf,vec_splat_u32(2)); + y = vec_sra(vec_sub(y,x),vec_splat_u32(4)); + + tmp2 = vec_ld (15, ref+16+i); + + *vbuf = vec_sub( *vbuf, y); + + t1 = t2; + + vbuf++; + + t2 = vec_perm(tmp1, tmp2, align); + + y = vec_add(t1,vec_sld(t1,t2,4)); + y = vec_sub(vec_splat_s32(8),y); + + t1 = t2; + + x = vec_sl(*vbuf,vec_splat_u32(2)); + y = vec_sra(vec_sub(y,x),vec_splat_u32(4)); + *vbuf = vec_sub( *vbuf, y); + + vbuf++; + +#endif + } + + snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); + b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS); + } + + { // Lift 3 + DWTELEM * const src = b+w2; + + vbuf = (vector signed int *)b; + vtmp = (vector signed int *)temp; + + i = 0; + align = vec_lvsl(0, src); + + for (; i>1); + temp[i+1] = src[i+1] - ((-3*(b[i+1] + b[i+2]))>>1); + temp[i+2] = src[i+2] - ((-3*(b[i+2] + b[i+3]))>>1); + temp[i+3] = src[i+3] - ((-3*(b[i+3] + b[i+4]))>>1); +#else + tmp1 = vec_ld(0,src+i); + t1 = vec_add(vbuf[0],vec_sld(vbuf[0],vbuf[1],4)); + tmp2 = vec_ld(15,src+i); + t1 = vec_sub(vec_splat_s32(0),t1); //bad! 
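+                    /* t1 now holds -(b[i]+b[i+1]); the vec_add() below forms
+                     * t1+t1+t1 = -3*(b[i]+b[i+1]) without a multiply, and the
+                     * following vec_sra(..,1) and vec_sub(t2,t1) reproduce the
+                     * scalar temp[i] = src[i] - ((-3*(b[i]+b[i+1]))>>1)
+                     * exactly: negating before the shift keeps the arithmetic
+                     * >> rounding identical to the reference variant above. */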
+ t1 = vec_add(t1,vec_add(t1,t1)); + t2 = vec_perm(tmp1 ,tmp2 ,align); + t1 = vec_sra(t1,vec_splat_u32(1)); + vbuf++; + *vtmp = vec_sub(t2,t1); + vtmp++; + +#endif + + } + + snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -3, 0, 1); + } + + { + //Interleave + int a; + vector signed int *t = (vector signed int *)temp, + *v = (vector signed int *)b; + + snow_interleave_line_header(&i, width, b, temp); + + for (; (i & 0xE) != 0xE; i-=2){ + b[i+1] = temp[i>>1]; + b[i] = b[i>>1]; + } + for (i-=14; i>=0; i-=16){ + a=i/4; + + v[a+3]=vec_mergel(v[(a>>1)+1],t[(a>>1)+1]); + v[a+2]=vec_mergeh(v[(a>>1)+1],t[(a>>1)+1]); + v[a+1]=vec_mergel(v[a>>1],t[a>>1]); + v[a]=vec_mergeh(v[a>>1],t[a>>1]); + + } + + } +} + +void ff_snow_vertical_compose97i_altivec(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width) +{ + int i, w4 = width/4; + vector signed int *v0, *v1,*v2,*v3,*v4,*v5; + vector signed int t1, t2; + + v0=(vector signed int *)b0; + v1=(vector signed int *)b1; + v2=(vector signed int *)b2; + v3=(vector signed int *)b3; + v4=(vector signed int *)b4; + v5=(vector signed int *)b5; + + for (i=0; i< w4;i++) + { + + #if 0 + b4[i] -= (3*(b3[i] + b5[i])+4)>>3; + b3[i] -= ((b2[i] + b4[i])); + b2[i] += ((b1[i] + b3[i])+4*b2[i]+8)>>4; + b1[i] += (3*(b0[i] + b2[i]))>>1; + #else + t1 = vec_add(v3[i], v5[i]); + t2 = vec_add(t1, vec_add(t1,t1)); + t1 = vec_add(t2, vec_splat_s32(4)); + v4[i] = vec_sub(v4[i], vec_sra(t1,vec_splat_u32(3))); + + v3[i] = vec_sub(v3[i], vec_add(v2[i], v4[i])); + + t1 = vec_add(vec_splat_s32(8), vec_add(v1[i], v3[i])); + t2 = vec_sl(v2[i], vec_splat_u32(2)); + v2[i] = vec_add(v2[i], vec_sra(vec_add(t1,t2),vec_splat_u32(4))); + t1 = vec_add(v0[i], v2[i]); + t2 = vec_add(t1, vec_add(t1,t1)); + v1[i] = vec_add(v1[i], vec_sra(t2,vec_splat_u32(1))); + + #endif + } + + for(i*=4; i < width; i++) + { + b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; + b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS; + b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS; + b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS; + } +} + +#define LOAD_BLOCKS \ + tmp1 = vec_ld(0, &block[3][y*src_stride]);\ + align = vec_lvsl(0, &block[3][y*src_stride]);\ + tmp2 = vec_ld(15, &block[3][y*src_stride]);\ +\ + b3 = vec_perm(tmp1,tmp2,align);\ +\ + tmp1 = vec_ld(0, &block[2][y*src_stride]);\ + align = vec_lvsl(0, &block[2][y*src_stride]);\ + tmp2 = vec_ld(15, &block[2][y*src_stride]);\ +\ + b2 = vec_perm(tmp1,tmp2,align);\ +\ + tmp1 = vec_ld(0, &block[1][y*src_stride]);\ + align = vec_lvsl(0, &block[1][y*src_stride]);\ + tmp2 = vec_ld(15, &block[1][y*src_stride]);\ +\ + b1 = vec_perm(tmp1,tmp2,align);\ +\ + tmp1 = vec_ld(0, &block[0][y*src_stride]);\ + align = vec_lvsl(0, &block[0][y*src_stride]);\ + tmp2 = vec_ld(15, &block[0][y*src_stride]);\ +\ + b0 = vec_perm(tmp1,tmp2,align); + +#define LOAD_OBMCS \ + tmp1 = vec_ld(0, obmc1);\ + align = vec_lvsl(0, obmc1);\ + tmp2 = vec_ld(15, obmc1);\ +\ + ob1 = vec_perm(tmp1,tmp2,align);\ +\ + tmp1 = vec_ld(0, obmc2);\ + align = vec_lvsl(0, obmc2);\ + tmp2 = vec_ld(15, obmc2);\ +\ + ob2 = vec_perm(tmp1,tmp2,align);\ +\ + tmp1 = vec_ld(0, obmc3);\ + align = vec_lvsl(0, obmc3);\ + tmp2 = vec_ld(15, obmc3);\ +\ + ob3 = vec_perm(tmp1,tmp2,align);\ +\ + tmp1 = vec_ld(0, obmc4);\ + align = vec_lvsl(0, obmc4);\ + tmp2 = vec_ld(15, obmc4);\ +\ + ob4 = vec_perm(tmp1,tmp2,align); + +/* interleave logic + * h1 <- [ a,b,a,b, a,b,a,b, a,b,a,b, a,b,a,b ] + * h2 <- [ c,d,c,d, c,d,c,d, c,d,c,d, c,d,c,d ] + * h <- [ a,b,c,d, a,b,c,d, a,b,c,d, a,b,c,d ] + */ + 
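+/* A worked example of the interleave, shown for the first four bytes of
+ * each operand (the real vectors carry sixteen): with ob1 = [a0,a1,..],
+ * ob2 = [b0,b1,..], ob3 = [c0,c1,..], ob4 = [d0,d1,..],
+ *   h1 = vec_mergeh(ob1,ob2) -> [a0,b0, a1,b1, ...]   (16-bit lanes)
+ *   h2 = vec_mergeh(ob3,ob4) -> [c0,d0, c1,d1, ...]
+ *   ih = vec_mergeh(h1,h2)   -> [a0,b0,c0,d0, a1,b1,c1,d1, ...]
+ * The block pixels b3..b0 are interleaved the same way into il, so
+ * vec_msum(ih, il, 0) produces one 32-bit sum of the four weight*pixel
+ * products per destination pixel. */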
+#define STEPS_0_1\
+    h1 = (vector unsigned short)\
+        vec_mergeh(ob1, ob2);\
+\
+    h2 = (vector unsigned short)\
+        vec_mergeh(ob3, ob4);\
+\
+    ih = (vector unsigned char)\
+        vec_mergeh(h1,h2);\
+\
+    l1 = (vector unsigned short) vec_mergeh(b3, b2);\
+\
+    ih1 = (vector unsigned char) vec_mergel(h1, h2);\
+\
+    l2 = (vector unsigned short) vec_mergeh(b1, b0);\
+\
+    il = (vector unsigned char) vec_mergeh(l1, l2);\
+\
+    v[0] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
+\
+    il1 = (vector unsigned char) vec_mergel(l1, l2);\
+\
+    v[1] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
+
+#define FINAL_STEP_SCALAR\
+        for(x=0; x<b_w; x++)\
+            if(add){\
+                vbuf[x] += dst[x + src_x];\
+                vbuf[x] = (vbuf[x] + (1<<(FRAC_BITS-1))) >> FRAC_BITS;\
+                if(vbuf[x]&(~255)) vbuf[x]= ~(vbuf[x]>>31);\
+                dst8[x + y*src_stride] = vbuf[x];\
+            }else{\
+                dst[x + src_x] -= vbuf[x];\
+            }
+
+static void inner_add_yblock_bw_8_obmc_16_altivec(uint8_t *obmc,
+                                                  const int obmc_stride,
+                                                  uint8_t * * block, int b_w,
+                                                  int b_h, int src_x, int src_y,
+                                                  int src_stride, slice_buffer * sb,
+                                                  int add, uint8_t * dst8)
+{
+    int y, x;
+    DWTELEM * dst;
+    vector unsigned short h1, h2, l1, l2;
+    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+    vector unsigned char b0,b1,b2,b3;
+    vector unsigned char ob1,ob2,ob3,ob4;
+
+    DECLARE_ALIGNED_16(int, vbuf[16]);
+    vector signed int *v = (vector signed int *)vbuf, *d;
+
+    for(y=0; y<b_h; y++){
+        uint8_t *obmc1= obmc + y*obmc_stride;
+        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+        dst = slice_buffer_get_line(sb, src_y + y);
+        d = (vector signed int *)(dst + src_x);
+
+//FIXME i could avoid some loads!
+
+        // load blocks
+        LOAD_BLOCKS
+
+        // load obmcs
+        LOAD_OBMCS
+
+        // steps 0 1
+        STEPS_0_1
+
+        FINAL_STEP_SCALAR
+
+    }
+
+}
+
+#define STEPS_2_3\
+    h1 = (vector unsigned short) vec_mergel(ob1, ob2);\
+\
+    h2 = (vector unsigned short) vec_mergel(ob3, ob4);\
+\
+    ih = (vector unsigned char) vec_mergeh(h1,h2);\
+\
+    l1 = (vector unsigned short) vec_mergel(b3, b2);\
+\
+    l2 = (vector unsigned short) vec_mergel(b1, b0);\
+\
+    ih1 = (vector unsigned char) vec_mergel(h1,h2);\
+\
+    il = (vector unsigned char) vec_mergeh(l1,l2);\
+\
+    v[2] = (vector signed int) vec_msum(ih, il, vec_splat_u32(0));\
+\
+    il1 = (vector unsigned char) vec_mergel(l1,l2);\
+\
+    v[3] = (vector signed int) vec_msum(ih1, il1, vec_splat_u32(0));
+
+
+static void inner_add_yblock_bw_16_obmc_32_altivec(uint8_t *obmc,
+                                                   const int obmc_stride,
+                                                   uint8_t * * block, int b_w,
+                                                   int b_h, int src_x, int src_y,
+                                                   int src_stride, slice_buffer * sb,
+                                                   int add, uint8_t * dst8)
+{
+    int y, x;
+    DWTELEM * dst;
+    vector unsigned short h1, h2, l1, l2;
+    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+    vector unsigned char b0,b1,b2,b3;
+    vector unsigned char ob1,ob2,ob3,ob4;
+    DECLARE_ALIGNED_16(int, vbuf[b_w]);
+    vector signed int *v = (vector signed int *)vbuf, *d;
+
+    for(y=0; y<b_h; y++){
+        uint8_t *obmc1= obmc + y*obmc_stride;
+        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+        dst = slice_buffer_get_line(sb, src_y + y);
+        d = (vector signed int *)(dst + src_x);
+
+        // load blocks
+        LOAD_BLOCKS
+
+        // load obmcs
+        LOAD_OBMCS
+
+        // steps 0 1 2 3
+        STEPS_0_1
+
+        STEPS_2_3
+
+        FINAL_STEP_SCALAR
+
+    }
+}
+
+#define FINAL_STEP_VEC \
+\
+    if(add)\
+        {\
+            for(x=0; x<b_w/4; x++)\
+            {\
+                v[x] = vec_add(v[x], d[x]);\
+                v[x] = vec_sra(vec_add(v[x],\
+                                       vec_sl( vec_splat_s32(1),\
+                                               vec_splat_u32(7))),\
+                               vec_splat_u32(8));\
+\
+                mask = (vector bool int) vec_sl((vector signed int)\
+                        vec_cmpeq(v[x],v[x]), vec_splat_u32(8));\
+                mask = (vector bool int) vec_and(v[x], (vector signed int) mask);\
+\
+                mask = (vector bool int)\
+                        vec_cmpeq((vector signed int) mask, vec_splat_s32(0));\
+\
+                vs = vec_sra(v[x], vec_splat_u32(-1));\
+                vs = vec_nor(vs, vs);\
+\
+                v[x] = vec_sel(vs, v[x], mask);\
+            }\
+\
+            for(x=0; x<b_w; x++)\
+                dst8[x + y*src_stride] = vbuf[x];\
+\
+        }\
+    else\
+        for(x=0; x<b_w/4; x++)\
+            d[x] = vec_sub(d[x], v[x]);
+
+static void inner_add_yblock_a_bw_8_obmc_16_altivec(uint8_t *obmc,
+                                                    const int obmc_stride,
+                                                    uint8_t * * block, int b_w,
+                                                    int b_h, int src_x, int src_y,
+                                                    int src_stride, slice_buffer * sb,
+                                                    int add, uint8_t * dst8)
+{
+    int y, x;
+    DWTELEM * dst;
+    vector bool int mask;
+    vector signed int vs;
+    vector unsigned short h1, h2, l1, l2;
+    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+    vector unsigned char b0,b1,b2,b3;
+    vector unsigned char ob1,ob2,ob3,ob4;
+
+    DECLARE_ALIGNED_16(int, vbuf[16]);
+    vector signed int *v = (vector signed int *)vbuf, *d;
+
+    for(y=0; y<b_h; y++){
+        uint8_t *obmc1= obmc + y*obmc_stride;
+        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+        dst = slice_buffer_get_line(sb, src_y + y);
+        d = (vector signed int *)(dst + src_x);
+
+//FIXME i could avoid some loads!
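+
+        /* This is one of the aligned ("a_") variants: the dispatcher
+         * ff_snow_inner_add_yblock_altivec below only selects it when
+         * (src_x & 15) == 0, so d points at suitably aligned DWTELEMs and
+         * FINAL_STEP_VEC can add, round and clip with AltiVec operations
+         * instead of the scalar FINAL_STEP_SCALAR write-back loop. */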
+
+        // load blocks
+        LOAD_BLOCKS
+
+        // load obmcs
+        LOAD_OBMCS
+
+        // steps 0 1
+        STEPS_0_1
+
+        FINAL_STEP_VEC
+
+    }
+
+}
+
+static void inner_add_yblock_a_bw_16_obmc_32_altivec(uint8_t *obmc,
+                                                     const int obmc_stride,
+                                                     uint8_t * * block, int b_w,
+                                                     int b_h, int src_x, int src_y,
+                                                     int src_stride, slice_buffer * sb,
+                                                     int add, uint8_t * dst8)
+{
+    int y, x;
+    DWTELEM * dst;
+    vector bool int mask;
+    vector signed int vs;
+    vector unsigned short h1, h2, l1, l2;
+    vector unsigned char ih, il, ih1, il1, tmp1, tmp2, align;
+    vector unsigned char b0,b1,b2,b3;
+    vector unsigned char ob1,ob2,ob3,ob4;
+    DECLARE_ALIGNED_16(int, vbuf[b_w]);
+    vector signed int *v = (vector signed int *)vbuf, *d;
+
+    for(y=0; y<b_h; y++){
+        uint8_t *obmc1= obmc + y*obmc_stride;
+        uint8_t *obmc2= obmc1+ (obmc_stride>>1);
+        uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
+        uint8_t *obmc4= obmc3+ (obmc_stride>>1);
+
+        dst = slice_buffer_get_line(sb, src_y + y);
+        d = (vector signed int *)(dst + src_x);
+
+        // load blocks
+        LOAD_BLOCKS
+
+        // load obmcs
+        LOAD_OBMCS
+
+        // steps 0 1 2 3
+        STEPS_0_1
+
+        STEPS_2_3
+
+        FINAL_STEP_VEC
+
+    }
+}
+
+
+void ff_snow_inner_add_yblock_altivec(uint8_t *obmc, const int obmc_stride,
+                                      uint8_t * * block, int b_w, int b_h,
+                                      int src_x, int src_y, int src_stride,
+                                      slice_buffer * sb, int add,
+                                      uint8_t * dst8)
+{
+    if (src_x&15) {
+        if (b_w == 16)
+            inner_add_yblock_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
+                                                   b_w, b_h, src_x, src_y,
+                                                   src_stride, sb, add, dst8);
+        else if (b_w == 8)
+            inner_add_yblock_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
+                                                  b_w, b_h, src_x, src_y,
+                                                  src_stride, sb, add, dst8);
+        else
+            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
+                                     src_y, src_stride, sb, add, dst8);
+    } else {
+        if (b_w == 16)
+            inner_add_yblock_a_bw_16_obmc_32_altivec(obmc, obmc_stride, block,
+                                                     b_w, b_h, src_x, src_y,
+                                                     src_stride, sb, add, dst8);
+        else if (b_w == 8)
+            inner_add_yblock_a_bw_8_obmc_16_altivec(obmc, obmc_stride, block,
+                                                    b_w, b_h, src_x, src_y,
+                                                    src_stride, sb, add, dst8);
+        else
+            ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,
+                                     src_y, src_stride, sb, add, dst8);
+    }
+}
+
+
+void snow_init_altivec(DSPContext* c, AVCodecContext *avctx)
+{
+    c->horizontal_compose97i = ff_snow_horizontal_compose97i_altivec;
+    c->vertical_compose97i = ff_snow_vertical_compose97i_altivec;
+    c->inner_add_yblock = ff_snow_inner_add_yblock_altivec;
+}
diff --git a/src/libffmpeg/libavcodec/ppc/types_altivec.h b/src/libffmpeg/libavcodec/ppc/types_altivec.h
new file mode 100644
index 000000000..f29026e04
--- /dev/null
+++ b/src/libffmpeg/libavcodec/ppc/types_altivec.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2006 Guillaume Poirier
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/*********************************************************************** + * Vector types + **********************************************************************/ +#define vec_u8_t vector unsigned char +#define vec_s8_t vector signed char +#define vec_u16_t vector unsigned short +#define vec_s16_t vector signed short +#define vec_u32_t vector unsigned int +#define vec_s32_t vector signed int + +/*********************************************************************** + * Null vector + **********************************************************************/ +#define LOAD_ZERO const vec_u8_t zerov = vec_splat_u8( 0 ) + +#define zero_u8v (vec_u8_t) zerov +#define zero_s8v (vec_s8_t) zerov +#define zero_u16v (vec_u16_t) zerov +#define zero_s16v (vec_s16_t) zerov +#define zero_u32v (vec_u32_t) zerov +#define zero_s32v (vec_s32_t) zerov diff --git a/src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c b/src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c new file mode 100644 index 000000000..114c9d41f --- /dev/null +++ b/src/libffmpeg/libavcodec/ppc/vc1dsp_altivec.c @@ -0,0 +1,338 @@ +/* + * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized + * Copyright (c) 2006 Konstantin Shishkov + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * + */ + +#include "../dsputil.h" + +#include "gcc_fixes.h" + +#include "dsputil_altivec.h" + +// main steps of 8x8 transform +#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \ +do { \ + t0 = vec_sl(vec_add(s0, s4), vec_2); \ + t0 = vec_add(vec_sl(t0, vec_1), t0); \ + t0 = vec_add(t0, vec_rnd); \ + t1 = vec_sl(vec_sub(s0, s4), vec_2); \ + t1 = vec_add(vec_sl(t1, vec_1), t1); \ + t1 = vec_add(t1, vec_rnd); \ + t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \ + t2 = vec_add(t2, vec_sl(s2, vec_4)); \ + t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \ + t3 = vec_sub(t3, vec_sl(s6, vec_4)); \ + t4 = vec_add(t0, t2); \ + t5 = vec_add(t1, t3); \ + t6 = vec_sub(t1, t3); \ + t7 = vec_sub(t0, t2); \ +\ + t0 = vec_sl(vec_add(s1, s3), vec_4); \ + t0 = vec_add(t0, vec_sl(s5, vec_3)); \ + t0 = vec_add(t0, vec_sl(s7, vec_2)); \ + t0 = vec_add(t0, vec_sub(s5, s3)); \ +\ + t1 = vec_sl(vec_sub(s1, s5), vec_4); \ + t1 = vec_sub(t1, vec_sl(s7, vec_3)); \ + t1 = vec_sub(t1, vec_sl(s3, vec_2)); \ + t1 = vec_sub(t1, vec_add(s1, s7)); \ +\ + t2 = vec_sl(vec_sub(s7, s3), vec_4); \ + t2 = vec_add(t2, vec_sl(s1, vec_3)); \ + t2 = vec_add(t2, vec_sl(s5, vec_2)); \ + t2 = vec_add(t2, vec_sub(s1, s7)); \ +\ + t3 = vec_sl(vec_sub(s5, s7), vec_4); \ + t3 = vec_sub(t3, vec_sl(s3, vec_3)); \ + t3 = vec_add(t3, vec_sl(s1, vec_2)); \ + t3 = vec_sub(t3, vec_add(s3, s5)); \ +\ + s0 = vec_add(t4, t0); \ + s1 = vec_add(t5, t1); \ + s2 = vec_add(t6, t2); \ + s3 = vec_add(t7, t3); \ + s4 = vec_sub(t7, t3); \ + s5 = vec_sub(t6, t2); \ + s6 = vec_sub(t5, t1); \ + s7 = vec_sub(t4, t0); \ +}while(0) + +#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \ +do { \ + s0 = vec_sra(s0, vec_3); \ + s1 = vec_sra(s1, vec_3); \ + s2 = vec_sra(s2, vec_3); \ + s3 = vec_sra(s3, vec_3); \ + s4 = vec_sra(s4, vec_3); \ + s5 = vec_sra(s5, vec_3); \ + s6 = vec_sra(s6, vec_3); \ + s7 = vec_sra(s7, vec_3); \ +}while(0) + +#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \ +do { \ + s0 = vec_sra(s0, vec_7); \ + s1 = vec_sra(s1, vec_7); \ + s2 = vec_sra(s2, vec_7); \ + s3 = vec_sra(s3, vec_7); \ + s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \ + s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \ + s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \ + s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \ +}while(0) + +/* main steps of 4x4 transform */ +#define STEP4(s0, s1, s2, s3, vec_rnd) \ +do { \ + t1 = vec_add(vec_sl(s0, vec_4), s0); \ + t1 = vec_add(t1, vec_rnd); \ + t2 = vec_add(vec_sl(s2, vec_4), s2); \ + t0 = vec_add(t1, t2); \ + t1 = vec_sub(t1, t2); \ + t3 = vec_sl(vec_sub(s3, s1), vec_1); \ + t3 = vec_add(t3, vec_sl(t3, vec_2)); \ + t2 = vec_add(t3, vec_sl(s1, vec_5)); \ + t3 = vec_add(t3, vec_sl(s3, vec_3)); \ + t3 = vec_add(t3, vec_sl(s3, vec_2)); \ + s0 = vec_add(t0, t2); \ + s1 = vec_sub(t1, t3); \ + s2 = vec_add(t1, t3); \ + s3 = vec_sub(t0, t2); \ +}while (0) + +#define SHIFT_HOR4(s0, s1, s2, s3) \ + s0 = vec_sra(s0, vec_3); \ + s1 = vec_sra(s1, vec_3); \ + s2 = vec_sra(s2, vec_3); \ + s3 = vec_sra(s3, vec_3); + +#define SHIFT_VERT4(s0, s1, s2, s3) \ + s0 = vec_sra(s0, vec_7); \ + s1 = vec_sra(s1, vec_7); \ + s2 = vec_sra(s2, vec_7); \ + s3 = vec_sra(s3, vec_7); + +/** Do inverse transform on 8x8 block +*/ +static void vc1_inv_trans_8x8_altivec(DCTELEM block[64]) +{ + vector signed short src0, src1, src2, src3, src4, src5, src6, 
src7; + vector signed int s0, s1, s2, s3, s4, s5, s6, s7; + vector signed int s8, s9, sA, sB, sC, sD, sE, sF; + vector signed int t0, t1, t2, t3, t4, t5, t6, t7; + const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4)); + const vector unsigned int vec_7 = vec_splat_u32(7); + const vector unsigned int vec_5 = vec_splat_u32(5); + const vector unsigned int vec_4 = vec_splat_u32(4); + const vector signed int vec_4s = vec_splat_s32(4); + const vector unsigned int vec_3 = vec_splat_u32(3); + const vector unsigned int vec_2 = vec_splat_u32(2); + const vector signed int vec_1s = vec_splat_s32(1); + const vector unsigned int vec_1 = vec_splat_u32(1); + + + src0 = vec_ld( 0, block); + src1 = vec_ld( 16, block); + src2 = vec_ld( 32, block); + src3 = vec_ld( 48, block); + src4 = vec_ld( 64, block); + src5 = vec_ld( 80, block); + src6 = vec_ld( 96, block); + src7 = vec_ld(112, block); + + TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); + s0 = vec_unpackl(src0); + s1 = vec_unpackl(src1); + s2 = vec_unpackl(src2); + s3 = vec_unpackl(src3); + s4 = vec_unpackl(src4); + s5 = vec_unpackl(src5); + s6 = vec_unpackl(src6); + s7 = vec_unpackl(src7); + s8 = vec_unpackh(src0); + s9 = vec_unpackh(src1); + sA = vec_unpackh(src2); + sB = vec_unpackh(src3); + sC = vec_unpackh(src4); + sD = vec_unpackh(src5); + sE = vec_unpackh(src6); + sF = vec_unpackh(src7); + STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s); + SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7); + STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s); + SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF); + src0 = vec_pack(s8, s0); + src1 = vec_pack(s9, s1); + src2 = vec_pack(sA, s2); + src3 = vec_pack(sB, s3); + src4 = vec_pack(sC, s4); + src5 = vec_pack(sD, s5); + src6 = vec_pack(sE, s6); + src7 = vec_pack(sF, s7); + TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); + + s0 = vec_unpackl(src0); + s1 = vec_unpackl(src1); + s2 = vec_unpackl(src2); + s3 = vec_unpackl(src3); + s4 = vec_unpackl(src4); + s5 = vec_unpackl(src5); + s6 = vec_unpackl(src6); + s7 = vec_unpackl(src7); + s8 = vec_unpackh(src0); + s9 = vec_unpackh(src1); + sA = vec_unpackh(src2); + sB = vec_unpackh(src3); + sC = vec_unpackh(src4); + sD = vec_unpackh(src5); + sE = vec_unpackh(src6); + sF = vec_unpackh(src7); + STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64); + SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7); + STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64); + SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF); + src0 = vec_pack(s8, s0); + src1 = vec_pack(s9, s1); + src2 = vec_pack(sA, s2); + src3 = vec_pack(sB, s3); + src4 = vec_pack(sC, s4); + src5 = vec_pack(sD, s5); + src6 = vec_pack(sE, s6); + src7 = vec_pack(sF, s7); + + vec_st(src0, 0, block); + vec_st(src1, 16, block); + vec_st(src2, 32, block); + vec_st(src3, 48, block); + vec_st(src4, 64, block); + vec_st(src5, 80, block); + vec_st(src6, 96, block); + vec_st(src7,112, block); +} + +/** Do inverse transform on 8x4 part of block +*/ +static void vc1_inv_trans_8x4_altivec(DCTELEM block[64], int n) +{ + vector signed short src0, src1, src2, src3, src4, src5, src6, src7; + vector signed int s0, s1, s2, s3, s4, s5, s6, s7; + vector signed int s8, s9, sA, sB, sC, sD, sE, sF; + vector signed int t0, t1, t2, t3, t4, t5, t6, t7; + const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4)); + const vector unsigned int vec_7 = vec_splat_u32(7); + const vector unsigned int vec_5 = vec_splat_u32(5); + const vector unsigned int vec_4 = vec_splat_u32(4); + const vector signed int vec_4s = vec_splat_s32(4); + const 
vector unsigned int vec_3 = vec_splat_u32(3); + const vector unsigned int vec_2 = vec_splat_u32(2); + const vector unsigned int vec_1 = vec_splat_u32(1); + + src0 = vec_ld( 0, block); + src1 = vec_ld( 16, block); + src2 = vec_ld( 32, block); + src3 = vec_ld( 48, block); + src4 = vec_ld( 64, block); + src5 = vec_ld( 80, block); + src6 = vec_ld( 96, block); + src7 = vec_ld(112, block); + + TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); + s0 = vec_unpackl(src0); + s1 = vec_unpackl(src1); + s2 = vec_unpackl(src2); + s3 = vec_unpackl(src3); + s4 = vec_unpackl(src4); + s5 = vec_unpackl(src5); + s6 = vec_unpackl(src6); + s7 = vec_unpackl(src7); + s8 = vec_unpackh(src0); + s9 = vec_unpackh(src1); + sA = vec_unpackh(src2); + sB = vec_unpackh(src3); + sC = vec_unpackh(src4); + sD = vec_unpackh(src5); + sE = vec_unpackh(src6); + sF = vec_unpackh(src7); + STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s); + SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7); + STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s); + SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF); + src0 = vec_pack(s8, s0); + src1 = vec_pack(s9, s1); + src2 = vec_pack(sA, s2); + src3 = vec_pack(sB, s3); + src4 = vec_pack(sC, s4); + src5 = vec_pack(sD, s5); + src6 = vec_pack(sE, s6); + src7 = vec_pack(sF, s7); + TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7); + + if(!n){ // upper half of block + s0 = vec_unpackh(src0); + s1 = vec_unpackh(src1); + s2 = vec_unpackh(src2); + s3 = vec_unpackh(src3); + s8 = vec_unpackl(src0); + s9 = vec_unpackl(src1); + sA = vec_unpackl(src2); + sB = vec_unpackl(src3); + STEP4(s0, s1, s2, s3, vec_64); + SHIFT_VERT4(s0, s1, s2, s3); + STEP4(s8, s9, sA, sB, vec_64); + SHIFT_VERT4(s8, s9, sA, sB); + src0 = vec_pack(s0, s8); + src1 = vec_pack(s1, s9); + src2 = vec_pack(s2, sA); + src3 = vec_pack(s3, sB); + + vec_st(src0, 0, block); + vec_st(src1, 16, block); + vec_st(src2, 32, block); + vec_st(src3, 48, block); + } else { //lower half of block + s0 = vec_unpackh(src4); + s1 = vec_unpackh(src5); + s2 = vec_unpackh(src6); + s3 = vec_unpackh(src7); + s8 = vec_unpackl(src4); + s9 = vec_unpackl(src5); + sA = vec_unpackl(src6); + sB = vec_unpackl(src7); + STEP4(s0, s1, s2, s3, vec_64); + SHIFT_VERT4(s0, s1, s2, s3); + STEP4(s8, s9, sA, sB, vec_64); + SHIFT_VERT4(s8, s9, sA, sB); + src4 = vec_pack(s0, s8); + src5 = vec_pack(s1, s9); + src6 = vec_pack(s2, sA); + src7 = vec_pack(s3, sB); + + vec_st(src4, 64, block); + vec_st(src5, 80, block); + vec_st(src6, 96, block); + vec_st(src7,112, block); + } +} + + +void vc1dsp_init_altivec(DSPContext* dsp, AVCodecContext *avctx) { + dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec; + dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec; +} -- cgit v1.2.3 From 7c3728d769962d288b73cc945c3143ae68726984 Mon Sep 17 00:00:00 2001 From: Miguel Freitas Date: Sun, 28 Jan 2007 18:38:32 +0000 Subject: another ffmpeg sync to include h264 security fixes CVS patchset: 8573 CVS date: 2007/01/28 18:38:32 --- src/libffmpeg/libavcodec/ppc/dsputil_altivec.c | 6 ++---- src/libffmpeg/libavcodec/ppc/float_altivec.c | 3 +-- 2 files changed, 3 insertions(+), 6 deletions(-) (limited to 'src/libffmpeg/libavcodec/ppc') diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c index 6f48893a4..bbc53d761 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c @@ -1107,12 +1107,10 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1); register vector signed short srcV, dstV; \ register vector 
signed short but0, but1, but2, op1, op2, op3; \ src1 = vec_ld(stride * i, src); \ - if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8) \ - src2 = vec_ld((stride * i) + 16, src); \ + src2 = vec_ld((stride * i) + 15, src); \ srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \ dst1 = vec_ld(stride * i, dst); \ - if ((((stride * i) + (unsigned long)dst) & 0x0000000F) > 8) \ - dst2 = vec_ld((stride * i) + 16, dst); \ + dst2 = vec_ld((stride * i) + 15, dst); \ dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \ /* promote the unsigned chars to signed shorts */ \ /* we're in the 8x8 function, we only care for the first 8 */ \ diff --git a/src/libffmpeg/libavcodec/ppc/float_altivec.c b/src/libffmpeg/libavcodec/ppc/float_altivec.c index c6e43dec2..22c2de61a 100644 --- a/src/libffmpeg/libavcodec/ppc/float_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/float_altivec.c @@ -76,7 +76,6 @@ static void vector_fmul_add_add_altivec(float *dst, const float *src0, vector unsigned char align = vec_lvsr(0,dst), mask = vec_lvsl(0, dst); - t0 = vec_ld(0, dst); #if 0 //FIXME: there is still something wrong if (step == 2) { int y; @@ -134,6 +133,7 @@ static void vector_fmul_add_add_altivec(float *dst, const float *src0, #endif if (step == 1 && src3 == 0) for (i=0; i Date: Mon, 2 Apr 2007 20:44:04 +0200 Subject: Migrate all .cvsignore files to .hgignore. --HG-- rename : .cvsignore => .hgignore rename : doc/.cvsignore => doc/.hgignore rename : doc/faq/.cvsignore => doc/faq/.hgignore rename : doc/hackersguide/.cvsignore => doc/hackersguide/.hgignore rename : doc/man/.cvsignore => doc/man/.hgignore rename : doc/man/en/.cvsignore => doc/man/en/.hgignore rename : include/.cvsignore => include/.hgignore rename : intl/.cvsignore => intl/.hgignore rename : lib/.cvsignore => lib/.hgignore rename : m4/.cvsignore => m4/.hgignore rename : misc/.cvsignore => misc/.hgignore rename : misc/fonts/.cvsignore => misc/fonts/.hgignore rename : po/.cvsignore => po/.hgignore rename : src/.cvsignore => src/.hgignore rename : src/audio_out/.cvsignore => src/audio_out/.hgignore rename : src/combined/.cvsignore => src/combined/.hgignore rename : src/demuxers/.cvsignore => src/demuxers/.hgignore rename : src/dxr3/.cvsignore => src/dxr3/.hgignore rename : src/input/.cvsignore => src/input/.hgignore rename : src/input/dvb/.cvsignore => src/input/dvb/.hgignore rename : src/input/libdvdnav/.cvsignore => src/input/libdvdnav/.hgignore rename : src/input/libreal/.cvsignore => src/input/libreal/.hgignore rename : src/input/librtsp/.cvsignore => src/input/librtsp/.hgignore rename : src/input/vcd/.cvsignore => src/input/vcd/.hgignore rename : src/input/vcd/libcdio/.cvsignore => src/input/vcd/libcdio/.hgignore rename : src/input/vcd/libcdio/MSWindows/.cvsignore => src/input/vcd/libcdio/MSWindows/.hgignore rename : src/input/vcd/libcdio/cdio/.cvsignore => src/input/vcd/libcdio/cdio/.hgignore rename : src/input/vcd/libcdio/image/.cvsignore => src/input/vcd/libcdio/image/.hgignore rename : src/input/vcd/libvcd/.cvsignore => src/input/vcd/libvcd/.hgignore rename : src/input/vcd/libvcd/libvcd/.cvsignore => src/input/vcd/libvcd/libvcd/.hgignore rename : src/liba52/.cvsignore => src/liba52/.hgignore rename : src/libdts/.cvsignore => src/libdts/.hgignore rename : src/libfaad/.cvsignore => src/libfaad/.hgignore rename : src/libfaad/codebook/.cvsignore => src/libfaad/codebook/.hgignore rename : src/libffmpeg/.cvsignore => src/libffmpeg/.hgignore rename : src/libffmpeg/libavcodec/.cvsignore => src/libffmpeg/libavcodec/.hgignore rename : 
src/libffmpeg/libavcodec/alpha/.cvsignore => src/libffmpeg/libavcodec/alpha/.hgignore rename : src/libffmpeg/libavcodec/armv4l/.cvsignore => src/libffmpeg/libavcodec/armv4l/.hgignore rename : src/libffmpeg/libavcodec/i386/.cvsignore => src/libffmpeg/libavcodec/i386/.hgignore rename : src/libffmpeg/libavcodec/libpostproc/.cvsignore => src/libffmpeg/libavcodec/libpostproc/.hgignore rename : src/libffmpeg/libavcodec/mlib/.cvsignore => src/libffmpeg/libavcodec/mlib/.hgignore rename : src/libffmpeg/libavcodec/ppc/.cvsignore => src/libffmpeg/libavcodec/ppc/.hgignore rename : src/libffmpeg/libavcodec/sparc/.cvsignore => src/libffmpeg/libavcodec/sparc/.hgignore rename : src/libffmpeg/libavutil/.cvsignore => src/libffmpeg/libavutil/.hgignore rename : src/libflac/.cvsignore => src/libflac/.hgignore rename : src/liblpcm/.cvsignore => src/liblpcm/.hgignore rename : src/libmad/.cvsignore => src/libmad/.hgignore rename : src/libmpeg2/.cvsignore => src/libmpeg2/.hgignore rename : src/libmpeg2new/.cvsignore => src/libmpeg2new/.hgignore rename : src/libmpeg2new/include/.cvsignore => src/libmpeg2new/include/.hgignore rename : src/libmpeg2new/libmpeg2/.cvsignore => src/libmpeg2new/libmpeg2/.hgignore rename : src/libmusepack/.cvsignore => src/libmusepack/.hgignore rename : src/libmusepack/musepack/.cvsignore => src/libmusepack/musepack/.hgignore rename : src/libreal/.cvsignore => src/libreal/.hgignore rename : src/libspeex/.cvsignore => src/libspeex/.hgignore rename : src/libspucc/.cvsignore => src/libspucc/.hgignore rename : src/libspucmml/.cvsignore => src/libspucmml/.hgignore rename : src/libspudec/.cvsignore => src/libspudec/.hgignore rename : src/libspudvb/.cvsignore => src/libspudvb/.hgignore rename : src/libsputext/.cvsignore => src/libsputext/.hgignore rename : src/libtheora/.cvsignore => src/libtheora/.hgignore rename : src/libvorbis/.cvsignore => src/libvorbis/.hgignore rename : src/libw32dll/.cvsignore => src/libw32dll/.hgignore rename : src/libw32dll/DirectShow/.cvsignore => src/libw32dll/DirectShow/.hgignore rename : src/libw32dll/dmo/.cvsignore => src/libw32dll/dmo/.hgignore rename : src/libw32dll/qtx/.cvsignore => src/libw32dll/qtx/.hgignore rename : src/libw32dll/qtx/qtxsdk/.cvsignore => src/libw32dll/qtx/qtxsdk/.hgignore rename : src/libw32dll/wine/.cvsignore => src/libw32dll/wine/.hgignore rename : src/libxineadec/.cvsignore => src/libxineadec/.hgignore rename : src/libxineadec/gsm610/.cvsignore => src/libxineadec/gsm610/.hgignore rename : src/libxineadec/nosefart/.cvsignore => src/libxineadec/nosefart/.hgignore rename : src/libxinevdec/.cvsignore => src/libxinevdec/.hgignore rename : src/post/.cvsignore => src/post/.hgignore rename : src/post/audio/.cvsignore => src/post/audio/.hgignore rename : src/post/deinterlace/.cvsignore => src/post/deinterlace/.hgignore rename : src/post/deinterlace/plugins/.cvsignore => src/post/deinterlace/plugins/.hgignore rename : src/post/goom/.cvsignore => src/post/goom/.hgignore rename : src/post/mosaico/.cvsignore => src/post/mosaico/.hgignore rename : src/post/planar/.cvsignore => src/post/planar/.hgignore rename : src/post/visualizations/.cvsignore => src/post/visualizations/.hgignore rename : src/video_out/.cvsignore => src/video_out/.hgignore rename : src/video_out/libdha/.cvsignore => src/video_out/libdha/.hgignore rename : src/video_out/libdha/bin/.cvsignore => src/video_out/libdha/bin/.hgignore rename : src/video_out/libdha/kernelhelper/.cvsignore => src/video_out/libdha/kernelhelper/.hgignore rename : src/video_out/libdha/oth/.cvsignore => 
src/video_out/libdha/oth/.hgignore rename : src/video_out/libdha/sysdep/.cvsignore => src/video_out/libdha/sysdep/.hgignore rename : src/video_out/macosx/.cvsignore => src/video_out/macosx/.hgignore rename : src/video_out/vidix/.cvsignore => src/video_out/vidix/.hgignore rename : src/video_out/vidix/drivers/.cvsignore => src/video_out/vidix/drivers/.hgignore rename : src/xine-engine/.cvsignore => src/xine-engine/.hgignore rename : src/xine-utils/.cvsignore => src/xine-utils/.hgignore rename : win32/.cvsignore => win32/.hgignore rename : win32/include/.cvsignore => win32/include/.hgignore --- src/libffmpeg/libavcodec/ppc/.cvsignore | 6 ------ src/libffmpeg/libavcodec/ppc/.hgignore | 6 ++++++ 2 files changed, 6 insertions(+), 6 deletions(-) delete mode 100644 src/libffmpeg/libavcodec/ppc/.cvsignore create mode 100644 src/libffmpeg/libavcodec/ppc/.hgignore (limited to 'src/libffmpeg/libavcodec/ppc') diff --git a/src/libffmpeg/libavcodec/ppc/.cvsignore b/src/libffmpeg/libavcodec/ppc/.cvsignore deleted file mode 100644 index 7d926a554..000000000 --- a/src/libffmpeg/libavcodec/ppc/.cvsignore +++ /dev/null @@ -1,6 +0,0 @@ -Makefile -Makefile.in -.libs -.deps -*.lo -*.la diff --git a/src/libffmpeg/libavcodec/ppc/.hgignore b/src/libffmpeg/libavcodec/ppc/.hgignore new file mode 100644 index 000000000..7d926a554 --- /dev/null +++ b/src/libffmpeg/libavcodec/ppc/.hgignore @@ -0,0 +1,6 @@ +Makefile +Makefile.in +.libs +.deps +*.lo +*.la -- cgit v1.2.3