author | Miguel Freitas <miguelfreitas@users.sourceforge.net> | 2006-02-05 14:11:16 +0000
---|---|---
committer | Miguel Freitas <miguelfreitas@users.sourceforge.net> | 2006-02-05 14:11:16 +0000
commit | 40c9e400fdaf1e9bb49eb0d0f0e437025b0ab6e8 (patch) |
tree | 0a0d683a843d5d50b12233116b777e8e2780cad9 /src/libffmpeg/libavcodec/ppc |
parent | 9a299a12ea17a363354f451d2efd15bfbf3c7dc6 (diff) |
download | xine-lib-40c9e400fdaf1e9bb49eb0d0f0e437025b0ab6e8.tar.gz xine-lib-40c9e400fdaf1e9bb49eb0d0f0e437025b0ab6e8.tar.bz2 |
ffmpeg sync
CVS patchset: 7872
CVS date: 2006/02/05 14:11:16
Diffstat (limited to 'src/libffmpeg/libavcodec/ppc')
mode | path | lines changed
---|---|---
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/dsputil_altivec.c | 308
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/dsputil_altivec.h | 2
-rwxr-xr-x | src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c | 86
-rwxr-xr-x | src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c | 70
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/dsputil_ppc.c | 68
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/dsputil_ppc.h | 14
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/fdct_altivec.c | 2
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/fft_altivec.c | 52
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/gcc_fixes.h | 28
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/gmc_altivec.c | 32
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/idct_altivec.c | 250
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c | 42
-rw-r--r-- | src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c | 6
13 files changed, 480 insertions, 480 deletions
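
Nearly all of the churn in this sync is cosmetic: trailing whitespace and indentation are normalized, and the FSF postal address in the license headers moves from 59 Temple Place to 51 Franklin Street. The most visible functional change is in dsputil_h264_altivec.c, where the `__align16` attribute on stack temporaries is replaced by the `DECLARE_ALIGNED_16` macro. For orientation, the SAD routines that dominate the dsputil_altivec.c hunks below all share one AltiVec idiom: per-byte absolute differences via vec_max/vec_min/vec_sub, 4-byte partial sums via vec_sum4s, and a final fold via vec_sums. The sketch below condenses `sad16_altivec` as it appears in the file; the function name `sad16_sketch` is mine, and it assumes an AltiVec-enabled GCC with `<altivec.h>`:

```c
#include <altivec.h>
#include <stdint.h>

/* Condensed restatement of the sad16 AltiVec idiom from dsputil_altivec.c:
   per-byte |a-b| via max/min/sub, four running partial sums via vec_sum4s,
   final fold via vec_sums. pix1/pix2 may be unaligned. */
static int sad16_sketch(const uint8_t *pix1, const uint8_t *pix2,
                        int line_size, int h)
{
    const vector unsigned char zero = vec_splat_u8(0);
    vector unsigned int sad = vec_splat_u32(0);
    vector signed int sumdiffs;
    int s __attribute__((aligned(16)));
    int i;

    for (i = 0; i < h; i++) {
        /* Read potentially unaligned pixels into t1 and t2 (two aligned
           loads straddling the data, merged with a lvsl permute). */
        vector unsigned char t1 = vec_perm(vec_ld(0, pix1), vec_ld(16, pix1),
                                           vec_lvsl(0, pix1));
        vector unsigned char t2 = vec_perm(vec_ld(0, pix2), vec_ld(16, pix2),
                                           vec_lvsl(0, pix2));

        /* |t1 - t2| without a signed subtract: max minus min. */
        vector unsigned char t5 = vec_sub(vec_max(t1, t2), vec_min(t1, t2));

        /* Add each 4-pixel group into one of four running sums. */
        sad = vec_sum4s(t5, sad);

        pix1 += line_size;
        pix2 += line_size;
    }

    /* Fold the four partial sums and store the scalar result. */
    sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
    sumdiffs = vec_splat(sumdiffs, 3);
    vec_ste(sumdiffs, 0, &s);
    return s;
}
```

The sad8 and sse variants in the diff are small twists on the same loop: sad8 masks out the unused high 8 bytes before summing (the zeros do not change the sum), and the sse routines square the differences with vec_msum instead of accumulating them with vec_sum4s.
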
diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c index 57b687dfd..31464fb7a 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.c @@ -15,9 +15,9 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ - + #include "../dsputil.h" #include "gcc_fixes.h" @@ -44,7 +44,7 @@ static void sigill_handler (int sig) signal (sig, SIG_DFL); raise (sig); } - + canjump = 0; siglongjmp (jmpbuf, 1); } @@ -67,11 +67,11 @@ int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h /* Read unaligned pixels into our vectors. The vectors are as follows: pix1v: pix1[0]-pix1[15] - pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16] + pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16] */ tv = (vector unsigned char *) pix1; pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1)); - + tv = (vector unsigned char *) &pix2[0]; pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0])); @@ -86,7 +86,7 @@ int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h /* Add each 4 pixel group together and put 4 results into sad */ sad = vec_sum4s(t5, sad); - + pix1 += line_size; pix2 += line_size; } @@ -123,7 +123,7 @@ int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h */ tv = (vector unsigned char *) &pix2[0]; pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0])); - + for(i=0;i<h;i++) { /* Read unaligned pixels into our vectors. The vectors are as follows: @@ -144,18 +144,18 @@ int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h /* Add each 4 pixel group together and put 4 results into sad */ sad = vec_sum4s(t5, sad); - + pix1 += line_size; pix2v = pix3v; pix3 += line_size; - + } - + /* Sum up the four partial sums, and put the result into s */ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero); sumdiffs = vec_splat(sumdiffs, 3); vec_ste(sumdiffs, 0, &s); - return s; + return s; } int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) @@ -175,7 +175,7 @@ int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int vector signed int sumdiffs; sad = (vector unsigned int)vec_splat_u32(0); - + s = 0; /* @@ -184,7 +184,7 @@ int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int fact to avoid a potentially expensive unaligned read, as well as some splitting, and vector addition each time around the loop. Read unaligned pixels into our vectors. The vectors are as follows: - pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16] + pix2v: pix2[0]-pix2[15] pix2iv: pix2[1]-pix2[16] Split the pixel vectors into shorts */ tv = (vector unsigned char *) &pix2[0]; @@ -199,12 +199,12 @@ int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int pix2ilv = (vector unsigned short) vec_mergel(zero, pix2iv); t1 = vec_add(pix2hv, pix2ihv); t2 = vec_add(pix2lv, pix2ilv); - + for(i=0;i<h;i++) { /* Read unaligned pixels into our vectors. 
The vectors are as follows: pix1v: pix1[0]-pix1[15] - pix3v: pix3[0]-pix3[15] pix3iv: pix3[1]-pix3[16] + pix3v: pix3[0]-pix3[15] pix3iv: pix3[1]-pix3[16] */ tv = (vector unsigned char *) pix1; pix1v = vec_perm(tv[0], tv[1], vec_lvsl(0, pix1)); @@ -268,25 +268,25 @@ int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) vector unsigned char t1, t2, t3,t4, t5; vector unsigned int sad; vector signed int sumdiffs; - + sad = (vector unsigned int)vec_splat_u32(0); for(i=0;i<h;i++) { - /* Read potentially unaligned pixels into t1 and t2 */ + /* Read potentially unaligned pixels into t1 and t2 */ perm1 = vec_lvsl(0, pix1); pix1v = (vector unsigned char *) pix1; perm2 = vec_lvsl(0, pix2); pix2v = (vector unsigned char *) pix2; t1 = vec_perm(pix1v[0], pix1v[1], perm1); t2 = vec_perm(pix2v[0], pix2v[1], perm2); - - /* Calculate a sum of abs differences vector */ + + /* Calculate a sum of abs differences vector */ t3 = vec_max(t1, t2); t4 = vec_min(t1, t2); t5 = vec_sub(t3, t4); - - /* Add each 4 pixel group together and put 4 results into sad */ + + /* Add each 4 pixel group together and put 4 results into sad */ sad = vec_sum4s(t5, sad); pix1 += line_size; @@ -297,7 +297,7 @@ int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero); sumdiffs = vec_splat(sumdiffs, 3); vec_ste(sumdiffs, 0, &s); - + return s; } @@ -316,9 +316,9 @@ int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0); for(i=0;i<h;i++) { - /* Read potentially unaligned pixels into t1 and t2 - Since we're reading 16 pixels, and actually only want 8, - mask out the last 8 pixels. The 0s don't change the sum. */ + /* Read potentially unaligned pixels into t1 and t2 + Since we're reading 16 pixels, and actually only want 8, + mask out the last 8 pixels. The 0s don't change the sum. */ perm1 = vec_lvsl(0, pix1); pix1v = (vector unsigned char *) pix1; perm2 = vec_lvsl(0, pix2); @@ -326,12 +326,12 @@ int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) t1 = vec_and(vec_perm(pix1v[0], pix1v[1], perm1), permclear); t2 = vec_and(vec_perm(pix2v[0], pix2v[1], perm2), permclear); - /* Calculate a sum of abs differences vector */ + /* Calculate a sum of abs differences vector */ t3 = vec_max(t1, t2); t4 = vec_min(t1, t2); t5 = vec_sub(t3, t4); - /* Add each 4 pixel group together and put 4 results into sad */ + /* Add each 4 pixel group together and put 4 results into sad */ sad = vec_sum4s(t5, sad); pix1 += line_size; @@ -355,9 +355,9 @@ int pix_norm1_altivec(uint8_t *pix, int line_size) vector unsigned char pixv; vector unsigned int sv; vector signed int sum; - + sv = (vector unsigned int)vec_splat_u32(0); - + s = 0; for (i = 0; i < 16; i++) { /* Read in the potentially unaligned pixels */ @@ -391,16 +391,16 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) vector unsigned char t1, t2, t3,t4, t5; vector unsigned int sum; vector signed int sumsqr; - + sum = (vector unsigned int)vec_splat_u32(0); permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0); - + for(i=0;i<h;i++) { - /* Read potentially unaligned pixels into t1 and t2 - Since we're reading 16 pixels, and actually only want 8, - mask out the last 8 pixels. The 0s don't change the sum. 
*/ + /* Read potentially unaligned pixels into t1 and t2 + Since we're reading 16 pixels, and actually only want 8, + mask out the last 8 pixels. The 0s don't change the sum. */ perm1 = vec_lvsl(0, pix1); pix1v = (vector unsigned char *) pix1; perm2 = vec_lvsl(0, pix2); @@ -412,24 +412,24 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) Since we want to use unsigned chars, we can take advantage of the fact that abs(a-b)^2 = (a-b)^2. */ - - /* Calculate abs differences vector */ + + /* Calculate abs differences vector */ t3 = vec_max(t1, t2); t4 = vec_min(t1, t2); t5 = vec_sub(t3, t4); - + /* Square the values and add them to our sum */ sum = vec_msum(t5, t5, sum); - + pix1 += line_size; pix2 += line_size; } - + /* Sum up the four partial sums, and put the result into s */ sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero); sumsqr = vec_splat(sumsqr, 3); vec_ste(sumsqr, 0, &s); - + return s; } @@ -447,11 +447,11 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) vector unsigned char t1, t2, t3,t4, t5; vector unsigned int sum; vector signed int sumsqr; - + sum = (vector unsigned int)vec_splat_u32(0); - + for(i=0;i<h;i++) { - /* Read potentially unaligned pixels into t1 and t2 */ + /* Read potentially unaligned pixels into t1 and t2 */ perm1 = vec_lvsl(0, pix1); pix1v = (vector unsigned char *) pix1; perm2 = vec_lvsl(0, pix2); @@ -463,24 +463,24 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) Since we want to use unsigned chars, we can take advantage of the fact that abs(a-b)^2 = (a-b)^2. */ - - /* Calculate abs differences vector */ + + /* Calculate abs differences vector */ t3 = vec_max(t1, t2); t4 = vec_min(t1, t2); t5 = vec_sub(t3, t4); - + /* Square the values and add them to our sum */ sum = vec_msum(t5, t5, sum); - + pix1 += line_size; pix2 += line_size; } - + /* Sum up the four partial sums, and put the result into s */ sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero); sumsqr = vec_splat(sumsqr, 3); vec_ste(sumsqr, 0, &s); - + return s; } @@ -494,26 +494,26 @@ int pix_sum_altivec(uint8_t * pix, int line_size) int i; int s __attribute__((aligned(16))); - + sad = (vector unsigned int)vec_splat_u32(0); - + for (i = 0; i < 16; i++) { - /* Read the potentially unaligned 16 pixels into t1 */ + /* Read the potentially unaligned 16 pixels into t1 */ perm = vec_lvsl(0, pix); pixv = (vector unsigned char *) pix; t1 = vec_perm(pixv[0], pixv[1], perm); - /* Add each 4 pixel group together and put 4 results into sad */ + /* Add each 4 pixel group together and put 4 results into sad */ sad = vec_sum4s(t1, sad); - + pix += line_size; } - + /* Sum up the four partial sums, and put the result into s */ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero); sumdiffs = vec_splat(sumdiffs, 3); vec_ste(sumdiffs, 0, &s); - + return s; } @@ -633,7 +633,7 @@ void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) { #else /* ALTIVEC_USE_REFERENCE_C_CODE */ register int i; register vector unsigned char vdst, vsrc; - + /* dst and src are 16 bytes-aligned (guaranteed) */ for(i = 0 ; (i + 15) < w ; i++) { @@ -799,19 +799,19 @@ POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1); int i; POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1); - + for (i = 0; i < h; i++) { /* block is 8 bytes-aligned, so we're either in the left block (16 bytes-aligned) or in the right block (not) */ int rightside = ((unsigned long)block & 0x0000000F); - + blockv = vec_ld(0, block); 
pixelsv1 = vec_ld(0, (unsigned char*)pixels); pixelsv2 = vec_ld(16, (unsigned char*)pixels); pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels)); - + if (rightside) { pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1)); @@ -820,17 +820,17 @@ POWERPC_PERF_START_COUNT(altivec_avg_pixels8_num, 1); { pixelsv = vec_perm(blockv, pixelsv, vcprm(s0,s1,2,3)); } - + blockv = vec_avg(blockv, pixelsv); vec_st(blockv, 0, block); - + pixels += line_size; block += line_size; } - + POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_num, 1); - + #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } @@ -886,7 +886,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1); pixelssum1, pixelssum2, temp3; register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0); register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2); - + temp1 = vec_ld(0, pixels); temp2 = vec_ld(16, pixels); pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels)); @@ -903,8 +903,8 @@ POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1); pixelssum1 = vec_add((vector unsigned short)pixelsv1, (vector unsigned short)pixelsv2); pixelssum1 = vec_add(pixelssum1, vctwo); - -POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1); + +POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1); for (i = 0; i < h ; i++) { int rightside = ((unsigned long)block & 0x0000000F); blockv = vec_ld(0, block); @@ -929,7 +929,7 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1); temp3 = vec_sra(temp3, vctwo); pixelssum1 = vec_add(pixelssum2, vctwo); pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero); - + if (rightside) { blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1)); @@ -938,13 +938,13 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels8_xy2_num, 1); { blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3)); } - + vec_st(blockv, 0, block); - + block += line_size; pixels += line_size; } - + POWERPC_PERF_STOP_COUNT(altivec_put_pixels8_xy2_num, 1); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } @@ -987,7 +987,7 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); } pixels += 4 - line_size * (h + 1); block += 4 - line_size * h; } - + POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); #else /* ALTIVEC_USE_REFERENCE_C_CODE */ @@ -1002,7 +1002,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0); register const_vector unsigned short vcone = (const_vector unsigned short)vec_splat_u16(1); register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2); - + temp1 = vec_ld(0, pixels); temp2 = vec_ld(16, pixels); pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels)); @@ -1019,8 +1019,8 @@ POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); pixelssum1 = vec_add((vector unsigned short)pixelsv1, (vector unsigned short)pixelsv2); pixelssum1 = vec_add(pixelssum1, vcone); - -POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); + +POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); for (i = 0; i < h ; i++) { int rightside = ((unsigned long)block & 0x0000000F); blockv = vec_ld(0, block); @@ -1045,7 +1045,7 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); temp3 = vec_sra(temp3, vctwo); pixelssum1 = vec_add(pixelssum2, vcone); pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero); - + if (rightside) { blockv = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1)); @@ -1054,13 
+1054,13 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); { blockv = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3)); } - + vec_st(blockv, 0, block); - + block += line_size; pixels += line_size; } - + POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels8_xy2_num, 1); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } @@ -1119,7 +1119,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1); register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2); POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1); - + temp1 = vec_ld(0, pixels); temp2 = vec_ld(16, pixels); pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels)); @@ -1141,7 +1141,7 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1); pixelssum1 = vec_add((vector unsigned short)pixelsv1, (vector unsigned short)pixelsv2); pixelssum1 = vec_add(pixelssum1, vctwo); - + for (i = 0; i < h ; i++) { blockv = vec_ld(0, block); @@ -1161,7 +1161,7 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1); pixelsv4 = vec_mergel(vczero, pixelsv2); pixelsv1 = vec_mergeh(vczero, pixelsv1); pixelsv2 = vec_mergeh(vczero, pixelsv2); - + pixelssum4 = vec_add((vector unsigned short)pixelsv3, (vector unsigned short)pixelsv4); pixelssum2 = vec_add((vector unsigned short)pixelsv1, @@ -1175,13 +1175,13 @@ POWERPC_PERF_START_COUNT(altivec_put_pixels16_xy2_num, 1); pixelssum1 = vec_add(pixelssum2, vctwo); blockv = vec_packsu(temp3, temp4); - + vec_st(blockv, 0, block); - + block += line_size; pixels += line_size; } - + POWERPC_PERF_STOP_COUNT(altivec_put_pixels16_xy2_num, 1); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } @@ -1241,7 +1241,7 @@ POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2); POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); - + temp1 = vec_ld(0, pixels); temp2 = vec_ld(16, pixels); pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels)); @@ -1263,7 +1263,7 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); pixelssum1 = vec_add((vector unsigned short)pixelsv1, (vector unsigned short)pixelsv2); pixelssum1 = vec_add(pixelssum1, vcone); - + for (i = 0; i < h ; i++) { blockv = vec_ld(0, block); @@ -1283,7 +1283,7 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); pixelsv4 = vec_mergel(vczero, pixelsv2); pixelsv1 = vec_mergeh(vczero, pixelsv1); pixelsv2 = vec_mergeh(vczero, pixelsv2); - + pixelssum4 = vec_add((vector unsigned short)pixelsv3, (vector unsigned short)pixelsv4); pixelssum2 = vec_add((vector unsigned short)pixelsv1, @@ -1297,13 +1297,13 @@ POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); pixelssum1 = vec_add(pixelssum2, vcone); blockv = vec_packsu(temp3, temp4); - + vec_st(blockv, 0, block); - + block += line_size; pixels += line_size; } - + POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } @@ -1335,32 +1335,32 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1); 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07); -#define ONEITERBUTTERFLY(i, res) \ - { \ - register vector unsigned char src1, src2, srcO; \ - register vector unsigned char dst1, dst2, dstO; \ - src1 = vec_ld(stride * i, src); \ - if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8) \ - src2 = vec_ld((stride * i) + 16, src); \ - srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \ - dst1 = vec_ld(stride * i, dst); \ - if ((((stride * i) + (unsigned long)dst) & 
0x0000000F) > 8) \ - dst2 = vec_ld((stride * i) + 16, dst); \ - dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \ - /* promote the unsigned chars to signed shorts */ \ - /* we're in the 8x8 function, we only care for the first 8 */ \ - register vector signed short srcV = \ - (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \ - register vector signed short dstV = \ - (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \ - /* substractions inside the first butterfly */ \ - register vector signed short but0 = vec_sub(srcV, dstV); \ - register vector signed short op1 = vec_perm(but0, but0, perm1); \ - register vector signed short but1 = vec_mladd(but0, vprod1, op1); \ - register vector signed short op2 = vec_perm(but1, but1, perm2); \ - register vector signed short but2 = vec_mladd(but1, vprod2, op2); \ - register vector signed short op3 = vec_perm(but2, but2, perm3); \ - res = vec_mladd(but2, vprod3, op3); \ +#define ONEITERBUTTERFLY(i, res) \ + { \ + register vector unsigned char src1, src2, srcO; \ + register vector unsigned char dst1, dst2, dstO; \ + src1 = vec_ld(stride * i, src); \ + if ((((stride * i) + (unsigned long)src) & 0x0000000F) > 8) \ + src2 = vec_ld((stride * i) + 16, src); \ + srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \ + dst1 = vec_ld(stride * i, dst); \ + if ((((stride * i) + (unsigned long)dst) & 0x0000000F) > 8) \ + dst2 = vec_ld((stride * i) + 16, dst); \ + dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \ + /* promote the unsigned chars to signed shorts */ \ + /* we're in the 8x8 function, we only care for the first 8 */ \ + register vector signed short srcV = \ + (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \ + register vector signed short dstV = \ + (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \ + /* substractions inside the first butterfly */ \ + register vector signed short but0 = vec_sub(srcV, dstV); \ + register vector signed short op1 = vec_perm(but0, but0, perm1); \ + register vector signed short but1 = vec_mladd(but0, vprod1, op1); \ + register vector signed short op2 = vec_perm(but1, but1, perm2); \ + register vector signed short but2 = vec_mladd(but1, vprod2, op2); \ + register vector signed short op3 = vec_perm(but2, but2, perm3); \ + res = vec_mladd(but2, vprod3, op3); \ } ONEITERBUTTERFLY(0, temp0); ONEITERBUTTERFLY(1, temp1); @@ -1382,7 +1382,7 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1); register vector signed short line5 = vec_sub(temp4, temp5); register vector signed short line6 = vec_add(temp6, temp7); register vector signed short line7 = vec_sub(temp6, temp7); - + register vector signed short line0B = vec_add(line0, line2); register vector signed short line2B = vec_sub(line0, line2); register vector signed short line1B = vec_add(line1, line3); @@ -1391,7 +1391,7 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1); register vector signed short line6B = vec_sub(line4, line6); register vector signed short line5B = vec_add(line5, line7); register vector signed short line7B = vec_sub(line5, line7); - + register vector signed short line0C = vec_add(line0B, line4B); register vector signed short line4C = vec_sub(line0B, line4B); register vector signed short line1C = vec_add(line1B, line5B); @@ -1400,7 +1400,7 @@ POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1); register vector signed short line6C = vec_sub(line2B, line6B); register vector 
signed short line3C = vec_add(line3B, line7B); register vector signed short line7C = vec_sub(line3B, line7B); - + vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0)); vsum = vec_sum4s(vec_abs(line1C), vsum); vsum = vec_sum4s(vec_abs(line2C), vsum); @@ -1421,7 +1421,7 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1); 16x8 works with 16 elements ; it allows to avoid replicating loads, and give the compiler more rooms for scheduling. It's only used from inside hadamard8_diff16_altivec. - + Unfortunately, it seems gcc-3.3 is a bit dumb, and the compiled code has a LOT of spill code, it seems gcc (unlike xlc) cannot keep everything in registers @@ -1429,11 +1429,11 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff8x8_num, 1); registers allocation. It's not clean, but on a 7450 the resulting code is much faster (best case fall from 700+ cycles to 550). - + xlc doesn't add spill code, but it doesn't know how to schedule for the 7450, and its code isn't much faster than gcc-3.3 on the 7450 (but uses 25% less instructions...) - + On the 970, the hand-made RA is still a win (arount 690 vs. around 780), but xlc goes to around 660 on the regular C code... @@ -1480,26 +1480,26 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07); -#define ONEITERBUTTERFLY(i, res1, res2) \ - { \ +#define ONEITERBUTTERFLY(i, res1, res2) \ + { \ register vector unsigned char src1 asm ("v22"), src2 asm ("v23"); \ register vector unsigned char dst1 asm ("v24"), dst2 asm ("v25"); \ - src1 = vec_ld(stride * i, src); \ - src2 = vec_ld((stride * i) + 16, src); \ + src1 = vec_ld(stride * i, src); \ + src2 = vec_ld((stride * i) + 16, src); \ register vector unsigned char srcO asm ("v22") = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \ - dst1 = vec_ld(stride * i, dst); \ - dst2 = vec_ld((stride * i) + 16, dst); \ + dst1 = vec_ld(stride * i, dst); \ + dst2 = vec_ld((stride * i) + 16, dst); \ register vector unsigned char dstO asm ("v23") = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \ - /* promote the unsigned chars to signed shorts */ \ + /* promote the unsigned chars to signed shorts */ \ register vector signed short srcV asm ("v24") = \ - (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \ + (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)srcO); \ register vector signed short dstV asm ("v25") = \ - (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \ + (vector signed short)vec_mergeh((vector signed char)vzero, (vector signed char)dstO); \ register vector signed short srcW asm ("v26") = \ - (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)srcO); \ + (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)srcO); \ register vector signed short dstW asm ("v27") = \ - (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)dstO); \ - /* substractions inside the first butterfly */ \ + (vector signed short)vec_mergel((vector signed char)vzero, (vector signed char)dstO); \ + /* substractions inside the first butterfly */ \ register vector signed short but0 asm ("v28") = vec_sub(srcV, dstV); \ register vector signed short but0S asm ("v29") = vec_sub(srcW, dstW); \ register vector signed short op1 asm ("v30") = vec_perm(but0, but0, perm1); \ @@ -1511,9 +1511,9 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, register vector signed 
short op2S asm ("v27") = vec_perm(but1S, but1S, perm2); \ register vector signed short but2S asm ("v28") = vec_mladd(but1S, vprod2, op2S); \ register vector signed short op3 asm ("v29") = vec_perm(but2, but2, perm3); \ - res1 = vec_mladd(but2, vprod3, op3); \ + res1 = vec_mladd(but2, vprod3, op3); \ register vector signed short op3S asm ("v30") = vec_perm(but2S, but2S, perm3); \ - res2 = vec_mladd(but2S, vprod3, op3S); \ + res2 = vec_mladd(but2S, vprod3, op3S); \ } ONEITERBUTTERFLY(0, temp0, temp0S); ONEITERBUTTERFLY(1, temp1, temp1S); @@ -1535,7 +1535,7 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, register vector signed short line5 = vec_sub(temp4, temp5); register vector signed short line6 = vec_add(temp6, temp7); register vector signed short line7 = vec_sub(temp6, temp7); - + register vector signed short line0B = vec_add(line0, line2); register vector signed short line2B = vec_sub(line0, line2); register vector signed short line1B = vec_add(line1, line3); @@ -1544,7 +1544,7 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, register vector signed short line6B = vec_sub(line4, line6); register vector signed short line5B = vec_add(line5, line7); register vector signed short line7B = vec_sub(line5, line7); - + register vector signed short line0C = vec_add(line0B, line4B); register vector signed short line4C = vec_sub(line0B, line4B); register vector signed short line1C = vec_add(line1B, line5B); @@ -1553,7 +1553,7 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, register vector signed short line6C = vec_sub(line2B, line6B); register vector signed short line3C = vec_add(line3B, line7B); register vector signed short line7C = vec_sub(line3B, line7B); - + vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0)); vsum = vec_sum4s(vec_abs(line1C), vsum); vsum = vec_sum4s(vec_abs(line2C), vsum); @@ -1623,12 +1623,12 @@ POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1); int has_altivec(void) { #ifdef __AMIGAOS4__ - ULONG result = 0; - extern struct ExecIFace *IExec; + ULONG result = 0; + extern struct ExecIFace *IExec; - IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE); - if (result == VECTORTYPE_ALTIVEC) return 1; - return 0; + IExec->GetCPUInfoTags(GCIT_VectorUnit, &result, TAG_DONE); + if (result == VECTORTYPE_ALTIVEC) return 1; + return 0; #else /* __AMIGAOS4__ */ #ifdef CONFIG_DARWIN @@ -1649,12 +1649,12 @@ int has_altivec(void) signal (SIGILL, SIG_DFL); } else { canjump = 1; - + asm volatile ("mtspr 256, %0\n\t" "vand %%v0, %%v0, %%v0" : : "r" (-1)); - + signal (SIGILL, SIG_DFL); return 1; } @@ -1710,7 +1710,7 @@ POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1); pixelssum1, pixelssum2, temp3; register const_vector unsigned char vczero = (const_vector unsigned char)vec_splat_u8(0); register const_vector unsigned short vctwo = (const_vector unsigned short)vec_splat_u16(2); - + temp1 = vec_ld(0, pixels); temp2 = vec_ld(16, pixels); pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels)); @@ -1727,8 +1727,8 @@ POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1); pixelssum1 = vec_add((vector unsigned short)pixelsv1, (vector unsigned short)pixelsv2); pixelssum1 = vec_add(pixelssum1, vctwo); - -POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1); + +POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1); for (i = 0; i < h ; i++) { int rightside = ((unsigned long)block & 0x0000000F); blockv = vec_ld(0, block); @@ -1753,7 +1753,7 @@ 
POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1); temp3 = vec_sra(temp3, vctwo); pixelssum1 = vec_add(pixelssum2, vctwo); pixelsavg = vec_packsu(temp3, (vector unsigned short) vczero); - + if (rightside) { blocktemp = vec_perm(blockv, pixelsavg, vcprm(0, 1, s0, s1)); @@ -1762,14 +1762,14 @@ POWERPC_PERF_START_COUNT(altivec_avg_pixels8_xy2_num, 1); { blocktemp = vec_perm(blockv, pixelsavg, vcprm(s0, s1, 2, 3)); } - + blockv = vec_avg(blocktemp, blockv); vec_st(blockv, 0, block); - + block += line_size; pixels += line_size; } - + POWERPC_PERF_STOP_COUNT(altivec_avg_pixels8_xy2_num, 1); #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h index 88f06c372..ac54817d0 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h +++ b/src/libffmpeg/libavcodec/ppc/dsputil_altivec.h @@ -15,7 +15,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _DSPUTIL_ALTIVEC_ diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c index 1891e194a..b9fef005e 100755 --- a/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_h264_altivec.c @@ -13,9 +13,9 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ - + #include "../dsputil.h" #include "gcc_fixes.h" @@ -71,7 +71,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uin }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \ - uint64_t temp[SIZE*SIZE/8] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/8]);\ uint8_t * const half= (uint8_t*)temp;\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\ @@ -82,14 +82,14 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/8]);\ uint8_t * const half= (uint8_t*)temp;\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/8]);\ uint8_t * const half= (uint8_t*)temp;\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\ @@ -100,14 +100,14 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/8] __align16;\ + 
DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/8]);\ uint8_t * const half= (uint8_t*)temp;\ put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\ OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/4]);\ uint8_t * const halfH= (uint8_t*)temp;\ uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ @@ -116,7 +116,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/4]);\ uint8_t * const halfH= (uint8_t*)temp;\ uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\ @@ -125,7 +125,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/4]);\ uint8_t * const halfH= (uint8_t*)temp;\ uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ @@ -134,7 +134,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*SIZE/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*SIZE/4]);\ uint8_t * const halfH= (uint8_t*)temp;\ uint8_t * const halfV= ((uint8_t*)temp) + SIZE*SIZE;\ put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\ @@ -143,13 +143,13 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4]);\ int16_t * const tmp= (int16_t*)temp;\ OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\ }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4]);\ uint8_t * const halfH= (uint8_t*)temp;\ uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ @@ -159,7 +159,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4]);\ uint8_t * const halfH= (uint8_t*)temp;\ uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ @@ -169,7 +169,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## 
CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4]);\ uint8_t * const halfV= (uint8_t*)temp;\ uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ @@ -179,7 +179,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint }\ \ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\ - uint64_t temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4] __align16;\ + DECLARE_ALIGNED_16(uint64_t, temp[SIZE*(SIZE+8)/4 + SIZE*SIZE/4]);\ uint8_t * const halfV= (uint8_t*)temp;\ uint8_t * const halfHV= ((uint8_t*)temp) + SIZE*SIZE;\ int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE;\ @@ -191,33 +191,33 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint /* from dsputil.c */ static inline void put_pixels8_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) { - int i; - for (i = 0; i < h; i++) { - uint32_t a, b; - a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l); - b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l); - *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(a, b); - a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l); - b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l); - *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(a, b); - } + int i; + for (i = 0; i < h; i++) { + uint32_t a, b; + a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l); + b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l); + *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(a, b); + a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l); + b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l); + *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(a, b); + } } static inline void avg_pixels8_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) { - int i; - for (i = 0; i < h; i++) { - uint32_t a, b; - a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l); - b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l); - *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride]), rnd_avg32(a, b)); - a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l); - b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l); - *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride + 4]), rnd_avg32(a, b)); - } + int i; + for (i = 0; i < h; i++) { + uint32_t a, b; + a = (((const struct unaligned_32 *) (&src1[i * src_stride1]))->l); + b = (((const struct unaligned_32 *) (&src2[i * src_stride2]))->l); + *((uint32_t *) & dst[i * dst_stride]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride]), rnd_avg32(a, b)); + a = (((const struct unaligned_32 *) (&src1[i * src_stride1 + 4]))->l); + b = (((const struct unaligned_32 *) (&src2[i * src_stride2 + 4]))->l); + *((uint32_t *) & dst[i * dst_stride + 4]) = rnd_avg32(*((uint32_t *) & dst[i * dst_stride + 4]), rnd_avg32(a, b)); + } } static inline void put_pixels16_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) { - put_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h); - 
put_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h); + put_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h); + put_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h); } static inline void avg_pixels16_l2(uint8_t * dst, const uint8_t * src1, const uint8_t * src2, int dst_stride, int src_stride1, int src_stride2, int h) { - avg_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h); - avg_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h); + avg_pixels8_l2(dst, src1, src2, dst_stride, src_stride1, src_stride2, h); + avg_pixels8_l2(dst + 8, src1 + 8, src2 + 8, dst_stride, src_stride1, src_stride2, h); } /* UNIMPLEMENTED YET !! */ @@ -228,7 +228,7 @@ H264_MC(put_, 16, altivec) H264_MC(avg_, 16, altivec) void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) { - + #ifdef HAVE_ALTIVEC if (has_altivec()) { c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec; @@ -251,16 +251,16 @@ void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) { c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \ c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \ c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec - + dspfunc(put_h264_qpel, 0, 16); dspfunc(avg_h264_qpel, 0, 16); #undef dspfunc - + } else #endif /* HAVE_ALTIVEC */ { // Non-AltiVec PPC optimisations - + // ... pending ... } } diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c b/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c index cb0fa954d..7f46ccf14 100755 --- a/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_h264_template_altivec.c @@ -13,7 +13,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /* this code assume that stride % 16 == 0 */ @@ -47,7 +47,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1; register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 
1 : 0; - + vector unsigned char vsrcAuc; vector unsigned char vsrcBuc; vector unsigned char vsrcperm0; @@ -57,7 +57,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in vsrcBuc = vec_ld(16, src); vsrcperm0 = vec_lvsl(0, src); vsrcperm1 = vec_lvsl(1, src); - + vector unsigned char vsrc0uc; vector unsigned char vsrc1uc; vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0); @@ -65,7 +65,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in vsrc1uc = vsrcBuc; else vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1); - + vector signed short vsrc0ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc0uc); vector signed short vsrc1ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc1uc); @@ -73,37 +73,37 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in for (i = 0 ; i < h ; i++) { vector unsigned char vsrcCuc; vsrcCuc = vec_ld(stride + 0, src); - + vector unsigned char vsrc2uc; vector unsigned char vsrc3uc; vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0); vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1); - + vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc); vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc); - + vector signed short psum; - + psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0)); psum = vec_mladd(vB, vsrc1ssH, psum); psum = vec_mladd(vC, vsrc2ssH, psum); psum = vec_mladd(vD, vsrc3ssH, psum); psum = vec_add(v32ss, psum); psum = vec_sra(psum, v6us); - + vector unsigned char vdst = vec_ld(0, dst); vector unsigned char ppsum = (vector unsigned char)vec_packsu(psum, psum); - + vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm); vector unsigned char fsum; - + OP_U8_ALTIVEC(fsum, vfdst, vdst); vec_st(fsum, 0, dst); - + vsrc0ssH = vsrc2ssH; vsrc1ssH = vsrc3ssH; - + dst += stride; src += stride; } @@ -113,7 +113,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in vector unsigned char vsrcDuc; vsrcCuc = vec_ld(stride + 0, src); vsrcDuc = vec_ld(stride + 16, src); - + vector unsigned char vsrc2uc; vector unsigned char vsrc3uc; vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0); @@ -121,32 +121,32 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, in vsrc3uc = vsrcDuc; else vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1); - + vector signed short vsrc2ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc2uc); vector signed short vsrc3ssH = (vector signed short)vec_mergeh((vector unsigned char)vzero, (vector unsigned char)vsrc3uc); - + vector signed short psum; - + psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0)); psum = vec_mladd(vB, vsrc1ssH, psum); psum = vec_mladd(vC, vsrc2ssH, psum); psum = vec_mladd(vD, vsrc3ssH, psum); psum = vec_add(v32ss, psum); psum = vec_sr(psum, v6us); - + vector unsigned char vdst = vec_ld(0, dst); - vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum); - + vector unsigned char ppsum = (vector unsigned char)vec_pack(psum, psum); + vector unsigned char vfdst = vec_perm(vdst, ppsum, fperm); vector unsigned char fsum; - + OP_U8_ALTIVEC(fsum, vfdst, vdst); vec_st(fsum, 0, dst); - + vsrc0ssH = vsrc2ssH; vsrc1ssH = vsrc3ssH; - + dst += stride; src += stride; } @@ -159,7 +159,7 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, 
uint8_t * src, i POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_h_lowpass_num, 1); POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1); register int i; - + const vector signed int vzero = vec_splat_s32(0); const vector unsigned char permM2 = vec_lvsl(-2, src); const vector unsigned char permM1 = vec_lvsl(-1, src); @@ -258,13 +258,13 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, i const vector signed short sum2B = vec_adds(srcM1B, srcP2B); const vector signed short sum3A = vec_adds(srcM2A, srcP3A); const vector signed short sum3B = vec_adds(srcM2B, srcP3B); - + const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss); const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss); const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero); const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero); - + const vector signed short pp3A = vec_add(sum3A, pp1A); const vector signed short pp3B = vec_add(sum3B, pp1B); @@ -300,7 +300,7 @@ POWERPC_PERF_STOP_COUNT(PREFIX_h264_qpel16_h_lowpass_num, 1); static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) { POWERPC_PERF_DECLARE(PREFIX_h264_qpel16_v_lowpass_num, 1); POWERPC_PERF_START_COUNT(PREFIX_h264_qpel16_v_lowpass_num, 1); - + register int i; const vector signed int vzero = vec_splat_s32(0); @@ -312,7 +312,7 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, i const vector unsigned char dstperm = vec_lvsr(0, dst); const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1); const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm); - + uint8_t *srcbis = src - (srcStride * 2); const vector unsigned char srcM2a = vec_ld(0, srcbis); @@ -372,13 +372,13 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, i srcP1ssB = srcP2ssB; srcP2ssA = srcP3ssA; srcP2ssB = srcP3ssB; - + const vector signed short pp1A = vec_mladd(sum1A, v20ss, v16ss); const vector signed short pp1B = vec_mladd(sum1B, v20ss, v16ss); const vector signed short pp2A = vec_mladd(sum2A, v5ss, (vector signed short)vzero); const vector signed short pp2B = vec_mladd(sum2B, v5ss, (vector signed short)vzero); - + const vector signed short pp3A = vec_add(sum3A, pp1A); const vector signed short pp3B = vec_add(sum3B, pp1B); @@ -513,7 +513,7 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, const vector signed short sum2B = vec_adds(srcM1B, srcP2B); const vector signed short sum3A = vec_adds(srcM2A, srcP3A); const vector signed short sum3B = vec_adds(srcM2B, srcP3B); - + const vector signed short pp1A = vec_mladd(sum1A, v20ss, sum3A); const vector signed short pp1B = vec_mladd(sum1B, v20ss, sum3B); @@ -525,18 +525,18 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, vec_st(psumA, 0, tmp); vec_st(psumB, 16, tmp); - + src += srcStride; tmp += tmpStride; /* int16_t*, and stride is 16, so it's OK here */ } - + const vector unsigned char dstperm = vec_lvsr(0, dst); const vector unsigned char neg1 = (const vector unsigned char)vec_splat_s8(-1); const vector unsigned char dstmask = vec_perm((const vector unsigned char)vzero, neg1, dstperm); const vector unsigned char mperm = (const vector unsigned char) AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B, 0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F); - + int16_t *tmpbis = tmp - (tmpStride * 21); vector signed short tmpM2ssA = 
vec_ld(0, tmpbis); @@ -607,7 +607,7 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, const vector signed int sumAo = vec_add(pp1cAo, pp32Ao); const vector signed int sumBe = vec_add(pp1cBe, pp32Be); const vector signed int sumBo = vec_add(pp1cBo, pp32Bo); - + const vector signed int ssumAe = vec_sra(sumAe, v10ui); const vector signed int ssumAo = vec_sra(sumAo, v10ui); const vector signed int ssumBe = vec_sra(sumBe, v10ui); diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c index 776f4235c..d5f55b80f 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c +++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.c @@ -15,7 +15,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "../dsputil.h" @@ -87,16 +87,16 @@ void powerpc_display_perf_report(void) { for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++) { - if (perfdata[j][i][powerpc_data_num] != (unsigned long long)0) - av_log(NULL, AV_LOG_INFO, - " Function \"%s\" (pmc%d):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n", - perfname[i], - j+1, - perfdata[j][i][powerpc_data_min], - perfdata[j][i][powerpc_data_max], - (double)perfdata[j][i][powerpc_data_sum] / - (double)perfdata[j][i][powerpc_data_num], - perfdata[j][i][powerpc_data_num]); + if (perfdata[j][i][powerpc_data_num] != (unsigned long long)0) + av_log(NULL, AV_LOG_INFO, + " Function \"%s\" (pmc%d):\n\tmin: %llu\n\tmax: %llu\n\tavg: %1.2lf (%llu)\n", + perfname[i], + j+1, + perfdata[j][i][powerpc_data_min], + perfdata[j][i][powerpc_data_max], + (double)perfdata[j][i][powerpc_data_sum] / + (double)perfdata[j][i][powerpc_data_num], + perfdata[j][i][powerpc_data_num]); } } } @@ -179,7 +179,7 @@ POWERPC_PERF_START_COUNT(powerpc_clear_blocks_dcbz128, 1); } else for ( ; i < sizeof(DCTELEM)*6*64 ; i += 128) { - asm volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory"); + asm volatile("dcbzl %0,%1" : : "b" (blocks), "r" (i) : "memory"); } #else memset(blocks, 0, sizeof(DCTELEM)*6*64); @@ -227,7 +227,7 @@ long check_dcbzl_effect(void) } av_free(fakedata); - + return count; } #else @@ -257,10 +257,10 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) #ifdef HAVE_ALTIVEC dsputil_h264_init_ppc(c, avctx); - + if (has_altivec()) { mm_flags |= MM_ALTIVEC; - + // Altivec specific optimisations c->pix_abs[0][1] = sad16_x2_altivec; c->pix_abs[0][2] = sad16_y2_altivec; @@ -284,25 +284,25 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) c->put_no_rnd_pixels_tab[0][0] = put_pixels16_altivec; c->avg_pixels_tab[0][0] = avg_pixels16_altivec; c->avg_pixels_tab[1][0] = avg_pixels8_altivec; - c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec; + c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec; c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec; c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec; c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec; c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec; - - c->gmc1 = gmc1_altivec; + + c->gmc1 = gmc1_altivec; #ifdef CONFIG_DARWIN // ATM gcc-3.3 and gcc-3.4 fail to compile these in linux... 
- c->hadamard8_diff[0] = hadamard8_diff16_altivec; - c->hadamard8_diff[1] = hadamard8_diff8x8_altivec; + c->hadamard8_diff[0] = hadamard8_diff16_altivec; + c->hadamard8_diff[1] = hadamard8_diff8x8_altivec; #endif #ifdef CONFIG_ENCODERS - if (avctx->dct_algo == FF_DCT_AUTO || - avctx->dct_algo == FF_DCT_ALTIVEC) - { - c->fdct = fdct_altivec; - } + if (avctx->dct_algo == FF_DCT_AUTO || + avctx->dct_algo == FF_DCT_ALTIVEC) + { + c->fdct = fdct_altivec; + } #endif //CONFIG_ENCODERS if (avctx->lowres==0) @@ -319,20 +319,20 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx) #endif /* ALTIVEC_USE_REFERENCE_C_CODE */ } } - + #ifdef POWERPC_PERFORMANCE_REPORT { int i, j; for (i = 0 ; i < powerpc_perf_total ; i++) { - for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++) - { - perfdata[j][i][powerpc_data_min] = 0xFFFFFFFFFFFFFFFFULL; - perfdata[j][i][powerpc_data_max] = 0x0000000000000000ULL; - perfdata[j][i][powerpc_data_sum] = 0x0000000000000000ULL; - perfdata[j][i][powerpc_data_num] = 0x0000000000000000ULL; - } - } + for (j = 0; j < POWERPC_NUM_PMC_ENABLED ; j++) + { + perfdata[j][i][powerpc_data_min] = 0xFFFFFFFFFFFFFFFFULL; + perfdata[j][i][powerpc_data_max] = 0x0000000000000000ULL; + perfdata[j][i][powerpc_data_sum] = 0x0000000000000000ULL; + perfdata[j][i][powerpc_data_num] = 0x0000000000000000ULL; + } + } } #endif /* POWERPC_PERFORMANCE_REPORT */ } else diff --git a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h index 7e01677f1..966ffa71a 100644 --- a/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h +++ b/src/libffmpeg/libavcodec/ppc/dsputil_ppc.h @@ -13,7 +13,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #ifndef _DSPUTIL_PPC_ @@ -114,10 +114,10 @@ extern unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][ #define POWERPC_GET_PMC6(a) do {} while (0) #endif #endif /* POWERPC_MODE_64BITS */ -#define POWERPC_PERF_DECLARE(a, cond) \ - POWERP_PMC_DATATYPE \ - pmc_start[POWERPC_NUM_PMC_ENABLED], \ - pmc_stop[POWERPC_NUM_PMC_ENABLED], \ +#define POWERPC_PERF_DECLARE(a, cond) \ + POWERP_PMC_DATATYPE \ + pmc_start[POWERPC_NUM_PMC_ENABLED], \ + pmc_stop[POWERPC_NUM_PMC_ENABLED], \ pmc_loop_index; #define POWERPC_PERF_START_COUNT(a, cond) do { \ POWERPC_GET_PMC6(pmc_start[5]); \ @@ -141,8 +141,8 @@ extern unsigned long long perfdata[POWERPC_NUM_PMC_ENABLED][powerpc_perf_total][ pmc_loop_index++) \ { \ if (pmc_stop[pmc_loop_index] >= pmc_start[pmc_loop_index]) \ - { \ - POWERP_PMC_DATATYPE diff = \ + { \ + POWERP_PMC_DATATYPE diff = \ pmc_stop[pmc_loop_index] - pmc_start[pmc_loop_index]; \ if (diff < perfdata[pmc_loop_index][a][powerpc_data_min]) \ perfdata[pmc_loop_index][a][powerpc_data_min] = diff; \ diff --git a/src/libffmpeg/libavcodec/ppc/fdct_altivec.c b/src/libffmpeg/libavcodec/ppc/fdct_altivec.c index b38b909c6..f5778c24e 100644 --- a/src/libffmpeg/libavcodec/ppc/fdct_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/fdct_altivec.c @@ -14,7 +14,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ diff --git 
a/src/libffmpeg/libavcodec/ppc/fft_altivec.c b/src/libffmpeg/libavcodec/ppc/fft_altivec.c index 29d85e87d..f4ea78359 100644 --- a/src/libffmpeg/libavcodec/ppc/fft_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/fft_altivec.c @@ -16,7 +16,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "../dsputil.h" @@ -65,15 +65,15 @@ void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z) POWERPC_PERF_DECLARE(altivec_fft_num, s->nbits >= 6); #ifdef ALTIVEC_USE_REFERENCE_C_CODE int ln = s->nbits; - int j, np, np2; - int nblocks, nloops; + int j, np, np2; + int nblocks, nloops; register FFTComplex *p, *q; FFTComplex *exptab = s->exptab; int l; FFTSample tmp_re, tmp_im; - + POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); - + np = 1 << ln; /* pass 0 */ @@ -81,29 +81,29 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); p=&z[0]; j=(np >> 1); do { - BF(p[0].re, p[0].im, p[1].re, p[1].im, + BF(p[0].re, p[0].im, p[1].re, p[1].im, p[0].re, p[0].im, p[1].re, p[1].im); p+=2; } while (--j != 0); /* pass 1 */ - + p=&z[0]; j=np >> 2; if (s->inverse) { do { - BF(p[0].re, p[0].im, p[2].re, p[2].im, + BF(p[0].re, p[0].im, p[2].re, p[2].im, p[0].re, p[0].im, p[2].re, p[2].im); - BF(p[1].re, p[1].im, p[3].re, p[3].im, + BF(p[1].re, p[1].im, p[3].re, p[3].im, p[1].re, p[1].im, -p[3].im, p[3].re); p+=4; } while (--j != 0); } else { do { - BF(p[0].re, p[0].im, p[2].re, p[2].im, + BF(p[0].re, p[0].im, p[2].re, p[2].im, p[0].re, p[0].im, p[2].re, p[2].im); - BF(p[1].re, p[1].im, p[3].re, p[3].im, + BF(p[1].re, p[1].im, p[3].re, p[3].im, p[1].re, p[1].im, p[3].im, -p[3].re); p+=4; } while (--j != 0); @@ -119,7 +119,7 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); for (j = 0; j < nblocks; ++j) { BF(p->re, p->im, q->re, q->im, p->re, p->im, q->re, q->im); - + p++; q++; for(l = nblocks; l < np2; l += nblocks) { @@ -145,10 +145,10 @@ POWERPC_PERF_STOP_COUNT(altivec_fft_num, s->nbits >= 6); #else register const vector float vczero = (const vector float){0.,0.,0.,0.}; #endif - + int ln = s->nbits; - int j, np, np2; - int nblocks, nloops; + int j, np, np2; + int nblocks, nloops; register FFTComplex *p, *q; FFTComplex *cptr, *cptr1; int k; @@ -163,7 +163,7 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); r = (vector float *)&z[0]; c1 = vcii(p,p,n,n); - + if (s->inverse) { c2 = vcii(p,p,n,p); @@ -172,27 +172,27 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); { c2 = vcii(p,p,p,n); } - + j = (np >> 2); do { a = vec_ld(0, r); a1 = vec_ld(sizeof(vector float), r); - + b = vec_perm(a,a,vcprmle(1,0,3,2)); a = vec_madd(a,c1,b); /* do the pass 0 butterfly */ - + b = vec_perm(a1,a1,vcprmle(1,0,3,2)); b = vec_madd(a1,c1,b); /* do the pass 0 butterfly */ - + /* multiply third by -i */ b = vec_perm(b,b,vcprmle(2,3,1,0)); - + /* do the pass 1 butterfly */ vec_st(vec_madd(b,c2,a), 0, r); vec_st(vec_nmsub(b,c2,a), sizeof(vector float), r); - + r += 2; } while (--j != 0); } @@ -215,7 +215,7 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); a = vec_ld(0, (float*)p); b = vec_ld(0, (float*)q); - + /* complex mul */ c = vec_ld(0, (float*)cptr); /* cre*re cim*re */ @@ -223,16 +223,16 @@ POWERPC_PERF_START_COUNT(altivec_fft_num, s->nbits >= 6); c = vec_ld(sizeof(vector float), (float*)cptr); /* -cim*im cre*im */ b = vec_madd(c, 
vec_perm(b,b,vcprmle(3,3,1,1)),t1); - + /* butterfly */ vec_st(vec_add(a,b), 0, (float*)p); vec_st(vec_sub(a,b), 0, (float*)q); - + p += 2; q += 2; cptr += 4; } while (--k); - + p += nloops; q += nloops; } while (--j); diff --git a/src/libffmpeg/libavcodec/ppc/gcc_fixes.h b/src/libffmpeg/libavcodec/ppc/gcc_fixes.h index 13d4ff12e..288fdf834 100644 --- a/src/libffmpeg/libavcodec/ppc/gcc_fixes.h +++ b/src/libffmpeg/libavcodec/ppc/gcc_fixes.h @@ -1,6 +1,6 @@ /* * gcc fixes for altivec. - * Used to workaround broken gcc (FSF gcc-3 pre gcc-3.3) + * Used to workaround broken gcc (FSF gcc-3 pre gcc-3.3) * and to stay somewhat compatible with Darwin. */ @@ -19,7 +19,7 @@ # endif #else #define AVV(x...) {x} -#if (__GNUC__ * 100 + __GNUC_MINOR__ < 303) +#if (__GNUC__ * 100 + __GNUC_MINOR__ < 303) /* This code was provided to me by Bartosch Pixa * as a separate header file (broken_mergel.h). @@ -30,37 +30,37 @@ */ static inline vector signed char ff_vmrglb (vector signed char const A, - vector signed char const B) + vector signed char const B) { static const vector unsigned char lowbyte = { - 0x08, 0x18, 0x09, 0x19, 0x0a, 0x1a, 0x0b, 0x1b, - 0x0c, 0x1c, 0x0d, 0x1d, 0x0e, 0x1e, 0x0f, 0x1f + 0x08, 0x18, 0x09, 0x19, 0x0a, 0x1a, 0x0b, 0x1b, + 0x0c, 0x1c, 0x0d, 0x1d, 0x0e, 0x1e, 0x0f, 0x1f }; return vec_perm (A, B, lowbyte); } static inline vector signed short ff_vmrglh (vector signed short const A, - vector signed short const B) + vector signed short const B) { static const vector unsigned char lowhalf = { - 0x08, 0x09, 0x18, 0x19, 0x0a, 0x0b, 0x1a, 0x1b, - 0x0c, 0x0d, 0x1c, 0x1d, 0x0e, 0x0f, 0x1e, 0x1f + 0x08, 0x09, 0x18, 0x19, 0x0a, 0x0b, 0x1a, 0x1b, + 0x0c, 0x0d, 0x1c, 0x1d, 0x0e, 0x0f, 0x1e, 0x1f }; return vec_perm (A, B, lowhalf); } static inline vector signed int ff_vmrglw (vector signed int const A, - vector signed int const B) + vector signed int const B) { static const vector unsigned char lowword = { - 0x08, 0x09, 0x0a, 0x0b, 0x18, 0x19, 0x1a, 0x1b, - 0x0c, 0x0d, 0x0e, 0x0f, 0x1c, 0x1d, 0x1e, 0x1f + 0x08, 0x09, 0x0a, 0x0b, 0x18, 0x19, 0x1a, 0x1b, + 0x0c, 0x0d, 0x0e, 0x0f, 0x1c, 0x1d, 0x1e, 0x1f }; return vec_perm (A, B, lowword); } -/*#define ff_vmrglb ff_vmrglb -#define ff_vmrglh ff_vmrglh -#define ff_vmrglw ff_vmrglw +/*#define ff_vmrglb ff_vmrglb +#define ff_vmrglh ff_vmrglh +#define ff_vmrglw ff_vmrglw */ #undef vec_mergel diff --git a/src/libffmpeg/libavcodec/ppc/gmc_altivec.c b/src/libffmpeg/libavcodec/ppc/gmc_altivec.c index 344821685..04978d825 100644 --- a/src/libffmpeg/libavcodec/ppc/gmc_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/gmc_altivec.c @@ -15,7 +15,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "../dsputil.h" @@ -40,7 +40,7 @@ POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND); int i; POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); - + for(i=0; i<h; i++) { dst[0]= (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + rounder)>>8; @@ -87,7 +87,7 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); Dv = vec_splat(tempA, 3); rounderV = vec_ld(0, (unsigned short*)rounder_a); - + // we'll be able to pick-up our 9 char elements // at src from those 32 bytes // we load the first batch here, as inside the loop @@ -96,7 +96,7 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); 
src_0 = vec_ld(0, src); src_1 = vec_ld(16, src); srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src)); - + if (src_really_odd != 0x0000000F) { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector. srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src)); @@ -107,14 +107,14 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); } srcvA = vec_mergeh(vczero, srcvA); srcvB = vec_mergeh(vczero, srcvB); - + for(i=0; i<h; i++) { dst_odd = (unsigned long)dst & 0x0000000F; src_really_odd = (((unsigned long)src) + stride) & 0x0000000F; - + dstv = vec_ld(0, dst); - + // we we'll be able to pick-up our 9 char elements // at src + stride from those 32 bytes // then reuse the resulting 2 vectors srvcC and srcvD @@ -122,7 +122,7 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); src_0 = vec_ld(stride + 0, src); src_1 = vec_ld(stride + 16, src); srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src)); - + if (src_really_odd != 0x0000000F) { // if src & 0xF == 0xF, then (src+1) is properly aligned on the second vector. srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src)); @@ -131,10 +131,10 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); { srcvD = src_1; } - + srcvC = vec_mergeh(vczero, srcvC); srcvD = vec_mergeh(vczero, srcvD); - + // OK, now we (finally) do the math :-) // those four instructions replaces 32 int muls & 32 int adds. @@ -143,14 +143,14 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); tempB = vec_mladd((vector unsigned short)srcvB, Bv, tempA); tempC = vec_mladd((vector unsigned short)srcvC, Cv, tempB); tempD = vec_mladd((vector unsigned short)srcvD, Dv, tempC); - + srcvA = srcvC; srcvB = srcvD; - + tempD = vec_sr(tempD, vcsr8); - + dstv2 = vec_pack(tempD, (vector unsigned short)vczero); - + if (dst_odd) { dstv2 = vec_perm(dstv, dstv2, vcprm(0,1,s0,s1)); @@ -159,9 +159,9 @@ POWERPC_PERF_START_COUNT(altivec_gmc1_num, GMC1_PERF_COND); { dstv2 = vec_perm(dstv, dstv2, vcprm(s0,s1,2,3)); } - + vec_st(dstv2, 0, dst); - + dst += stride; src += stride; } diff --git a/src/libffmpeg/libavcodec/ppc/idct_altivec.c b/src/libffmpeg/libavcodec/ppc/idct_altivec.c index 3445adadd..93d63cfd3 100644 --- a/src/libffmpeg/libavcodec/ppc/idct_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/idct_altivec.c @@ -13,7 +13,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * */ @@ -51,108 +51,108 @@ #define vector_s32_t vector signed int #define vector_u32_t vector unsigned int -#define IDCT_HALF \ - /* 1st stage */ \ - t1 = vec_mradds (a1, vx7, vx1 ); \ - t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \ - t7 = vec_mradds (a2, vx5, vx3); \ - t3 = vec_mradds (ma2, vx3, vx5); \ - \ - /* 2nd stage */ \ - t5 = vec_adds (vx0, vx4); \ - t0 = vec_subs (vx0, vx4); \ - t2 = vec_mradds (a0, vx6, vx2); \ - t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \ - t6 = vec_adds (t8, t3); \ - t3 = vec_subs (t8, t3); \ - t8 = vec_subs (t1, t7); \ - t1 = vec_adds (t1, t7); \ - \ - /* 3rd stage */ \ - t7 = vec_adds (t5, t2); \ - t2 = vec_subs (t5, t2); \ - t5 = vec_adds (t0, t4); \ - t0 = vec_subs (t0, t4); \ - t4 = vec_subs (t8, t3); \ - t3 = vec_adds (t8, t3); \ - \ - /* 4th stage */ \ - vy0 = vec_adds (t7, t1); \ - vy7 = vec_subs (t7, t1); \ - vy1 = vec_mradds (c4, t3, t5); \ - vy6 = vec_mradds (mc4, t3, t5); \ - vy2 = 
vec_mradds (c4, t4, t0); \ - vy5 = vec_mradds (mc4, t4, t0); \ - vy3 = vec_adds (t2, t6); \ +#define IDCT_HALF \ + /* 1st stage */ \ + t1 = vec_mradds (a1, vx7, vx1 ); \ + t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \ + t7 = vec_mradds (a2, vx5, vx3); \ + t3 = vec_mradds (ma2, vx3, vx5); \ + \ + /* 2nd stage */ \ + t5 = vec_adds (vx0, vx4); \ + t0 = vec_subs (vx0, vx4); \ + t2 = vec_mradds (a0, vx6, vx2); \ + t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \ + t6 = vec_adds (t8, t3); \ + t3 = vec_subs (t8, t3); \ + t8 = vec_subs (t1, t7); \ + t1 = vec_adds (t1, t7); \ + \ + /* 3rd stage */ \ + t7 = vec_adds (t5, t2); \ + t2 = vec_subs (t5, t2); \ + t5 = vec_adds (t0, t4); \ + t0 = vec_subs (t0, t4); \ + t4 = vec_subs (t8, t3); \ + t3 = vec_adds (t8, t3); \ + \ + /* 4th stage */ \ + vy0 = vec_adds (t7, t1); \ + vy7 = vec_subs (t7, t1); \ + vy1 = vec_mradds (c4, t3, t5); \ + vy6 = vec_mradds (mc4, t3, t5); \ + vy2 = vec_mradds (c4, t4, t0); \ + vy5 = vec_mradds (mc4, t4, t0); \ + vy3 = vec_adds (t2, t6); \ vy4 = vec_subs (t2, t6); - -#define IDCT \ - vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \ - vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \ - vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \ - vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \ - vector_u16_t shift; \ - \ - c4 = vec_splat (constants[0], 0); \ - a0 = vec_splat (constants[0], 1); \ - a1 = vec_splat (constants[0], 2); \ - a2 = vec_splat (constants[0], 3); \ - mc4 = vec_splat (constants[0], 4); \ - ma2 = vec_splat (constants[0], 5); \ - bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \ - \ - zero = vec_splat_s16 (0); \ - shift = vec_splat_u16 (4); \ - \ - vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \ - vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \ - vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \ - vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \ - vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \ - vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \ - vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \ - vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \ - \ - IDCT_HALF \ - \ - vx0 = vec_mergeh (vy0, vy4); \ - vx1 = vec_mergel (vy0, vy4); \ - vx2 = vec_mergeh (vy1, vy5); \ - vx3 = vec_mergel (vy1, vy5); \ - vx4 = vec_mergeh (vy2, vy6); \ - vx5 = vec_mergel (vy2, vy6); \ - vx6 = vec_mergeh (vy3, vy7); \ - vx7 = vec_mergel (vy3, vy7); \ - \ - vy0 = vec_mergeh (vx0, vx4); \ - vy1 = vec_mergel (vx0, vx4); \ - vy2 = vec_mergeh (vx1, vx5); \ - vy3 = vec_mergel (vx1, vx5); \ - vy4 = vec_mergeh (vx2, vx6); \ - vy5 = vec_mergel (vx2, vx6); \ - vy6 = vec_mergeh (vx3, vx7); \ - vy7 = vec_mergel (vx3, vx7); \ - \ - vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \ - vx1 = vec_mergel (vy0, vy4); \ - vx2 = vec_mergeh (vy1, vy5); \ - vx3 = vec_mergel (vy1, vy5); \ - vx4 = vec_mergeh (vy2, vy6); \ - vx5 = vec_mergel (vy2, vy6); \ - vx6 = vec_mergeh (vy3, vy7); \ - vx7 = vec_mergel (vy3, vy7); \ - \ - IDCT_HALF \ - \ - shift = vec_splat_u16 (6); \ - vx0 = vec_sra (vy0, shift); \ - vx1 = vec_sra (vy1, shift); \ - vx2 = vec_sra (vy2, shift); \ - vx3 = vec_sra (vy3, shift); \ - vx4 = vec_sra (vy4, shift); \ - vx5 = vec_sra (vy5, shift); \ - vx6 = vec_sra (vy6, shift); \ + +#define IDCT \ + vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \ + vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \ + vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \ + 
vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \ + vector_u16_t shift; \ + \ + c4 = vec_splat (constants[0], 0); \ + a0 = vec_splat (constants[0], 1); \ + a1 = vec_splat (constants[0], 2); \ + a2 = vec_splat (constants[0], 3); \ + mc4 = vec_splat (constants[0], 4); \ + ma2 = vec_splat (constants[0], 5); \ + bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \ + \ + zero = vec_splat_s16 (0); \ + shift = vec_splat_u16 (4); \ + \ + vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \ + vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \ + vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \ + vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \ + vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \ + vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \ + vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \ + vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \ + \ + IDCT_HALF \ + \ + vx0 = vec_mergeh (vy0, vy4); \ + vx1 = vec_mergel (vy0, vy4); \ + vx2 = vec_mergeh (vy1, vy5); \ + vx3 = vec_mergel (vy1, vy5); \ + vx4 = vec_mergeh (vy2, vy6); \ + vx5 = vec_mergel (vy2, vy6); \ + vx6 = vec_mergeh (vy3, vy7); \ + vx7 = vec_mergel (vy3, vy7); \ + \ + vy0 = vec_mergeh (vx0, vx4); \ + vy1 = vec_mergel (vx0, vx4); \ + vy2 = vec_mergeh (vx1, vx5); \ + vy3 = vec_mergel (vx1, vx5); \ + vy4 = vec_mergeh (vx2, vx6); \ + vy5 = vec_mergel (vx2, vx6); \ + vy6 = vec_mergeh (vx3, vx7); \ + vy7 = vec_mergel (vx3, vx7); \ + \ + vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \ + vx1 = vec_mergel (vy0, vy4); \ + vx2 = vec_mergeh (vy1, vy5); \ + vx3 = vec_mergel (vy1, vy5); \ + vx4 = vec_mergeh (vy2, vy6); \ + vx5 = vec_mergel (vy2, vy6); \ + vx6 = vec_mergeh (vy3, vy7); \ + vx7 = vec_mergel (vy3, vy7); \ + \ + IDCT_HALF \ + \ + shift = vec_splat_u16 (6); \ + vx0 = vec_sra (vy0, shift); \ + vx1 = vec_sra (vy1, shift); \ + vx2 = vec_sra (vy2, shift); \ + vx3 = vec_sra (vy3, shift); \ + vx4 = vec_sra (vy4, shift); \ + vx5 = vec_sra (vy5, shift); \ + vx6 = vec_sra (vy6, shift); \ vx7 = vec_sra (vy7, shift); @@ -180,18 +180,18 @@ POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1); #endif IDCT -#define COPY(dest,src) \ - tmp = vec_packsu (src, src); \ - vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \ +#define COPY(dest,src) \ + tmp = vec_packsu (src, src); \ + vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest); - COPY (dest, vx0) dest += stride; - COPY (dest, vx1) dest += stride; - COPY (dest, vx2) dest += stride; - COPY (dest, vx3) dest += stride; - COPY (dest, vx4) dest += stride; - COPY (dest, vx5) dest += stride; - COPY (dest, vx6) dest += stride; + COPY (dest, vx0) dest += stride; + COPY (dest, vx1) dest += stride; + COPY (dest, vx2) dest += stride; + COPY (dest, vx3) dest += stride; + COPY (dest, vx4) dest += stride; + COPY (dest, vx5) dest += stride; + COPY (dest, vx6) dest += stride; COPY (dest, vx7) POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1); @@ -225,22 +225,22 @@ POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1); perm0 = vec_mergeh (p, p0); perm1 = vec_mergeh (p, p1); -#define ADD(dest,src,perm) \ - /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \ - tmp = vec_ld (0, dest); \ - tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \ - tmp3 = vec_adds (tmp2, src); \ - tmp = vec_packsu (tmp3, tmp3); \ - vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \ +#define ADD(dest,src,perm) \ + /* *(uint64_t *)&tmp = 
*(uint64_t *)dest; */ \ + tmp = vec_ld (0, dest); \ + tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \ + tmp3 = vec_adds (tmp2, src); \ + tmp = vec_packsu (tmp3, tmp3); \ + vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest); - ADD (dest, vx0, perm0) dest += stride; - ADD (dest, vx1, perm1) dest += stride; - ADD (dest, vx2, perm0) dest += stride; - ADD (dest, vx3, perm1) dest += stride; - ADD (dest, vx4, perm0) dest += stride; - ADD (dest, vx5, perm1) dest += stride; - ADD (dest, vx6, perm0) dest += stride; + ADD (dest, vx0, perm0) dest += stride; + ADD (dest, vx1, perm1) dest += stride; + ADD (dest, vx2, perm0) dest += stride; + ADD (dest, vx3, perm1) dest += stride; + ADD (dest, vx4, perm0) dest += stride; + ADD (dest, vx5, perm1) dest += stride; + ADD (dest, vx6, perm0) dest += stride; ADD (dest, vx7, perm1) POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1); diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c b/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c index 91e744af9..7a771a8ec 100644 --- a/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c +++ b/src/libffmpeg/libavcodec/ppc/mpegvideo_altivec.c @@ -16,7 +16,7 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdlib.h> @@ -25,7 +25,7 @@ #include "../mpegvideo.h" #include "gcc_fixes.h" - + #include "dsputil_altivec.h" // Swaps two variables (used for altivec registers) @@ -103,7 +103,7 @@ do { \ // slower, for dumb non-apple GCC #define FOUROF(a) {a,a,a,a} #endif -int dct_quantize_altivec(MpegEncContext* s, +int dct_quantize_altivec(MpegEncContext* s, DCTELEM* data, int n, int qscale, int* overflow) { @@ -152,9 +152,9 @@ int dct_quantize_altivec(MpegEncContext* s, } // The following block could exist as a separate an altivec dct - // function. However, if we put it inline, the DCT data can remain - // in the vector local variables, as floats, which we'll use during the - // quantize step... + // function. However, if we put it inline, the DCT data can remain + // in the vector local variables, as floats, which we'll use during the + // quantize step... { const vector float vec_0_298631336 = (vector float)FOUROF(0.298631336f); const vector float vec_0_390180644 = (vector float)FOUROF(-0.390180644f); @@ -206,11 +206,11 @@ int dct_quantize_altivec(MpegEncContext* s, z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero); // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), - // CONST_BITS-PASS1_BITS); + // CONST_BITS-PASS1_BITS); row2 = vec_madd(tmp13, vec_0_765366865, z1); // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), - // CONST_BITS-PASS1_BITS); + // CONST_BITS-PASS1_BITS); row6 = vec_madd(tmp12, vec_1_847759065, z1); z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7; @@ -273,7 +273,7 @@ int dct_quantize_altivec(MpegEncContext* s, if (whichPass == 1) { // transpose the data for the second pass - + // First, block transpose the upper right with lower left. SWAP(row4, alt0); SWAP(row5, alt1); @@ -315,7 +315,7 @@ int dct_quantize_altivec(MpegEncContext* s, } // Load the bias vector (We add 0.5 to the bias so that we're - // rounding when we convert to int, instead of flooring.) + // rounding when we convert to int, instead of flooring.) 
{ vector signed int biasInt; const vector float negOneFloat = (vector float)FOUROF(-1.0f); @@ -380,7 +380,7 @@ int dct_quantize_altivec(MpegEncContext* s, vec_cmpgt(alt7, zero)); } - + } // Store the data back into the original block @@ -469,7 +469,7 @@ int dct_quantize_altivec(MpegEncContext* s, vec_ste(scanIndices_01, 0, &lastNonZeroChar); lastNonZero = lastNonZeroChar; - + // While the data is still in vectors we check for the transpose IDCT permute // and handle it using the vector unit if we can. This is the permute used // by the altivec idct, so it is common when using the altivec dct. @@ -523,30 +523,30 @@ int dct_quantize_altivec(MpegEncContext* s, AltiVec version of dct_unquantize_h263 this code assumes `block' is 16 bytes-aligned */ -void dct_unquantize_h263_altivec(MpegEncContext *s, +void dct_unquantize_h263_altivec(MpegEncContext *s, DCTELEM *block, int n, int qscale) { POWERPC_PERF_DECLARE(altivec_dct_unquantize_h263_num, 1); int i, level, qmul, qadd; int nCoeffs; - + assert(s->block_last_index[n]>=0); POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1); - + qadd = (qscale - 1) | 1; qmul = qscale << 1; - + if (s->mb_intra) { if (!s->h263_aic) { - if (n < 4) + if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; }else qadd = 0; i = 1; - nCoeffs= 63; //does not allways use zigzag table + nCoeffs= 63; //does not allways use zigzag table } else { i = 0; nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]; @@ -586,7 +586,7 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1); register vector bool short blockv_null, blockv_neg; register short backup_0 = block[0]; register int j = 0; - + qmulv = vec_ld(0, qmul8); qaddv = vec_ld(0, qadd8); nqaddv = vec_ld(0, nqadd8); @@ -605,7 +605,7 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1); } } #endif - + // vectorize all the 16 bytes-aligned blocks // of 8 elements for(; (j + 7) <= nCoeffs ; j+=8) @@ -637,7 +637,7 @@ POWERPC_PERF_START_COUNT(altivec_dct_unquantize_h263_num, 1); block[j] = level; } } - + if (i == 1) { // cheat. this avoid special-casing the first iteration block[0] = backup_0; diff --git a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c b/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c index 832baced0..b391b4294 100644 --- a/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c +++ b/src/libffmpeg/libavcodec/ppc/mpegvideo_ppc.c @@ -13,9 +13,9 @@ * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ - + #include "../dsputil.h" #include "../mpegvideo.h" #include <time.h> @@ -24,7 +24,7 @@ #include "dsputil_altivec.h" #endif -extern int dct_quantize_altivec(MpegEncContext *s, +extern int dct_quantize_altivec(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); extern void dct_unquantize_h263_altivec(MpegEncContext *s, |