author     Diego 'Flameeyes' Pettenò <flameeyes@gmail.com>    2006-06-17 12:45:51 +0000
committer  Diego 'Flameeyes' Pettenò <flameeyes@gmail.com>    2006-06-17 12:45:51 +0000
commit     5817595983c62f616fb31a6d3f297d47a9b20519 (patch)
tree       5bde7c8a9b50f81daf12790dc01c5d6480de9cd6 /src
parent     48afd7769daf349aa921a94b1c0dffce3f8f228b (diff)
download   xine-lib-5817595983c62f616fb31a6d3f297d47a9b20519.tar.gz
           xine-lib-5817595983c62f616fb31a6d3f297d47a9b20519.tar.bz2
Fix Altivec libmpeg2 decoding, thanks to Luca Barbato (lu_zero@gentoo.org) and Christer Palm (palm@nogui.se).
CVS patchset: 8045
CVS date: 2006/06/17 12:45:51
Diffstat (limited to 'src')
 -rw-r--r--  src/libmpeg2/idct_altivec.c   | 502
 -rw-r--r--  src/libmpeg2/mpeg2_internal.h |   9
 -rw-r--r--  src/libmpeg2/slice.c          |  10
 -rw-r--r--  src/libmpeg2/slice_xvmc.c     |  12
4 files changed, 28 insertions, 505 deletions
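
The large deletion in idct_altivec.c below removes the non-Darwin code path: two IDCT routines written as gcc-2.95-generated inline PowerPC assembly, plus their constant table and scan-table init. After the patch every AltiVec build compiles the C intrinsics implementation instead, with <altivec.h> included directly, and the one remaining compiler difference, vector-literal syntax, is hidden behind a new VEC_S16 macro (parenthesised literals for Apple's compiler, braced literals for GNU C). Below is a minimal standalone sketch of that macro in use; the typedef, the preprocessor guards and the constant values are taken from the diff itself, while the table name idct_constants_row0 is invented here purely for illustration:

    #include <altivec.h>

    #define vector_s16_t vector signed short

    /* Apple's AltiVec dialect writes vector literals with parentheses,
     * GNU C writes them with braces; the macro lets one constant table
     * compile under both. */
    #if defined(__APPLE_CC__) && defined(__APPLE_ALTIVEC__)   /* apple */
    #define VEC_S16(a,b,c,d,e,f,g,h) (vector_s16_t) (a, b, c, d, e, f, g, h)
    #else                                                     /* gnu */
    #define VEC_S16(a,b,c,d,e,f,g,h) (vector_s16_t) {a, b, c, d, e, f, g, h}
    #endif

    /* first row of the IDCT coefficient table from the patch */
    static vector_s16_t idct_constants_row0 =
        VEC_S16(23170, 13573, 6518, 21895, -23170, -21895, 32, 31);
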
diff --git a/src/libmpeg2/idct_altivec.c b/src/libmpeg2/idct_altivec.c index 2ebdd3241..ed8b58cfd 100644 --- a/src/libmpeg2/idct_altivec.c +++ b/src/libmpeg2/idct_altivec.c @@ -25,493 +25,13 @@ #if defined (ARCH_PPC) && defined (ENABLE_ALTIVEC) +#include <altivec.h> + #include <inttypes.h> #include "mpeg2_internal.h" #include "xineutils.h" -#ifndef HOST_OS_DARWIN - -/* - * The asm code is generated with: - * - * gcc-2.95 -fvec -D__ALTIVEC__ -O9 -fomit-frame-pointer -mregnames -S - * idct_altivec.c - * - * awk '{args=""; len=split ($2, arg, ","); - * for (i=1; i<=len; i++) { a=arg[i]; if (i<len) a=a","; - * args = args sprintf ("%-6s", a) } - * printf ("\t\"\t%-16s%-24s\\n\"\n", $1, args) }' idct_altivec.s | - * unexpand -a - * - * I then do some simple trimming on the function prolog/trailers - */ - -void mpeg2_idct_copy_altivec (int16_t * block, uint8_t * dest, int stride) -{ - asm (" \n" - "# stwu %r1, -128(%r1) \n" - "# mflr %r0 \n" - "# stw %r0, 132(%r1) \n" - "# addi %r0, %r1, 128 \n" - "# bl _savev25 \n" - - " addi %r9, %r3, 112 \n" - " vspltish %v25, 4 \n" - " vxor %v13, %v13, %v13 \n" - " lis %r10, constants@ha \n" - " lvx %v1, 0, %r9 \n" - " la %r10, constants@l(%r10) \n" - " lvx %v5, 0, %r3 \n" - " addi %r9, %r3, 16 \n" - " lvx %v8, 0, %r10 \n" - " addi %r11, %r10, 32 \n" - " lvx %v12, 0, %r9 \n" - " lvx %v6, 0, %r11 \n" - " addi %r8, %r3, 48 \n" - " vslh %v1, %v1, %v25 \n" - " addi %r9, %r3, 80 \n" - " lvx %v11, 0, %r8 \n" - " vslh %v5, %v5, %v25 \n" - " lvx %v0, 0, %r9 \n" - " addi %r11, %r10, 64 \n" - " vsplth %v3, %v8, 2 \n" - " lvx %v7, 0, %r11 \n" - " addi %r9, %r3, 96 \n" - " vslh %v12, %v12, %v25 \n" - " vmhraddshs %v27, %v1, %v6, %v13 \n" - " addi %r8, %r3, 32 \n" - " vsplth %v2, %v8, 5 \n" - " lvx %v1, 0, %r9 \n" - " vslh %v11, %v11, %v25 \n" - " addi %r3, %r3, 64 \n" - " lvx %v9, 0, %r8 \n" - " addi %r9, %r10, 48 \n" - " vslh %v0, %v0, %v25 \n" - " lvx %v4, 0, %r9 \n" - " vmhraddshs %v31, %v12, %v6, %v13 \n" - " addi %r10, %r10, 16 \n" - " vmhraddshs %v30, %v0, %v7, %v13 \n" - " lvx %v10, 0, %r3 \n" - " vsplth %v19, %v8, 3 \n" - " vmhraddshs %v15, %v11, %v7, %v13 \n" - " lvx %v12, 0, %r10 \n" - " vsplth %v6, %v8, 4 \n" - " vslh %v1, %v1, %v25 \n" - " vsplth %v11, %v8, 1 \n" - " li %r9, 4 \n" - " vslh %v9, %v9, %v25 \n" - " vsplth %v7, %v8, 0 \n" - " vmhraddshs %v18, %v1, %v4, %v13 \n" - " vspltw %v8, %v8, 3 \n" - " vsubshs %v0, %v13, %v27 \n" - " vmhraddshs %v1, %v9, %v4, %v13 \n" - " vmhraddshs %v17, %v3, %v31, %v0 \n" - " vmhraddshs %v4, %v2, %v15, %v30 \n" - " vslh %v10, %v10, %v25 \n" - " vmhraddshs %v9, %v5, %v12, %v13 \n" - " vspltish %v25, 6 \n" - " vmhraddshs %v5, %v10, %v12, %v13 \n" - " vmhraddshs %v28, %v19, %v30, %v15 \n" - " vmhraddshs %v27, %v3, %v27, %v31 \n" - " vsubshs %v0, %v13, %v18 \n" - " vmhraddshs %v18, %v11, %v18, %v1 \n" - " vaddshs %v30, %v17, %v4 \n" - " vmhraddshs %v12, %v11, %v1, %v0 \n" - " vsubshs %v4, %v17, %v4 \n" - " vaddshs %v10, %v9, %v5 \n" - " vsubshs %v17, %v27, %v28 \n" - " vaddshs %v27, %v27, %v28 \n" - " vsubshs %v1, %v9, %v5 \n" - " vaddshs %v28, %v10, %v18 \n" - " vsubshs %v18, %v10, %v18 \n" - " vaddshs %v10, %v1, %v12 \n" - " vsubshs %v1, %v1, %v12 \n" - " vsubshs %v12, %v17, %v4 \n" - " vaddshs %v4, %v17, %v4 \n" - " vmhraddshs %v5, %v7, %v12, %v1 \n" - " vmhraddshs %v26, %v6, %v4, %v10 \n" - " vmhraddshs %v29, %v6, %v12, %v1 \n" - " vmhraddshs %v14, %v7, %v4, %v10 \n" - " vsubshs %v12, %v18, %v30 \n" - " vaddshs %v9, %v28, %v27 \n" - " vaddshs %v16, %v18, %v30 \n" - " vsubshs %v10, %v28, %v27 \n" - " vmrglh %v31, %v9, 
%v12 \n" - " vmrglh %v30, %v5, %v26 \n" - " vmrglh %v15, %v14, %v29 \n" - " vmrghh %v5, %v5, %v26 \n" - " vmrglh %v27, %v16, %v10 \n" - " vmrghh %v9, %v9, %v12 \n" - " vmrghh %v18, %v16, %v10 \n" - " vmrghh %v1, %v14, %v29 \n" - " vmrglh %v14, %v9, %v5 \n" - " vmrglh %v16, %v31, %v30 \n" - " vmrglh %v10, %v15, %v27 \n" - " vmrghh %v9, %v9, %v5 \n" - " vmrghh %v26, %v15, %v27 \n" - " vmrglh %v27, %v16, %v10 \n" - " vmrghh %v12, %v1, %v18 \n" - " vmrglh %v29, %v1, %v18 \n" - " vsubshs %v0, %v13, %v27 \n" - " vmrghh %v5, %v31, %v30 \n" - " vmrglh %v31, %v9, %v12 \n" - " vmrglh %v30, %v5, %v26 \n" - " vmrglh %v15, %v14, %v29 \n" - " vmhraddshs %v17, %v3, %v31, %v0 \n" - " vmrghh %v18, %v16, %v10 \n" - " vmhraddshs %v27, %v3, %v27, %v31 \n" - " vmhraddshs %v4, %v2, %v15, %v30 \n" - " vmrghh %v1, %v14, %v29 \n" - " vmhraddshs %v28, %v19, %v30, %v15 \n" - " vmrghh %v0, %v9, %v12 \n" - " vsubshs %v13, %v13, %v18 \n" - " vmrghh %v5, %v5, %v26 \n" - " vmhraddshs %v18, %v11, %v18, %v1 \n" - " vaddshs %v9, %v0, %v8 \n" - " vaddshs %v30, %v17, %v4 \n" - " vmhraddshs %v12, %v11, %v1, %v13 \n" - " vsubshs %v4, %v17, %v4 \n" - " vaddshs %v10, %v9, %v5 \n" - " vsubshs %v17, %v27, %v28 \n" - " vaddshs %v27, %v27, %v28 \n" - " vsubshs %v1, %v9, %v5 \n" - " vaddshs %v28, %v10, %v18 \n" - " vsubshs %v18, %v10, %v18 \n" - " vaddshs %v10, %v1, %v12 \n" - " vsubshs %v1, %v1, %v12 \n" - " vsubshs %v12, %v17, %v4 \n" - " vaddshs %v4, %v17, %v4 \n" - " vaddshs %v9, %v28, %v27 \n" - " vmhraddshs %v14, %v7, %v4, %v10 \n" - " vsrah %v9, %v9, %v25 \n" - " vmhraddshs %v5, %v7, %v12, %v1 \n" - " vpkshus %v0, %v9, %v9 \n" - " vmhraddshs %v29, %v6, %v12, %v1 \n" - " stvewx %v0, 0, %r4 \n" - " vaddshs %v16, %v18, %v30 \n" - " vsrah %v31, %v14, %v25 \n" - " stvewx %v0, %r9, %r4 \n" - " add %r4, %r4, %r5 \n" - " vsrah %v15, %v16, %v25 \n" - " vpkshus %v0, %v31, %v31 \n" - " vsrah %v1, %v5, %v25 \n" - " stvewx %v0, 0, %r4 \n" - " vsubshs %v12, %v18, %v30 \n" - " stvewx %v0, %r9, %r4 \n" - " vmhraddshs %v26, %v6, %v4, %v10 \n" - " vpkshus %v0, %v1, %v1 \n" - " add %r4, %r4, %r5 \n" - " vsrah %v5, %v12, %v25 \n" - " stvewx %v0, 0, %r4 \n" - " vsrah %v30, %v29, %v25 \n" - " stvewx %v0, %r9, %r4 \n" - " vsubshs %v10, %v28, %v27 \n" - " vpkshus %v0, %v15, %v15 \n" - " add %r4, %r4, %r5 \n" - " stvewx %v0, 0, %r4 \n" - " vsrah %v18, %v26, %v25 \n" - " stvewx %v0, %r9, %r4 \n" - " vsrah %v27, %v10, %v25 \n" - " vpkshus %v0, %v5, %v5 \n" - " add %r4, %r4, %r5 \n" - " stvewx %v0, 0, %r4 \n" - " stvewx %v0, %r9, %r4 \n" - " vpkshus %v0, %v30, %v30 \n" - " add %r4, %r4, %r5 \n" - " stvewx %v0, 0, %r4 \n" - " stvewx %v0, %r9, %r4 \n" - " vpkshus %v0, %v18, %v18 \n" - " add %r4, %r4, %r5 \n" - " stvewx %v0, 0, %r4 \n" - " stvewx %v0, %r9, %r4 \n" - " add %r4, %r4, %r5 \n" - " vpkshus %v0, %v27, %v27 \n" - " stvewx %v0, 0, %r4 \n" - " stvewx %v0, %r9, %r4 \n" - - "# addi %r0, %r1, 128 \n" - "# bl _restv25 \n" - "# lwz %r0, 132(%r1) \n" - "# mtlr %r0 \n" - "# la %r1, 128(%r1) \n" - " vxor %v1, %v1, %v1 \n" - " addi %r9, %r3, 16 \n" - " stvx %v1, 0, %r3 \n" - " stvx %v1, 0, %r9 \n" - " addi %r11, %r3, 32 \n" - " stvx %v1, 0, %r11 \n" - " addi %r9, %r3, 48 \n" - " stvx %v1, 0, %r9 \n" - " addi %r11, %r3, -64 \n" - " stvx %v1, 0, %r11 \n" - " addi %r9, %r3, -48 \n" - " stvx %v1, 0, %r9 \n" - " addi %r11, %r3, -32 \n" - " stvx %v1, 0, %r11 \n" - " addi %r3, %r3, -16 \n" - " stvx %v1, 0, %r3 \n" - ); -} - -void mpeg2_idct_add_altivec (int16_t * block, uint8_t * dest, int stride) -{ - asm (" \n" - "# stwu %r1, -192(%r1) \n" - "# mflr %r0 \n" - "# 
stw %r0, 196(%r1) \n" - "# addi %r0, %r1, 192 \n" - "# bl _savev21 \n" - - " addi %r9, %r3, 112 \n" - " vspltish %v21, 4 \n" - " vxor %v1, %v1, %v1 \n" - " lvx %v13, 0, %r9 \n" - " lis %r10, constants@ha \n" - " vspltisw %v3, -1 \n" - " la %r10, constants@l(%r10) \n" - " lvx %v5, 0, %r3 \n" - " addi %r9, %r3, 16 \n" - " lvx %v8, 0, %r10 \n" - " lvx %v12, 0, %r9 \n" - " addi %r11, %r10, 32 \n" - " lvx %v6, 0, %r11 \n" - " addi %r8, %r3, 48 \n" - " vslh %v13, %v13, %v21 \n" - " addi %r9, %r3, 80 \n" - " lvx %v11, 0, %r8 \n" - " vslh %v5, %v5, %v21 \n" - " lvx %v0, 0, %r9 \n" - " addi %r11, %r10, 64 \n" - " vsplth %v2, %v8, 2 \n" - " lvx %v7, 0, %r11 \n" - " vslh %v12, %v12, %v21 \n" - " addi %r9, %r3, 96 \n" - " vmhraddshs %v24, %v13, %v6, %v1 \n" - " addi %r8, %r3, 32 \n" - " vsplth %v17, %v8, 5 \n" - " lvx %v13, 0, %r9 \n" - " vslh %v11, %v11, %v21 \n" - " addi %r3, %r3, 64 \n" - " lvx %v10, 0, %r8 \n" - " vslh %v0, %v0, %v21 \n" - " addi %r9, %r10, 48 \n" - " vmhraddshs %v31, %v12, %v6, %v1 \n" - " lvx %v4, 0, %r9 \n" - " addi %r10, %r10, 16 \n" - " vmhraddshs %v26, %v0, %v7, %v1 \n" - " lvx %v9, 0, %r3 \n" - " vsplth %v16, %v8, 3 \n" - " vmhraddshs %v22, %v11, %v7, %v1 \n" - " lvx %v6, 0, %r10 \n" - " lvsl %v19, 0, %r4 \n" - " vsubshs %v12, %v1, %v24 \n" - " lvsl %v0, %r5, %r4 \n" - " vsplth %v11, %v8, 1 \n" - " vslh %v10, %v10, %v21 \n" - " vmrghb %v19, %v3, %v19 \n" - " lvx %v15, 0, %r4 \n" - " vslh %v13, %v13, %v21 \n" - " vmrghb %v3, %v3, %v0 \n" - " li %r9, 4 \n" - " vmhraddshs %v14, %v2, %v31, %v12 \n" - " vsplth %v7, %v8, 0 \n" - " vmhraddshs %v23, %v13, %v4, %v1 \n" - " vsplth %v18, %v8, 4 \n" - " vmhraddshs %v27, %v10, %v4, %v1 \n" - " vspltw %v8, %v8, 3 \n" - " vmhraddshs %v12, %v17, %v22, %v26 \n" - " vperm %v15, %v15, %v1, %v19 \n" - " vslh %v9, %v9, %v21 \n" - " vmhraddshs %v10, %v5, %v6, %v1 \n" - " vspltish %v21, 6 \n" - " vmhraddshs %v30, %v9, %v6, %v1 \n" - " vmhraddshs %v26, %v16, %v26, %v22 \n" - " vmhraddshs %v24, %v2, %v24, %v31 \n" - " vmhraddshs %v31, %v11, %v23, %v27 \n" - " vsubshs %v0, %v1, %v23 \n" - " vaddshs %v23, %v14, %v12 \n" - " vmhraddshs %v9, %v11, %v27, %v0 \n" - " vsubshs %v12, %v14, %v12 \n" - " vaddshs %v6, %v10, %v30 \n" - " vsubshs %v14, %v24, %v26 \n" - " vaddshs %v24, %v24, %v26 \n" - " vsubshs %v13, %v10, %v30 \n" - " vaddshs %v26, %v6, %v31 \n" - " vsubshs %v31, %v6, %v31 \n" - " vaddshs %v6, %v13, %v9 \n" - " vsubshs %v13, %v13, %v9 \n" - " vsubshs %v9, %v14, %v12 \n" - " vaddshs %v12, %v14, %v12 \n" - " vmhraddshs %v30, %v7, %v9, %v13 \n" - " vmhraddshs %v25, %v18, %v12, %v6 \n" - " vmhraddshs %v28, %v18, %v9, %v13 \n" - " vmhraddshs %v29, %v7, %v12, %v6 \n" - " vaddshs %v10, %v26, %v24 \n" - " vsubshs %v5, %v31, %v23 \n" - " vsubshs %v13, %v26, %v24 \n" - " vaddshs %v4, %v31, %v23 \n" - " vmrglh %v26, %v30, %v25 \n" - " vmrglh %v31, %v10, %v5 \n" - " vmrglh %v22, %v29, %v28 \n" - " vmrghh %v30, %v30, %v25 \n" - " vmrglh %v24, %v4, %v13 \n" - " vmrghh %v10, %v10, %v5 \n" - " vmrghh %v23, %v4, %v13 \n" - " vmrghh %v27, %v29, %v28 \n" - " vmrglh %v29, %v10, %v30 \n" - " vmrglh %v4, %v31, %v26 \n" - " vmrglh %v13, %v22, %v24 \n" - " vmrghh %v10, %v10, %v30 \n" - " vmrghh %v25, %v22, %v24 \n" - " vmrglh %v24, %v4, %v13 \n" - " vmrghh %v5, %v27, %v23 \n" - " vmrglh %v28, %v27, %v23 \n" - " vsubshs %v0, %v1, %v24 \n" - " vmrghh %v30, %v31, %v26 \n" - " vmrglh %v31, %v10, %v5 \n" - " vmrglh %v26, %v30, %v25 \n" - " vmrglh %v22, %v29, %v28 \n" - " vmhraddshs %v14, %v2, %v31, %v0 \n" - " vmrghh %v23, %v4, %v13 \n" - " vmhraddshs %v24, %v2, %v24, %v31 
\n" - " vmhraddshs %v12, %v17, %v22, %v26 \n" - " vmrghh %v27, %v29, %v28 \n" - " vmhraddshs %v26, %v16, %v26, %v22 \n" - " vmrghh %v0, %v10, %v5 \n" - " vmhraddshs %v31, %v11, %v23, %v27 \n" - " vmrghh %v30, %v30, %v25 \n" - " vsubshs %v13, %v1, %v23 \n" - " vaddshs %v10, %v0, %v8 \n" - " vaddshs %v23, %v14, %v12 \n" - " vsubshs %v12, %v14, %v12 \n" - " vaddshs %v6, %v10, %v30 \n" - " vsubshs %v14, %v24, %v26 \n" - " vmhraddshs %v9, %v11, %v27, %v13 \n" - " vaddshs %v24, %v24, %v26 \n" - " vaddshs %v26, %v6, %v31 \n" - " vsubshs %v13, %v10, %v30 \n" - " vaddshs %v10, %v26, %v24 \n" - " vsubshs %v31, %v6, %v31 \n" - " vaddshs %v6, %v13, %v9 \n" - " vsrah %v10, %v10, %v21 \n" - " vsubshs %v13, %v13, %v9 \n" - " vaddshs %v0, %v15, %v10 \n" - " vsubshs %v9, %v14, %v12 \n" - " vaddshs %v12, %v14, %v12 \n" - " vpkshus %v15, %v0, %v0 \n" - " stvewx %v15, 0, %r4 \n" - " vaddshs %v4, %v31, %v23 \n" - " vmhraddshs %v29, %v7, %v12, %v6 \n" - " stvewx %v15, %r9, %r4 \n" - " add %r4, %r4, %r5 \n" - " vsubshs %v5, %v31, %v23 \n" - " lvx %v15, 0, %r4 \n" - " vmhraddshs %v30, %v7, %v9, %v13 \n" - " vsrah %v22, %v4, %v21 \n" - " vperm %v15, %v15, %v1, %v3 \n" - " vmhraddshs %v28, %v18, %v9, %v13 \n" - " vsrah %v31, %v29, %v21 \n" - " vsubshs %v13, %v26, %v24 \n" - " vaddshs %v0, %v15, %v31 \n" - " vsrah %v27, %v30, %v21 \n" - " vpkshus %v15, %v0, %v0 \n" - " vsrah %v30, %v5, %v21 \n" - " stvewx %v15, 0, %r4 \n" - " vsrah %v26, %v28, %v21 \n" - " stvewx %v15, %r9, %r4 \n" - " vmhraddshs %v25, %v18, %v12, %v6 \n" - " add %r4, %r4, %r5 \n" - " vsrah %v24, %v13, %v21 \n" - " lvx %v15, 0, %r4 \n" - " vperm %v15, %v15, %v1, %v19 \n" - " vsrah %v23, %v25, %v21 \n" - " vaddshs %v0, %v15, %v27 \n" - " vpkshus %v15, %v0, %v0 \n" - " stvewx %v15, 0, %r4 \n" - " stvewx %v15, %r9, %r4 \n" - " add %r4, %r4, %r5 \n" - " lvx %v15, 0, %r4 \n" - " vperm %v15, %v15, %v1, %v3 \n" - " vaddshs %v0, %v15, %v22 \n" - " vpkshus %v15, %v0, %v0 \n" - " stvewx %v15, 0, %r4 \n" - " stvewx %v15, %r9, %r4 \n" - " add %r4, %r4, %r5 \n" - " lvx %v15, 0, %r4 \n" - " vperm %v15, %v15, %v1, %v19 \n" - " vaddshs %v0, %v15, %v30 \n" - " vpkshus %v15, %v0, %v0 \n" - " stvewx %v15, 0, %r4 \n" - " stvewx %v15, %r9, %r4 \n" - " add %r4, %r4, %r5 \n" - " lvx %v15, 0, %r4 \n" - " vperm %v15, %v15, %v1, %v3 \n" - " vaddshs %v0, %v15, %v26 \n" - " vpkshus %v15, %v0, %v0 \n" - " stvewx %v15, 0, %r4 \n" - " stvewx %v15, %r9, %r4 \n" - " add %r4, %r4, %r5 \n" - " lvx %v15, 0, %r4 \n" - " vperm %v15, %v15, %v1, %v19 \n" - " vaddshs %v0, %v15, %v23 \n" - " vpkshus %v15, %v0, %v0 \n" - " stvewx %v15, 0, %r4 \n" - " stvewx %v15, %r9, %r4 \n" - " add %r4, %r4, %r5 \n" - " lvx %v15, 0, %r4 \n" - " vperm %v15, %v15, %v1, %v3 \n" - " vaddshs %v0, %v15, %v24 \n" - " vpkshus %v15, %v0, %v0 \n" - " stvewx %v15, 0, %r4 \n" - " stvewx %v15, %r9, %r4 \n" - - "# addi %r0, %r1, 192 \n" - "# bl _restv21 \n" - "# lwz %r0, 196(%r1) \n" - "# mtlr %r0 \n" - "# la %r1, 192(%r1) \n" - " addi %r9, %r3, 16 \n" - " stvx %v1, 0, %r3 \n" - " stvx %v1, 0, %r9 \n" - " addi %r11, %r3, 32 \n" - " stvx %v1, 0, %r11 \n" - " addi %r9, %r3, 48 \n" - " stvx %v1, 0, %r9 \n" - " addi %r11, %r3, -64 \n" - " stvx %v1, 0, %r11 \n" - " addi %r9, %r3, -48 \n" - " stvx %v1, 0, %r9 \n" - " addi %r11, %r3, -32 \n" - " stvx %v1, 0, %r11 \n" - " addi %r3, %r3, -16 \n" - " stvx %v1, 0, %r3 \n" - ); -} - -static const int16_t constants[5][8] ATTR_ALIGN(16) __attribute__((used)) = { - {23170, 13573, 6518, 21895, -23170, -21895, 32, 31}, - {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725}, - 
{22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521}, - {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692}, - {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722} -}; - -void mpeg2_idct_altivec_init (void) -{ - int i, j; - - /* the altivec idct uses a transposed input, so we patch scan tables */ - for (i = 0; i < 64; i++) { - j = mpeg2_scan_norm[i]; - mpeg2_scan_norm[i] = (j >> 3) | ((j & 7) << 3); - j = mpeg2_scan_alt[i]; - mpeg2_scan_alt[i] = (j >> 3) | ((j & 7) << 3); - } -} - -#else /* HOST_OS_DARWIN */ - #define vector_s16_t vector signed short #define vector_u16_t vector unsigned short #define vector_s8_t vector signed char @@ -622,12 +142,18 @@ void mpeg2_idct_altivec_init (void) vx6 = vec_sra (vy6, shift); \ vx7 = vec_sra (vy7, shift); +#if defined( __APPLE_CC__ ) && defined( __APPLE_ALTIVEC__ ) /* apple */ +#define VEC_S16(a,b,c,d,e,f,g,h) (vector_s16_t) (a, b, c, d, e, f, g, h) +#else /* gnu */ +#define VEC_S16(a,b,c,d,e,f,g,h) (vector_s16_t) {a, b, c, d, e, f, g, h} +#endif + static vector_s16_t constants[5] = { - (vector_s16_t)(23170, 13573, 6518, 21895, -23170, -21895, 32, 31), - (vector_s16_t)(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725), - (vector_s16_t)(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521), - (vector_s16_t)(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692), - (vector_s16_t)(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722) + VEC_S16(23170, 13573, 6518, 21895, -23170, -21895, 32, 31), + VEC_S16(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725), + VEC_S16(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521), + VEC_S16(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692), + VEC_S16(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722) }; void mpeg2_idct_copy_altivec (vector_s16_t * block, unsigned char * dest, @@ -703,7 +229,5 @@ void mpeg2_idct_altivec_init (void) } } -#endif /* HOST_OS_DARWIN */ - #endif /* ARCH_PPC && ENABLED_ALTIVEC */ diff --git a/src/libmpeg2/mpeg2_internal.h b/src/libmpeg2/mpeg2_internal.h index 915bece2c..c2ffbf909 100644 --- a/src/libmpeg2/mpeg2_internal.h +++ b/src/libmpeg2/mpeg2_internal.h @@ -27,6 +27,10 @@ #include "video_out.h" #include "accel_xvmc.h" +#ifdef ENABLE_ALTIVEC +#include <altivec.h> +#endif + /* macroblock modes */ #define MACROBLOCK_INTRA XINE_MACROBLOCK_INTRA #define MACROBLOCK_PATTERN XINE_MACROBLOCK_PATTERN @@ -243,10 +247,6 @@ void mpeg2_zero_block_mmx (int16_t * block); void mpeg2_idct_mmx_init (void); /* idct_altivec.c */ -#ifndef HOST_OS_DARWIN -void mpeg2_idct_copy_altivec (int16_t * block, uint8_t * dest, int stride); -void mpeg2_idct_add_altivec (int16_t * block, uint8_t * dest, int stride); -#else /* HOST_OS_DARWIN */ # ifdef ENABLE_ALTIVEC void mpeg2_idct_copy_altivec (vector signed short * block, unsigned char * dest, int stride); @@ -258,7 +258,6 @@ void mpeg2_idct_copy_altivec (signed short * block, unsigned char * dest, void mpeg2_idct_add_altivec (signed short * block, unsigned char * dest, int stride); # endif /* ENABLE_ALTIVEC */ -#endif /* HOST_OS_DARWIN */ void mpeg2_idct_altivec_init (void); /* motion_comp.c */ diff --git a/src/libmpeg2/slice.c b/src/libmpeg2/slice.c index 50d7b4caf..f214f1f25 100644 --- a/src/libmpeg2/slice.c +++ b/src/libmpeg2/slice.c @@ -205,7 +205,7 @@ static inline int get_motion_delta (picture_t * picture, int f_code) #undef bit_ptr } -static inline int bound_motion_vector (int vector, int f_code) +static inline int bound_motion_vector (int vec, int f_code) { #if 1 unsigned int limit; @@ -213,11 +213,11 @@ static inline 
int bound_motion_vector (int vector, int f_code) limit = 16 << f_code; - if ((unsigned int)(vector + limit) < 2 * limit) - return vector; + if ((unsigned int)(vec + limit) < 2 * limit) + return vec; else { - sign = ((int32_t)vector) >> 31; - return vector - ((2 * limit) ^ sign) + sign; + sign = ((int32_t)vec) >> 31; + return vec - ((2 * limit) ^ sign) + sign; } #else return ((int32_t)vector << (27 - f_code)) >> (27 - f_code); diff --git a/src/libmpeg2/slice_xvmc.c b/src/libmpeg2/slice_xvmc.c index fb0fab1e9..4aa383b9a 100644 --- a/src/libmpeg2/slice_xvmc.c +++ b/src/libmpeg2/slice_xvmc.c @@ -256,7 +256,7 @@ static inline int get_xvmc_motion_delta (picture_t * picture, int f_code) #undef bit_ptr } -static inline int bound_motion_vector (int vector, int f_code) +static inline int bound_motion_vector (int vec, int f_code) { #if 1 unsigned int limit; @@ -264,14 +264,14 @@ static inline int bound_motion_vector (int vector, int f_code) limit = 16 << f_code; - if ((unsigned int)(vector + limit) < 2 * limit) - return vector; + if ((unsigned int)(vec + limit) < 2 * limit) + return vec; else { - sign = ((int32_t)vector) >> 31; - return vector - ((2 * limit) ^ sign) + sign; + sign = ((int32_t)vec) >> 31; + return vec - ((2 * limit) ^ sign) + sign; } #else - return ((int32_t)vector << (27 - f_code)) >> (27 - f_code); + return ((int32_t)vec << (27 - f_code)) >> (27 - f_code); #endif } |
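
The slice.c and slice_xvmc.c hunks above rename the bound_motion_vector parameter from vector to vec. The likely reason is visible in the mpeg2_internal.h hunk: that header now includes <altivec.h> whenever ENABLE_ALTIVEC is defined, and on most compilers that header turns vector into a macro or context-sensitive keyword for __vector, so an ordinary identifier named vector stops compiling. Here is a self-contained sketch of the renamed helper, assuming <stdint.h> in place of the project's inttypes.h and with the two local declarations folded together:

    #include <stdint.h>

    /* Clamp a decoded motion-vector component into the range implied by
     * f_code: values already in [-limit, limit) pass through, anything else
     * is wrapped by 2*limit.  The parameter is called "vec" rather than
     * "vector" because the latter clashes with the AltiVec keyword/macro
     * once <altivec.h> is pulled in. */
    static inline int bound_motion_vector (int vec, int f_code)
    {
        unsigned int limit = 16 << f_code;
        int sign;

        if ((unsigned int)(vec + limit) < 2 * limit)
            return vec;                               /* already in range */

        sign = ((int32_t)vec) >> 31;                  /* 0 if positive, -1 if negative */
        return vec - ((2 * limit) ^ sign) + sign;     /* subtract or add 2*limit */
    }

One small asymmetry worth noting: slice_xvmc.c also renames the identifier inside the disabled #else branch, while slice.c leaves that branch with the old name; since the branch is compiled out by the #if 1, this has no practical effect.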