-rw-r--r--   AUTHORS                                 3
-rw-r--r--   ChangeLog                               1
-rw-r--r--   acconfig.h                              6
-rw-r--r--   configure.in                            9
-rw-r--r--   src/libmpeg2/Makefile.am                3
-rw-r--r--   src/libmpeg2/idct.c                     8
-rw-r--r--   src/libmpeg2/idct_altivec.c           668
-rw-r--r--   src/libmpeg2/motion_comp.c              6
-rw-r--r--   src/libmpeg2/motion_comp_altivec.c   2024
-rw-r--r--   src/libmpeg2/mpeg2_internal.h          10
-rw-r--r--   src/xine-utils/cpu_accel.c             34
-rw-r--r--   src/xine-utils/xineutils.h              4
12 files changed, 2756 insertions, 20 deletions
diff --git a/AUTHORS b/AUTHORS
index 3e824fb0e..764ed6cb7 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -170,4 +170,7 @@ Contributions
Juan Manuel García Molina <juanmagm@mail.com>
spanish internationalization files.
+ Jeffrey W. Baker <jwbaker@acm.org>
+ altivec support for libmpeg2
+
(let us know if we've forgotten anyone)
diff --git a/ChangeLog b/ChangeLog
index 016822980..4e86aa516 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -6,6 +6,7 @@ xine (0.9.7) unstable; urgency=low
* OSD (On Screen Display) for rendering text and graphics into overlays
* reworked spu and overlay manager (multiple overlays supported)
* support for avi text subtitles (use something like xine stream.avi:foo.sub)
+ * altivec support
-- Guenter Bartsch <guenter@users.sourceforge.net> Tue Nov 27 01:20:06 CET 2001
diff --git a/acconfig.h b/acconfig.h
index e0f1b61cc..eaaea2831 100644
--- a/acconfig.h
+++ b/acconfig.h
@@ -18,6 +18,12 @@
/* Define this if you're running PowerPC architecture */
#undef __ppc__
+/* Define this if you're running PowerPC architecture */
+#undef ARCH_PPC
+
+/* Define this if you have the Motorola 74xx CPU */
+#undef ENABLE_ALTIVEC
+
/* Define this if you're running Sparc architecture */
#undef __sparc__
diff --git a/configure.in b/configure.in
index d01c1b35f..31bb299f4 100644
--- a/configure.in
+++ b/configure.in
@@ -177,6 +177,9 @@ LIBMPEG2_CFLAGS="" dnl default include path removed, no more needed.
LIBA52_CFLAGS=""
LIBFFMPEG_CFLAGS=""
+AC_ARG_ENABLE(altivec, [ --enable-altivec use assembly codes for Motorola 74xx CPUs],
+ enable_altivec=yes, enable_altivec=no)
+
if test x$enable_mlib = x; then
AC_ARG_ENABLE(mlib,
[ --disable-mlib make a version not using mediaLib],
@@ -561,6 +564,12 @@ case "$host_or_hostalias" in
GLOBAL_CFLAGS="$GLOBAL_CFLAGS -O3 -pipe -fomit-frame-pointer $m_wm $m_psb -fexpensive-optimizations $f_si $f_nsa -ffast-math -funroll-loops -funroll-all-loops -finline-functions"
DEBUG_CFLAGS="$DEBUG_CFLAGS -O3"
AC_DEFINE(FPM_PPC)
+ AC_DEFINE(ARCH_PPC)
+
+ if test x$enable_altivec = xyes; then
+ AC_DEFINE(ENABLE_ALTIVEC)
+ GLOBAL_CFLAGS="$GLOBAL_CFLAGS -Wa,-m7400"
+ fi
;;
sparc*-*-linux*)
diff --git a/src/libmpeg2/Makefile.am b/src/libmpeg2/Makefile.am
index cf093d802..eec705b15 100644
--- a/src/libmpeg2/Makefile.am
+++ b/src/libmpeg2/Makefile.am
@@ -14,7 +14,8 @@ lib_LTLIBRARIES = xineplug_decode_mpeg2.la
xineplug_decode_mpeg2_la_SOURCES = slice.c header.c stats.c idct.c \
motion_comp.c decode.c idct_mmx.c motion_comp_mmx.c \
- idct_mlib.c motion_comp_mlib.c xine_decoder.c
+ idct_mlib.c motion_comp_mlib.c idct_altivec.c \
+ motion_comp_altivec.c xine_decoder.c
xineplug_decode_mpeg2_la_LDFLAGS = -avoid-version -module
noinst_HEADERS = vlc.h mpeg2.h mpeg2_internal.h
diff --git a/src/libmpeg2/idct.c b/src/libmpeg2/idct.c
index 2aaf6eaf9..0b77a62ed 100644
--- a/src/libmpeg2/idct.c
+++ b/src/libmpeg2/idct.c
@@ -84,6 +84,14 @@ void idct_init (void)
idct_block_add = idct_block_add_mlib;
} else
#endif
+#ifdef ENABLE_ALTIVEC
+ if (config.flags & MM_ACCEL_PPC_ALTIVEC) {
+ fprintf (stderr, "Using altivec for IDCT transform\n");
+ idct_block_copy = idct_block_copy_altivec;
+ idct_block_add = idct_block_add_altivec;
+ idct_altivec_init ();
+ } else
+#endif
{
int i;
diff --git a/src/libmpeg2/idct_altivec.c b/src/libmpeg2/idct_altivec.c
new file mode 100644
index 000000000..96be2ebc3
--- /dev/null
+++ b/src/libmpeg2/idct_altivec.c
@@ -0,0 +1,668 @@
+/*
+ * idct_altivec.c
+ * Copyright (C) 2000-2001 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ALTIVEC__
+
+#include "config.h"
+
+#ifdef ARCH_PPC
+#ifdef ENABLE_ALTIVEC
+
+#include <inttypes.h>
+
+#include "mpeg2_internal.h"
+#include "xineutils.h"
+
+static int16_t constants[5][8] ATTR_ALIGN(16) = {
+ {23170, 13573, 6518, 21895, -23170, -21895, 32, 31},
+ {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725},
+ {22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521},
+ {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692},
+ {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722}
+};
+
+/*
+ * The asm code is generated with:
+ *
+ * gcc-2.95 -fvec -D__ALTIVEC__ -O9 -fomit-frame-pointer -mregnames -S
+ * idct_altivec.c
+ *
+ * awk '{args=""; len=split ($2, arg, ",");
+ * for (i=1; i<=len; i++) { a=arg[i]; if (i<len) a=a",";
+ * args = args sprintf ("%-6s", a) }
+ * printf ("\t\"\t%-16s%-24s\\n\"\n", $1, args) }' idct_altivec.s |
+ * unexpand -a
+ *
+ * I then do some simple trimming on the function prolog/trailers
+ */
+
+void idct_block_copy_altivec (int16_t * block, uint8_t * dest, int stride)
+{
+ asm (" \n"
+ "# stwu %r1, -128(%r1) \n"
+ "# mflr %r0 \n"
+ "# stw %r0, 132(%r1) \n"
+ "# addi %r0, %r1, 128 \n"
+ "# bl _savev25 \n"
+
+ " addi %r9, %r3, 112 \n"
+ " vspltish %v25, 4 \n"
+ " vxor %v13, %v13, %v13 \n"
+ " lis %r10, constants@ha \n"
+ " lvx %v1, 0, %r9 \n"
+ " la %r10, constants@l(%r10) \n"
+ " lvx %v5, 0, %r3 \n"
+ " addi %r9, %r3, 16 \n"
+ " lvx %v8, 0, %r10 \n"
+ " addi %r11, %r10, 32 \n"
+ " lvx %v12, 0, %r9 \n"
+ " lvx %v6, 0, %r11 \n"
+ " addi %r8, %r3, 48 \n"
+ " vslh %v1, %v1, %v25 \n"
+ " addi %r9, %r3, 80 \n"
+ " lvx %v11, 0, %r8 \n"
+ " vslh %v5, %v5, %v25 \n"
+ " lvx %v0, 0, %r9 \n"
+ " addi %r11, %r10, 64 \n"
+ " vsplth %v3, %v8, 2 \n"
+ " lvx %v7, 0, %r11 \n"
+ " addi %r9, %r3, 96 \n"
+ " vslh %v12, %v12, %v25 \n"
+ " vmhraddshs %v27, %v1, %v6, %v13 \n"
+ " addi %r8, %r3, 32 \n"
+ " vsplth %v2, %v8, 5 \n"
+ " lvx %v1, 0, %r9 \n"
+ " vslh %v11, %v11, %v25 \n"
+ " addi %r3, %r3, 64 \n"
+ " lvx %v9, 0, %r8 \n"
+ " addi %r9, %r10, 48 \n"
+ " vslh %v0, %v0, %v25 \n"
+ " lvx %v4, 0, %r9 \n"
+ " vmhraddshs %v31, %v12, %v6, %v13 \n"
+ " addi %r10, %r10, 16 \n"
+ " vmhraddshs %v30, %v0, %v7, %v13 \n"
+ " lvx %v10, 0, %r3 \n"
+ " vsplth %v19, %v8, 3 \n"
+ " vmhraddshs %v15, %v11, %v7, %v13 \n"
+ " lvx %v12, 0, %r10 \n"
+ " vsplth %v6, %v8, 4 \n"
+ " vslh %v1, %v1, %v25 \n"
+ " vsplth %v11, %v8, 1 \n"
+ " li %r9, 4 \n"
+ " vslh %v9, %v9, %v25 \n"
+ " vsplth %v7, %v8, 0 \n"
+ " vmhraddshs %v18, %v1, %v4, %v13 \n"
+ " vspltw %v8, %v8, 3 \n"
+ " vsubshs %v0, %v13, %v27 \n"
+ " vmhraddshs %v1, %v9, %v4, %v13 \n"
+ " vmhraddshs %v17, %v3, %v31, %v0 \n"
+ " vmhraddshs %v4, %v2, %v15, %v30 \n"
+ " vslh %v10, %v10, %v25 \n"
+ " vmhraddshs %v9, %v5, %v12, %v13 \n"
+ " vspltish %v25, 6 \n"
+ " vmhraddshs %v5, %v10, %v12, %v13 \n"
+ " vmhraddshs %v28, %v19, %v30, %v15 \n"
+ " vmhraddshs %v27, %v3, %v27, %v31 \n"
+ " vsubshs %v0, %v13, %v18 \n"
+ " vmhraddshs %v18, %v11, %v18, %v1 \n"
+ " vaddshs %v30, %v17, %v4 \n"
+ " vmhraddshs %v12, %v11, %v1, %v0 \n"
+ " vsubshs %v4, %v17, %v4 \n"
+ " vaddshs %v10, %v9, %v5 \n"
+ " vsubshs %v17, %v27, %v28 \n"
+ " vaddshs %v27, %v27, %v28 \n"
+ " vsubshs %v1, %v9, %v5 \n"
+ " vaddshs %v28, %v10, %v18 \n"
+ " vsubshs %v18, %v10, %v18 \n"
+ " vaddshs %v10, %v1, %v12 \n"
+ " vsubshs %v1, %v1, %v12 \n"
+ " vsubshs %v12, %v17, %v4 \n"
+ " vaddshs %v4, %v17, %v4 \n"
+ " vmhraddshs %v5, %v7, %v12, %v1 \n"
+ " vmhraddshs %v26, %v6, %v4, %v10 \n"
+ " vmhraddshs %v29, %v6, %v12, %v1 \n"
+ " vmhraddshs %v14, %v7, %v4, %v10 \n"
+ " vsubshs %v12, %v18, %v30 \n"
+ " vaddshs %v9, %v28, %v27 \n"
+ " vaddshs %v16, %v18, %v30 \n"
+ " vsubshs %v10, %v28, %v27 \n"
+ " vmrglh %v31, %v9, %v12 \n"
+ " vmrglh %v30, %v5, %v26 \n"
+ " vmrglh %v15, %v14, %v29 \n"
+ " vmrghh %v5, %v5, %v26 \n"
+ " vmrglh %v27, %v16, %v10 \n"
+ " vmrghh %v9, %v9, %v12 \n"
+ " vmrghh %v18, %v16, %v10 \n"
+ " vmrghh %v1, %v14, %v29 \n"
+ " vmrglh %v14, %v9, %v5 \n"
+ " vmrglh %v16, %v31, %v30 \n"
+ " vmrglh %v10, %v15, %v27 \n"
+ " vmrghh %v9, %v9, %v5 \n"
+ " vmrghh %v26, %v15, %v27 \n"
+ " vmrglh %v27, %v16, %v10 \n"
+ " vmrghh %v12, %v1, %v18 \n"
+ " vmrglh %v29, %v1, %v18 \n"
+ " vsubshs %v0, %v13, %v27 \n"
+ " vmrghh %v5, %v31, %v30 \n"
+ " vmrglh %v31, %v9, %v12 \n"
+ " vmrglh %v30, %v5, %v26 \n"
+ " vmrglh %v15, %v14, %v29 \n"
+ " vmhraddshs %v17, %v3, %v31, %v0 \n"
+ " vmrghh %v18, %v16, %v10 \n"
+ " vmhraddshs %v27, %v3, %v27, %v31 \n"
+ " vmhraddshs %v4, %v2, %v15, %v30 \n"
+ " vmrghh %v1, %v14, %v29 \n"
+ " vmhraddshs %v28, %v19, %v30, %v15 \n"
+ " vmrghh %v0, %v9, %v12 \n"
+ " vsubshs %v13, %v13, %v18 \n"
+ " vmrghh %v5, %v5, %v26 \n"
+ " vmhraddshs %v18, %v11, %v18, %v1 \n"
+ " vaddshs %v9, %v0, %v8 \n"
+ " vaddshs %v30, %v17, %v4 \n"
+ " vmhraddshs %v12, %v11, %v1, %v13 \n"
+ " vsubshs %v4, %v17, %v4 \n"
+ " vaddshs %v10, %v9, %v5 \n"
+ " vsubshs %v17, %v27, %v28 \n"
+ " vaddshs %v27, %v27, %v28 \n"
+ " vsubshs %v1, %v9, %v5 \n"
+ " vaddshs %v28, %v10, %v18 \n"
+ " vsubshs %v18, %v10, %v18 \n"
+ " vaddshs %v10, %v1, %v12 \n"
+ " vsubshs %v1, %v1, %v12 \n"
+ " vsubshs %v12, %v17, %v4 \n"
+ " vaddshs %v4, %v17, %v4 \n"
+ " vaddshs %v9, %v28, %v27 \n"
+ " vmhraddshs %v14, %v7, %v4, %v10 \n"
+ " vsrah %v9, %v9, %v25 \n"
+ " vmhraddshs %v5, %v7, %v12, %v1 \n"
+ " vpkshus %v0, %v9, %v9 \n"
+ " vmhraddshs %v29, %v6, %v12, %v1 \n"
+ " stvewx %v0, 0, %r4 \n"
+ " vaddshs %v16, %v18, %v30 \n"
+ " vsrah %v31, %v14, %v25 \n"
+ " stvewx %v0, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vsrah %v15, %v16, %v25 \n"
+ " vpkshus %v0, %v31, %v31 \n"
+ " vsrah %v1, %v5, %v25 \n"
+ " stvewx %v0, 0, %r4 \n"
+ " vsubshs %v12, %v18, %v30 \n"
+ " stvewx %v0, %r9, %r4 \n"
+ " vmhraddshs %v26, %v6, %v4, %v10 \n"
+ " vpkshus %v0, %v1, %v1 \n"
+ " add %r4, %r4, %r5 \n"
+ " vsrah %v5, %v12, %v25 \n"
+ " stvewx %v0, 0, %r4 \n"
+ " vsrah %v30, %v29, %v25 \n"
+ " stvewx %v0, %r9, %r4 \n"
+ " vsubshs %v10, %v28, %v27 \n"
+ " vpkshus %v0, %v15, %v15 \n"
+ " add %r4, %r4, %r5 \n"
+ " stvewx %v0, 0, %r4 \n"
+ " vsrah %v18, %v26, %v25 \n"
+ " stvewx %v0, %r9, %r4 \n"
+ " vsrah %v27, %v10, %v25 \n"
+ " vpkshus %v0, %v5, %v5 \n"
+ " add %r4, %r4, %r5 \n"
+ " stvewx %v0, 0, %r4 \n"
+ " stvewx %v0, %r9, %r4 \n"
+ " vpkshus %v0, %v30, %v30 \n"
+ " add %r4, %r4, %r5 \n"
+ " stvewx %v0, 0, %r4 \n"
+ " stvewx %v0, %r9, %r4 \n"
+ " vpkshus %v0, %v18, %v18 \n"
+ " add %r4, %r4, %r5 \n"
+ " stvewx %v0, 0, %r4 \n"
+ " stvewx %v0, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vpkshus %v0, %v27, %v27 \n"
+ " stvewx %v0, 0, %r4 \n"
+ " stvewx %v0, %r9, %r4 \n"
+
+ "# addi %r0, %r1, 128 \n"
+ "# bl _restv25 \n"
+ "# lwz %r0, 132(%r1) \n"
+ "# mtlr %r0 \n"
+ "# la %r1, 128(%r1) \n"
+ );
+}
+
+void idct_block_add_altivec (int16_t * block, uint8_t * dest, int stride)
+{
+ asm (" \n"
+ "# stwu %r1, -192(%r1) \n"
+ "# mflr %r0 \n"
+ "# stw %r0, 196(%r1) \n"
+ "# addi %r0, %r1, 192 \n"
+ "# bl _savev21 \n"
+
+ " addi %r9, %r3, 112 \n"
+ " vspltish %v21, 4 \n"
+ " vxor %v1, %v1, %v1 \n"
+ " lvx %v13, 0, %r9 \n"
+ " lis %r10, constants@ha \n"
+ " vspltisw %v3, -1 \n"
+ " la %r10, constants@l(%r10) \n"
+ " lvx %v5, 0, %r3 \n"
+ " addi %r9, %r3, 16 \n"
+ " lvx %v8, 0, %r10 \n"
+ " lvx %v12, 0, %r9 \n"
+ " addi %r11, %r10, 32 \n"
+ " lvx %v6, 0, %r11 \n"
+ " addi %r8, %r3, 48 \n"
+ " vslh %v13, %v13, %v21 \n"
+ " addi %r9, %r3, 80 \n"
+ " lvx %v11, 0, %r8 \n"
+ " vslh %v5, %v5, %v21 \n"
+ " lvx %v0, 0, %r9 \n"
+ " addi %r11, %r10, 64 \n"
+ " vsplth %v2, %v8, 2 \n"
+ " lvx %v7, 0, %r11 \n"
+ " vslh %v12, %v12, %v21 \n"
+ " addi %r9, %r3, 96 \n"
+ " vmhraddshs %v24, %v13, %v6, %v1 \n"
+ " addi %r8, %r3, 32 \n"
+ " vsplth %v17, %v8, 5 \n"
+ " lvx %v13, 0, %r9 \n"
+ " vslh %v11, %v11, %v21 \n"
+ " addi %r3, %r3, 64 \n"
+ " lvx %v10, 0, %r8 \n"
+ " vslh %v0, %v0, %v21 \n"
+ " addi %r9, %r10, 48 \n"
+ " vmhraddshs %v31, %v12, %v6, %v1 \n"
+ " lvx %v4, 0, %r9 \n"
+ " addi %r10, %r10, 16 \n"
+ " vmhraddshs %v26, %v0, %v7, %v1 \n"
+ " lvx %v9, 0, %r3 \n"
+ " vsplth %v16, %v8, 3 \n"
+ " vmhraddshs %v22, %v11, %v7, %v1 \n"
+ " lvx %v6, 0, %r10 \n"
+ " lvsl %v19, 0, %r4 \n"
+ " vsubshs %v12, %v1, %v24 \n"
+ " lvsl %v0, %r5, %r4 \n"
+ " vsplth %v11, %v8, 1 \n"
+ " vslh %v10, %v10, %v21 \n"
+ " vmrghb %v19, %v3, %v19 \n"
+ " lvx %v15, 0, %r4 \n"
+ " vslh %v13, %v13, %v21 \n"
+ " vmrghb %v3, %v3, %v0 \n"
+ " li %r9, 4 \n"
+ " vmhraddshs %v14, %v2, %v31, %v12 \n"
+ " vsplth %v7, %v8, 0 \n"
+ " vmhraddshs %v23, %v13, %v4, %v1 \n"
+ " vsplth %v18, %v8, 4 \n"
+ " vmhraddshs %v27, %v10, %v4, %v1 \n"
+ " vspltw %v8, %v8, 3 \n"
+ " vmhraddshs %v12, %v17, %v22, %v26 \n"
+ " vperm %v15, %v15, %v1, %v19 \n"
+ " vslh %v9, %v9, %v21 \n"
+ " vmhraddshs %v10, %v5, %v6, %v1 \n"
+ " vspltish %v21, 6 \n"
+ " vmhraddshs %v30, %v9, %v6, %v1 \n"
+ " vmhraddshs %v26, %v16, %v26, %v22 \n"
+ " vmhraddshs %v24, %v2, %v24, %v31 \n"
+ " vmhraddshs %v31, %v11, %v23, %v27 \n"
+ " vsubshs %v0, %v1, %v23 \n"
+ " vaddshs %v23, %v14, %v12 \n"
+ " vmhraddshs %v9, %v11, %v27, %v0 \n"
+ " vsubshs %v12, %v14, %v12 \n"
+ " vaddshs %v6, %v10, %v30 \n"
+ " vsubshs %v14, %v24, %v26 \n"
+ " vaddshs %v24, %v24, %v26 \n"
+ " vsubshs %v13, %v10, %v30 \n"
+ " vaddshs %v26, %v6, %v31 \n"
+ " vsubshs %v31, %v6, %v31 \n"
+ " vaddshs %v6, %v13, %v9 \n"
+ " vsubshs %v13, %v13, %v9 \n"
+ " vsubshs %v9, %v14, %v12 \n"
+ " vaddshs %v12, %v14, %v12 \n"
+ " vmhraddshs %v30, %v7, %v9, %v13 \n"
+ " vmhraddshs %v25, %v18, %v12, %v6 \n"
+ " vmhraddshs %v28, %v18, %v9, %v13 \n"
+ " vmhraddshs %v29, %v7, %v12, %v6 \n"
+ " vaddshs %v10, %v26, %v24 \n"
+ " vsubshs %v5, %v31, %v23 \n"
+ " vsubshs %v13, %v26, %v24 \n"
+ " vaddshs %v4, %v31, %v23 \n"
+ " vmrglh %v26, %v30, %v25 \n"
+ " vmrglh %v31, %v10, %v5 \n"
+ " vmrglh %v22, %v29, %v28 \n"
+ " vmrghh %v30, %v30, %v25 \n"
+ " vmrglh %v24, %v4, %v13 \n"
+ " vmrghh %v10, %v10, %v5 \n"
+ " vmrghh %v23, %v4, %v13 \n"
+ " vmrghh %v27, %v29, %v28 \n"
+ " vmrglh %v29, %v10, %v30 \n"
+ " vmrglh %v4, %v31, %v26 \n"
+ " vmrglh %v13, %v22, %v24 \n"
+ " vmrghh %v10, %v10, %v30 \n"
+ " vmrghh %v25, %v22, %v24 \n"
+ " vmrglh %v24, %v4, %v13 \n"
+ " vmrghh %v5, %v27, %v23 \n"
+ " vmrglh %v28, %v27, %v23 \n"
+ " vsubshs %v0, %v1, %v24 \n"
+ " vmrghh %v30, %v31, %v26 \n"
+ " vmrglh %v31, %v10, %v5 \n"
+ " vmrglh %v26, %v30, %v25 \n"
+ " vmrglh %v22, %v29, %v28 \n"
+ " vmhraddshs %v14, %v2, %v31, %v0 \n"
+ " vmrghh %v23, %v4, %v13 \n"
+ " vmhraddshs %v24, %v2, %v24, %v31 \n"
+ " vmhraddshs %v12, %v17, %v22, %v26 \n"
+ " vmrghh %v27, %v29, %v28 \n"
+ " vmhraddshs %v26, %v16, %v26, %v22 \n"
+ " vmrghh %v0, %v10, %v5 \n"
+ " vmhraddshs %v31, %v11, %v23, %v27 \n"
+ " vmrghh %v30, %v30, %v25 \n"
+ " vsubshs %v13, %v1, %v23 \n"
+ " vaddshs %v10, %v0, %v8 \n"
+ " vaddshs %v23, %v14, %v12 \n"
+ " vsubshs %v12, %v14, %v12 \n"
+ " vaddshs %v6, %v10, %v30 \n"
+ " vsubshs %v14, %v24, %v26 \n"
+ " vmhraddshs %v9, %v11, %v27, %v13 \n"
+ " vaddshs %v24, %v24, %v26 \n"
+ " vaddshs %v26, %v6, %v31 \n"
+ " vsubshs %v13, %v10, %v30 \n"
+ " vaddshs %v10, %v26, %v24 \n"
+ " vsubshs %v31, %v6, %v31 \n"
+ " vaddshs %v6, %v13, %v9 \n"
+ " vsrah %v10, %v10, %v21 \n"
+ " vsubshs %v13, %v13, %v9 \n"
+ " vaddshs %v0, %v15, %v10 \n"
+ " vsubshs %v9, %v14, %v12 \n"
+ " vaddshs %v12, %v14, %v12 \n"
+ " vpkshus %v15, %v0, %v0 \n"
+ " stvewx %v15, 0, %r4 \n"
+ " vaddshs %v4, %v31, %v23 \n"
+ " vmhraddshs %v29, %v7, %v12, %v6 \n"
+ " stvewx %v15, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vsubshs %v5, %v31, %v23 \n"
+ " lvx %v15, 0, %r4 \n"
+ " vmhraddshs %v30, %v7, %v9, %v13 \n"
+ " vsrah %v22, %v4, %v21 \n"
+ " vperm %v15, %v15, %v1, %v3 \n"
+ " vmhraddshs %v28, %v18, %v9, %v13 \n"
+ " vsrah %v31, %v29, %v21 \n"
+ " vsubshs %v13, %v26, %v24 \n"
+ " vaddshs %v0, %v15, %v31 \n"
+ " vsrah %v27, %v30, %v21 \n"
+ " vpkshus %v15, %v0, %v0 \n"
+ " vsrah %v30, %v5, %v21 \n"
+ " stvewx %v15, 0, %r4 \n"
+ " vsrah %v26, %v28, %v21 \n"
+ " stvewx %v15, %r9, %r4 \n"
+ " vmhraddshs %v25, %v18, %v12, %v6 \n"
+ " add %r4, %r4, %r5 \n"
+ " vsrah %v24, %v13, %v21 \n"
+ " lvx %v15, 0, %r4 \n"
+ " vperm %v15, %v15, %v1, %v19 \n"
+ " vsrah %v23, %v25, %v21 \n"
+ " vaddshs %v0, %v15, %v27 \n"
+ " vpkshus %v15, %v0, %v0 \n"
+ " stvewx %v15, 0, %r4 \n"
+ " stvewx %v15, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v15, 0, %r4 \n"
+ " vperm %v15, %v15, %v1, %v3 \n"
+ " vaddshs %v0, %v15, %v22 \n"
+ " vpkshus %v15, %v0, %v0 \n"
+ " stvewx %v15, 0, %r4 \n"
+ " stvewx %v15, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v15, 0, %r4 \n"
+ " vperm %v15, %v15, %v1, %v19 \n"
+ " vaddshs %v0, %v15, %v30 \n"
+ " vpkshus %v15, %v0, %v0 \n"
+ " stvewx %v15, 0, %r4 \n"
+ " stvewx %v15, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v15, 0, %r4 \n"
+ " vperm %v15, %v15, %v1, %v3 \n"
+ " vaddshs %v0, %v15, %v26 \n"
+ " vpkshus %v15, %v0, %v0 \n"
+ " stvewx %v15, 0, %r4 \n"
+ " stvewx %v15, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v15, 0, %r4 \n"
+ " vperm %v15, %v15, %v1, %v19 \n"
+ " vaddshs %v0, %v15, %v23 \n"
+ " vpkshus %v15, %v0, %v0 \n"
+ " stvewx %v15, 0, %r4 \n"
+ " stvewx %v15, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v15, 0, %r4 \n"
+ " vperm %v15, %v15, %v1, %v3 \n"
+ " vaddshs %v0, %v15, %v24 \n"
+ " vpkshus %v15, %v0, %v0 \n"
+ " stvewx %v15, 0, %r4 \n"
+ " stvewx %v15, %r9, %r4 \n"
+
+ "# addi %r0, %r1, 192 \n"
+ "# bl _restv21 \n"
+ "# lwz %r0, 196(%r1) \n"
+ "# mtlr %r0 \n"
+ "# la %r1, 192(%r1) \n"
+ );
+}
+
+void idct_altivec_init (void)
+{
+ extern uint8_t scan_norm[64];
+ extern uint8_t scan_alt[64];
+ int i, j;
+
+ i = constants[0][0]; /* just pretending - keeps gcc happy */
+
+ /* the altivec idct uses a transposed input, so we patch scan tables */
+ for (i = 0; i < 64; i++) {
+ j = scan_norm[i];
+ scan_norm[i] = (j >> 3) | ((j & 7) << 3);
+ j = scan_alt[i];
+ scan_alt[i] = (j >> 3) | ((j & 7) << 3);
+ }
+}
+
+#endif /* ENABLE_ALTIVEC */
+#endif /* ARCH_PPC */
+
+#else /* __ALTIVEC__ */
+
+#define vector_s16_t vector signed short
+#define vector_u16_t vector unsigned short
+#define vector_s8_t vector signed char
+#define vector_u8_t vector unsigned char
+#define vector_s32_t vector signed int
+#define vector_u32_t vector unsigned int
+
+#define IDCT_HALF \
+ /* 1st stage */ \
+ t1 = vec_mradds (a1, vx7, vx1 ); \
+ t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
+ t7 = vec_mradds (a2, vx5, vx3); \
+ t3 = vec_mradds (ma2, vx3, vx5); \
+ \
+ /* 2nd stage */ \
+ t5 = vec_adds (vx0, vx4); \
+ t0 = vec_subs (vx0, vx4); \
+ t2 = vec_mradds (a0, vx6, vx2); \
+ t4 = vec_mradds (a0, vx2, vec_subs (zero,vx6)); \
+ t6 = vec_adds (t8, t3); \
+ t3 = vec_subs (t8, t3); \
+ t8 = vec_subs (t1, t7); \
+ t1 = vec_adds (t1, t7); \
+ \
+ /* 3rd stage */ \
+ t7 = vec_adds (t5, t2); \
+ t2 = vec_subs (t5, t2); \
+ t5 = vec_adds (t0, t4); \
+ t0 = vec_subs (t0, t4); \
+ t4 = vec_subs (t8, t3); \
+ t3 = vec_adds (t8, t3); \
+ \
+ /* 4th stage */ \
+ vy0 = vec_adds (t7, t1); \
+ vy7 = vec_subs (t7, t1); \
+ vy1 = vec_mradds (c4, t3, t5); \
+ vy6 = vec_mradds (mc4, t3, t5); \
+ vy2 = vec_mradds (c4, t4, t0); \
+ vy5 = vec_mradds (mc4, t4, t0); \
+ vy3 = vec_adds (t2, t6); \
+ vy4 = vec_subs (t2, t6);
+
+#define IDCT \
+ vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
+ vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
+ vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \
+ vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \
+ vector_u16_t shift; \
+ \
+ c4 = vec_splat (constants[0], 0); \
+ a0 = vec_splat (constants[0], 1); \
+ a1 = vec_splat (constants[0], 2); \
+ a2 = vec_splat (constants[0], 3); \
+ mc4 = vec_splat (constants[0], 4); \
+ ma2 = vec_splat (constants[0], 5); \
+ bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \
+ \
+ zero = vec_splat_s16 (0); \
+ shift = vec_splat_u16 (4); \
+ \
+ vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \
+ vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \
+ vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \
+ vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \
+ vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \
+ vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \
+ vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \
+ vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \
+ \
+ IDCT_HALF \
+ \
+ vx0 = vec_mergeh (vy0, vy4); \
+ vx1 = vec_mergel (vy0, vy4); \
+ vx2 = vec_mergeh (vy1, vy5); \
+ vx3 = vec_mergel (vy1, vy5); \
+ vx4 = vec_mergeh (vy2, vy6); \
+ vx5 = vec_mergel (vy2, vy6); \
+ vx6 = vec_mergeh (vy3, vy7); \
+ vx7 = vec_mergel (vy3, vy7); \
+ \
+ vy0 = vec_mergeh (vx0, vx4); \
+ vy1 = vec_mergel (vx0, vx4); \
+ vy2 = vec_mergeh (vx1, vx5); \
+ vy3 = vec_mergel (vx1, vx5); \
+ vy4 = vec_mergeh (vx2, vx6); \
+ vy5 = vec_mergel (vx2, vx6); \
+ vy6 = vec_mergeh (vx3, vx7); \
+ vy7 = vec_mergel (vx3, vx7); \
+ \
+ vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \
+ vx1 = vec_mergel (vy0, vy4); \
+ vx2 = vec_mergeh (vy1, vy5); \
+ vx3 = vec_mergel (vy1, vy5); \
+ vx4 = vec_mergeh (vy2, vy6); \
+ vx5 = vec_mergel (vy2, vy6); \
+ vx6 = vec_mergeh (vy3, vy7); \
+ vx7 = vec_mergel (vy3, vy7); \
+ \
+ IDCT_HALF \
+ \
+ shift = vec_splat_u16 (6); \
+ vx0 = vec_sra (vy0, shift); \
+ vx1 = vec_sra (vy1, shift); \
+ vx2 = vec_sra (vy2, shift); \
+ vx3 = vec_sra (vy3, shift); \
+ vx4 = vec_sra (vy4, shift); \
+ vx5 = vec_sra (vy5, shift); \
+ vx6 = vec_sra (vy6, shift); \
+ vx7 = vec_sra (vy7, shift);
+
+static vector_s16_t constants[5] = {
+ (vector_s16_t)(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
+ (vector_s16_t)(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
+ (vector_s16_t)(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521),
+ (vector_s16_t)(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
+ (vector_s16_t)(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
+};
+
+void idct_block_copy_altivec (vector_s16_t * block, unsigned char * dest,
+ int stride)
+{
+ vector_u8_t tmp;
+
+ IDCT
+
+#define COPY(dest,src) \
+ tmp = vec_packsu (src, src); \
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+
+ COPY (dest, vx0) dest += stride;
+ COPY (dest, vx1) dest += stride;
+ COPY (dest, vx2) dest += stride;
+ COPY (dest, vx3) dest += stride;
+ COPY (dest, vx4) dest += stride;
+ COPY (dest, vx5) dest += stride;
+ COPY (dest, vx6) dest += stride;
+ COPY (dest, vx7)
+}
+
+void idct_block_add_altivec (vector_s16_t * block, unsigned char * dest,
+ int stride)
+{
+ vector_u8_t tmp;
+ vector_s16_t tmp2, tmp3;
+ vector_u8_t perm0;
+ vector_u8_t perm1;
+ vector_u8_t p0, p1, p;
+
+ IDCT
+
+ p0 = vec_lvsl (0, dest);
+ p1 = vec_lvsl (stride, dest);
+ p = vec_splat_u8 (-1);
+ perm0 = vec_mergeh (p, p0);
+ perm1 = vec_mergeh (p, p1);
+
+#define ADD(dest,src,perm) \
+ /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
+ tmp = vec_ld (0, dest); \
+ tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \
+ tmp3 = vec_adds (tmp2, src); \
+ tmp = vec_packsu (tmp3, tmp3); \
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+
+ ADD (dest, vx0, perm0) dest += stride;
+ ADD (dest, vx1, perm1) dest += stride;
+ ADD (dest, vx2, perm0) dest += stride;
+ ADD (dest, vx3, perm1) dest += stride;
+ ADD (dest, vx4, perm0) dest += stride;
+ ADD (dest, vx5, perm1) dest += stride;
+ ADD (dest, vx6, perm0) dest += stride;
+ ADD (dest, vx7, perm1)
+}
+
+#endif /* __ALTIVEC__ */
diff --git a/src/libmpeg2/motion_comp.c b/src/libmpeg2/motion_comp.c
index b8f308525..9ea10c532 100644
--- a/src/libmpeg2/motion_comp.c
+++ b/src/libmpeg2/motion_comp.c
@@ -50,6 +50,12 @@ void motion_comp_init (void)
mc_functions = mc_functions_mlib;
} else
#endif
+#ifdef ENABLE_ALTIVEC
+ if (config.flags & MM_ACCEL_PPC_ALTIVEC) {
+ fprintf (stderr, "Using altivec for motion compensation\n");
+ mc_functions = mc_functions_altivec;
+ } else
+#endif
{
fprintf (stderr, "No accelerated motion compensation found\n");
mc_functions = mc_functions_c;
diff --git a/src/libmpeg2/motion_comp_altivec.c b/src/libmpeg2/motion_comp_altivec.c
new file mode 100644
index 000000000..e4b3a234c
--- /dev/null
+++ b/src/libmpeg2/motion_comp_altivec.c
@@ -0,0 +1,2024 @@
+/*
+ * motion_comp_altivec.c
+ * Copyright (C) 2000-2001 Michel Lespinasse <walken@zoy.org>
+ * Copyright (C) 1999-2000 Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
+ *
+ * This file is part of mpeg2dec, a free MPEG-2 video stream decoder.
+ * See http://libmpeg2.sourceforge.net/ for updates.
+ *
+ * mpeg2dec is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * mpeg2dec is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ALTIVEC__
+
+#include "config.h"
+
+#ifdef ARCH_PPC
+#ifdef ENABLE_ALTIVEC
+
+#include <inttypes.h>
+
+#include "mpeg2_internal.h"
+
+/*
+ * The asm code is generated with:
+ *
+ * gcc-2.95 -fvec -D__ALTIVEC__ -O9 -fomit-frame-pointer -mregnames -S
+ * motion_comp_altivec.c
+ *
+ * sed 's/.L/._L/g' motion_comp_altivec.s |
+ * awk '{args=""; len=split ($2, arg, ",");
+ * for (i=1; i<=len; i++) { a=arg[i]; if (i<len) a=a",";
+ * args = args sprintf ("%-6s", a) }
+ * printf ("\t\"\t%-16s%-24s\\n\"\n", $1, args) }' |
+ * unexpand -a
+ */
+
+static void MC_put_16_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " srawi %r6, %r6, 1 \n"
+ " li %r9, 15 \n"
+ " addi %r6, %r6, -1 \n"
+ " lvsl %v12, 0, %r4 \n"
+ " mtctr %r6 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " add %r0, %r5, %r5 \n"
+ " vperm %v13, %v1, %v0, %v12 \n"
+ " add %r4, %r4, %r5 \n"
+ "._L6: \n"
+ " li %r9, 15 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " stvx %v13, 0, %r3 \n"
+ " vperm %v13, %v1, %v0, %v12 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " stvx %v13, %r5, %r3 \n"
+ " vperm %v13, %v1, %v0, %v12 \n"
+ " add %r4, %r4, %r5 \n"
+ " add %r3, %r3, %r0 \n"
+ " bdnz ._L6 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v1, 0, %r4 \n"
+ " stvx %v13, 0, %r3 \n"
+ " vperm %v13, %v1, %v0, %v12 \n"
+ " stvx %v13, %r5, %r3 \n"
+ );
+}
+
+static void MC_put_8_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v12, 0, %r4 \n"
+ " lvsl %v1, %r5, %r4 \n"
+ " vmrghb %v12, %v12, %v12 \n"
+ " srawi %r6, %r6, 1 \n"
+ " li %r9, 7 \n"
+ " vmrghb %v1, %v1, %v1 \n"
+ " addi %r6, %r6, -1 \n"
+ " vpkuhum %v10, %v12, %v12 \n"
+ " lvx %v13, 0, %r4 \n"
+ " mtctr %r6 \n"
+ " vpkuhum %v11, %v1, %v1 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v12, %v13, %v0, %v10 \n"
+ "._L11: \n"
+ " li %r9, 7 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " stvewx %v12, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vperm %v1, %v13, %v0, %v11 \n"
+ " stvewx %v12, %r9, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " li %r9, 7 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " add %r3, %r3, %r5 \n"
+ " stvewx %v1, 0, %r3 \n"
+ " vperm %v12, %v13, %v0, %v10 \n"
+ " li %r9, 4 \n"
+ " stvewx %v1, %r9, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " add %r3, %r3, %r5 \n"
+ " bdnz ._L11 \n"
+ " li %r9, 7 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " stvewx %v12, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vperm %v1, %v13, %v0, %v11 \n"
+ " stvewx %v12, %r9, %r3 \n"
+ " add %r3, %r3, %r5 \n"
+ " stvewx %v1, 0, %r3 \n"
+ " stvewx %v1, %r9, %r3 \n"
+ );
+}
+
+static void MC_put_x16_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v11, 0, %r4 \n"
+ " vspltisb %v0, 1 \n"
+ " li %r9, 16 \n"
+ " lvx %v12, 0, %r4 \n"
+ " vaddubm %v10, %v11, %v0 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v1, %v12, %v13, %v10 \n"
+ " vperm %v0, %v12, %v13, %v11 \n"
+ " mtctr %r6 \n"
+ " add %r0, %r5, %r5 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ "._L16: \n"
+ " li %r9, 16 \n"
+ " lvx %v12, 0, %r4 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " stvx %v0, 0, %r3 \n"
+ " vperm %v1, %v12, %v13, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v0, %v12, %v13, %v11 \n"
+ " lvx %v12, 0, %r4 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " stvx %v0, %r5, %r3 \n"
+ " vperm %v1, %v12, %v13, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v0, %v12, %v13, %v11 \n"
+ " add %r3, %r3, %r0 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " bdnz ._L16 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " lvx %v12, 0, %r4 \n"
+ " stvx %v0, 0, %r3 \n"
+ " vperm %v1, %v12, %v13, %v10 \n"
+ " vperm %v0, %v12, %v13, %v11 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " stvx %v0, %r5, %r3 \n"
+ );
+}
+
+static void MC_put_x8_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v0, 0, %r4 \n"
+ " vspltisb %v13, 1 \n"
+ " lvsl %v10, %r5, %r4 \n"
+ " vmrghb %v0, %v0, %v0 \n"
+ " li %r9, 8 \n"
+ " lvx %v11, 0, %r4 \n"
+ " vmrghb %v10, %v10, %v10 \n"
+ " vpkuhum %v8, %v0, %v0 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " vpkuhum %v9, %v10, %v10 \n"
+ " vaddubm %v7, %v8, %v13 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v1, %v11, %v12, %v8 \n"
+ " mtctr %r6 \n"
+ " vaddubm %v13, %v9, %v13 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v0, %v11, %v12, %v7 \n"
+ " vavgub %v0, %v1, %v0 \n"
+ "._L21: \n"
+ " li %r9, 8 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " lvx %v11, 0, %r4 \n"
+ " stvewx %v0, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vperm %v1, %v11, %v12, %v13 \n"
+ " stvewx %v0, %r9, %r3 \n"
+ " vperm %v0, %v11, %v12, %v9 \n"
+ " add %r4, %r4, %r5 \n"
+ " li %r9, 8 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " vavgub %v10, %v0, %v1 \n"
+ " lvx %v11, 0, %r4 \n"
+ " add %r3, %r3, %r5 \n"
+ " stvewx %v10, 0, %r3 \n"
+ " vperm %v1, %v11, %v12, %v7 \n"
+ " vperm %v0, %v11, %v12, %v8 \n"
+ " li %r9, 4 \n"
+ " stvewx %v10, %r9, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " add %r3, %r3, %r5 \n"
+ " bdnz ._L21 \n"
+ " li %r9, 8 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " lvx %v11, 0, %r4 \n"
+ " stvewx %v0, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vperm %v1, %v11, %v12, %v13 \n"
+ " stvewx %v0, %r9, %r3 \n"
+ " vperm %v0, %v11, %v12, %v9 \n"
+ " add %r3, %r3, %r5 \n"
+ " vavgub %v10, %v0, %v1 \n"
+ " stvewx %v10, 0, %r3 \n"
+ " stvewx %v10, %r9, %r3 \n"
+ );
+}
+
+static void MC_put_y16_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " li %r9, 15 \n"
+ " lvsl %v10, 0, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v12, %v13, %v1, %v10 \n"
+ " srawi %r6, %r6, 1 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v11, %v13, %v1, %v10 \n"
+ " mtctr %r6 \n"
+ " add %r0, %r5, %r5 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v12, %v11 \n"
+ "._L26: \n"
+ " li %r9, 15 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " stvx %v0, 0, %r3 \n"
+ " vperm %v12, %v13, %v1, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " vavgub %v0, %v12, %v11 \n"
+ " stvx %v0, %r5, %r3 \n"
+ " vperm %v11, %v13, %v1, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " add %r3, %r3, %r0 \n"
+ " vavgub %v0, %v12, %v11 \n"
+ " bdnz ._L26 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " stvx %v0, 0, %r3 \n"
+ " vperm %v12, %v13, %v1, %v10 \n"
+ " vavgub %v0, %v12, %v11 \n"
+ " stvx %v0, %r5, %r3 \n"
+ );
+}
+
+static void MC_put_y8_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v13, 0, %r4 \n"
+ " lvsl %v11, %r5, %r4 \n"
+ " vmrghb %v13, %v13, %v13 \n"
+ " li %r9, 7 \n"
+ " lvx %v12, 0, %r4 \n"
+ " vmrghb %v11, %v11, %v11 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " vpkuhum %v9, %v13, %v13 \n"
+ " add %r4, %r4, %r5 \n"
+ " vpkuhum %v10, %v11, %v11 \n"
+ " vperm %v13, %v12, %v1, %v9 \n"
+ " srawi %r6, %r6, 1 \n"
+ " lvx %v12, 0, %r4 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v11, %v12, %v1, %v10 \n"
+ " mtctr %r6 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v13, %v11 \n"
+ "._L31: \n"
+ " li %r9, 7 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " lvx %v12, 0, %r4 \n"
+ " stvewx %v0, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vperm %v13, %v12, %v1, %v9 \n"
+ " stvewx %v0, %r9, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v13, %v11 \n"
+ " li %r9, 7 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " lvx %v12, 0, %r4 \n"
+ " add %r3, %r3, %r5 \n"
+ " stvewx %v0, 0, %r3 \n"
+ " vperm %v11, %v12, %v1, %v10 \n"
+ " li %r9, 4 \n"
+ " stvewx %v0, %r9, %r3 \n"
+ " vavgub %v0, %v13, %v11 \n"
+ " add %r4, %r4, %r5 \n"
+ " add %r3, %r3, %r5 \n"
+ " bdnz ._L31 \n"
+ " li %r9, 7 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " lvx %v12, 0, %r4 \n"
+ " stvewx %v0, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vperm %v13, %v12, %v1, %v9 \n"
+ " stvewx %v0, %r9, %r3 \n"
+ " add %r3, %r3, %r5 \n"
+ " vavgub %v0, %v13, %v11 \n"
+ " stvewx %v0, 0, %r3 \n"
+ " stvewx %v0, %r9, %r3 \n"
+ );
+}
+
+static void MC_put_xy16_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v5, 0, %r4 \n"
+ " vspltisb %v3, 1 \n"
+ " li %r9, 16 \n"
+ " lvx %v1, 0, %r4 \n"
+ " vaddubm %v4, %v5, %v3 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v10, %v1, %v0, %v4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " vperm %v11, %v1, %v0, %v5 \n"
+ " addi %r6, %r6, -1 \n"
+ " lvx %v1, 0, %r4 \n"
+ " mtctr %r6 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " add %r0, %r5, %r5 \n"
+ " vperm %v10, %v1, %v0, %v4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v11, %v1, %v0, %v5 \n"
+ " vxor %v6, %v11, %v10 \n"
+ " vavgub %v7, %v11, %v10 \n"
+ " vor %v0, %v8, %v6 \n"
+ " vxor %v13, %v9, %v7 \n"
+ " vand %v0, %v3, %v0 \n"
+ " vavgub %v1, %v9, %v7 \n"
+ " vand %v0, %v0, %v13 \n"
+ " vsububm %v13, %v1, %v0 \n"
+ "._L36: \n"
+ " li %r9, 16 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " stvx %v13, 0, %r3 \n"
+ " vperm %v10, %v1, %v0, %v4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v11, %v1, %v0, %v5 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v10, %v1, %v0, %v4 \n"
+ " vavgub %v12, %v9, %v7 \n"
+ " vperm %v11, %v1, %v0, %v5 \n"
+ " vor %v13, %v8, %v6 \n"
+ " vxor %v0, %v9, %v7 \n"
+ " vxor %v6, %v11, %v10 \n"
+ " vand %v13, %v3, %v13 \n"
+ " vavgub %v7, %v11, %v10 \n"
+ " vor %v1, %v8, %v6 \n"
+ " vand %v13, %v13, %v0 \n"
+ " vxor %v0, %v9, %v7 \n"
+ " vand %v1, %v3, %v1 \n"
+ " vsububm %v13, %v12, %v13 \n"
+ " vand %v1, %v1, %v0 \n"
+ " stvx %v13, %r5, %r3 \n"
+ " vavgub %v0, %v9, %v7 \n"
+ " add %r3, %r3, %r0 \n"
+ " vsububm %v13, %v0, %v1 \n"
+ " bdnz ._L36 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v1, 0, %r4 \n"
+ " stvx %v13, 0, %r3 \n"
+ " vperm %v10, %v1, %v0, %v4 \n"
+ " vperm %v11, %v1, %v0, %v5 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " vor %v0, %v8, %v6 \n"
+ " vxor %v13, %v9, %v7 \n"
+ " vand %v0, %v3, %v0 \n"
+ " vavgub %v1, %v9, %v7 \n"
+ " vand %v0, %v0, %v13 \n"
+ " vsububm %v13, %v1, %v0 \n"
+ " stvx %v13, %r5, %r3 \n"
+ );
+}
+
+static void MC_put_xy8_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v4, 0, %r4 \n"
+ " vspltisb %v3, 1 \n"
+ " lvsl %v5, %r5, %r4 \n"
+ " vmrghb %v4, %v4, %v4 \n"
+ " li %r9, 16 \n"
+ " vmrghb %v5, %v5, %v5 \n"
+ " lvx %v1, 0, %r4 \n"
+ " vpkuhum %v4, %v4, %v4 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " vpkuhum %v5, %v5, %v5 \n"
+ " add %r4, %r4, %r5 \n"
+ " vaddubm %v2, %v4, %v3 \n"
+ " vperm %v11, %v1, %v0, %v4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " vaddubm %v19, %v5, %v3 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v10, %v1, %v0, %v2 \n"
+ " mtctr %r6 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v10, %v1, %v0, %v19 \n"
+ " vperm %v11, %v1, %v0, %v5 \n"
+ " vxor %v6, %v11, %v10 \n"
+ " vavgub %v7, %v11, %v10 \n"
+ " vor %v0, %v8, %v6 \n"
+ " vxor %v13, %v9, %v7 \n"
+ " vand %v0, %v3, %v0 \n"
+ " vavgub %v1, %v9, %v7 \n"
+ " vand %v0, %v0, %v13 \n"
+ " vsububm %v13, %v1, %v0 \n"
+ "._L41: \n"
+ " li %r9, 16 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v1, 0, %r4 \n"
+ " stvewx %v13, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vperm %v10, %v1, %v0, %v2 \n"
+ " stvewx %v13, %r9, %r3 \n"
+ " vperm %v11, %v1, %v0, %v4 \n"
+ " add %r4, %r4, %r5 \n"
+ " li %r9, 16 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " lvx %v1, 0, %r4 \n"
+ " vavgub %v12, %v9, %v7 \n"
+ " vor %v13, %v8, %v6 \n"
+ " add %r3, %r3, %r5 \n"
+ " vperm %v10, %v1, %v0, %v19 \n"
+ " li %r9, 4 \n"
+ " vperm %v11, %v1, %v0, %v5 \n"
+ " vand %v13, %v3, %v13 \n"
+ " add %r4, %r4, %r5 \n"
+ " vxor %v0, %v9, %v7 \n"
+ " vxor %v6, %v11, %v10 \n"
+ " vavgub %v7, %v11, %v10 \n"
+ " vor %v1, %v8, %v6 \n"
+ " vand %v13, %v13, %v0 \n"
+ " vxor %v0, %v9, %v7 \n"
+ " vand %v1, %v3, %v1 \n"
+ " vsububm %v13, %v12, %v13 \n"
+ " vand %v1, %v1, %v0 \n"
+ " stvewx %v13, 0, %r3 \n"
+ " vavgub %v0, %v9, %v7 \n"
+ " stvewx %v13, %r9, %r3 \n"
+ " add %r3, %r3, %r5 \n"
+ " vsububm %v13, %v0, %v1 \n"
+ " bdnz ._L41 \n"
+ " li %r9, 16 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v1, 0, %r4 \n"
+ " stvewx %v13, 0, %r3 \n"
+ " vperm %v10, %v1, %v0, %v2 \n"
+ " li %r9, 4 \n"
+ " vperm %v11, %v1, %v0, %v4 \n"
+ " stvewx %v13, %r9, %r3 \n"
+ " add %r3, %r3, %r5 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " vor %v0, %v8, %v6 \n"
+ " vxor %v13, %v9, %v7 \n"
+ " vand %v0, %v3, %v0 \n"
+ " vavgub %v1, %v9, %v7 \n"
+ " vand %v0, %v0, %v13 \n"
+ " vsububm %v13, %v1, %v0 \n"
+ " stvewx %v13, 0, %r3 \n"
+ " stvewx %v13, %r9, %r3 \n"
+ );
+}
+
+static void MC_avg_16_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " li %r9, 15 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvsl %v11, 0, %r4 \n"
+ " lvx %v1, 0, %r4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v0, %v1, %v0, %v11 \n"
+ " lvx %v13, 0, %r3 \n"
+ " mtctr %r6 \n"
+ " add %r9, %r5, %r5 \n"
+ " vavgub %v12, %v13, %v0 \n"
+ " add %r4, %r4, %r5 \n"
+ "._L46: \n"
+ " li %r11, 15 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v0, %r11, %r4 \n"
+ " lvx %v13, %r5, %r3 \n"
+ " vperm %v0, %v1, %v0, %v11 \n"
+ " stvx %v12, 0, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v12, %v13, %v0 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v0, %r11, %r4 \n"
+ " lvx %v13, %r9, %r3 \n"
+ " vperm %v0, %v1, %v0, %v11 \n"
+ " stvx %v12, %r5, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v12, %v13, %v0 \n"
+ " add %r3, %r3, %r9 \n"
+ " bdnz ._L46 \n"
+ " lvx %v0, %r11, %r4 \n"
+ " lvx %v1, 0, %r4 \n"
+ " lvx %v13, %r5, %r3 \n"
+ " vperm %v0, %v1, %v0, %v11 \n"
+ " stvx %v12, 0, %r3 \n"
+ " vavgub %v12, %v13, %v0 \n"
+ " stvx %v12, %r5, %r3 \n"
+ );
+}
+
+static void MC_avg_8_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v12, 0, %r4 \n"
+ " li %r9, 7 \n"
+ " vmrghb %v12, %v12, %v12 \n"
+ " lvsl %v1, %r5, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " vpkuhum %v9, %v12, %v12 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " vmrghb %v1, %v1, %v1 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v0, %v13, %v0, %v9 \n"
+ " lvx %v11, 0, %r3 \n"
+ " mtctr %r6 \n"
+ " vpkuhum %v10, %v1, %v1 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v12, %v11, %v0 \n"
+ "._L51: \n"
+ " li %r9, 7 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v11, %r5, %r3 \n"
+ " stvewx %v12, 0, %r3 \n"
+ " vperm %v0, %v13, %v0, %v10 \n"
+ " li %r9, 4 \n"
+ " stvewx %v12, %r9, %r3 \n"
+ " vavgub %v1, %v11, %v0 \n"
+ " add %r4, %r4, %r5 \n"
+ " li %r9, 7 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " add %r3, %r3, %r5 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v11, %r5, %r3 \n"
+ " stvewx %v1, 0, %r3 \n"
+ " vperm %v0, %v13, %v0, %v9 \n"
+ " li %r9, 4 \n"
+ " stvewx %v1, %r9, %r3 \n"
+ " vavgub %v12, %v11, %v0 \n"
+ " add %r4, %r4, %r5 \n"
+ " add %r3, %r3, %r5 \n"
+ " bdnz ._L51 \n"
+ " li %r9, 7 \n"
+ " lvx %v0, %r9, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v11, %r5, %r3 \n"
+ " stvewx %v12, 0, %r3 \n"
+ " vperm %v0, %v13, %v0, %v10 \n"
+ " li %r9, 4 \n"
+ " stvewx %v12, %r9, %r3 \n"
+ " vavgub %v1, %v11, %v0 \n"
+ " add %r3, %r3, %r5 \n"
+ " stvewx %v1, 0, %r3 \n"
+ " stvewx %v1, %r9, %r3 \n"
+ );
+}
+
+static void MC_avg_x16_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v8, 0, %r4 \n"
+ " vspltisb %v0, 1 \n"
+ " li %r9, 16 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " vaddubm %v7, %v8, %v0 \n"
+ " lvx %v11, 0, %r4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " vperm %v1, %v11, %v12, %v7 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v0, %v11, %v12, %v8 \n"
+ " lvx %v9, 0, %r3 \n"
+ " mtctr %r6 \n"
+ " add %r9, %r5, %r5 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v10, %v9, %v0 \n"
+ "._L56: \n"
+ " li %r11, 16 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v12, %r11, %r4 \n"
+ " lvx %v9, %r5, %r3 \n"
+ " stvx %v10, 0, %r3 \n"
+ " vperm %v0, %v11, %v12, %v7 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v1, %v11, %v12, %v8 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v12, %r11, %r4 \n"
+ " vavgub %v1, %v1, %v0 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v13, %v11, %v12, %v7 \n"
+ " vavgub %v10, %v9, %v1 \n"
+ " vperm %v0, %v11, %v12, %v8 \n"
+ " lvx %v9, %r9, %r3 \n"
+ " stvx %v10, %r5, %r3 \n"
+ " vavgub %v0, %v0, %v13 \n"
+ " add %r3, %r3, %r9 \n"
+ " vavgub %v10, %v9, %v0 \n"
+ " bdnz ._L56 \n"
+ " lvx %v12, %r11, %r4 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v9, %r5, %r3 \n"
+ " vperm %v1, %v11, %v12, %v7 \n"
+ " stvx %v10, 0, %r3 \n"
+ " vperm %v0, %v11, %v12, %v8 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " vavgub %v10, %v9, %v0 \n"
+ " stvx %v10, %r5, %r3 \n"
+ );
+}
+
+static void MC_avg_x8_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v10, 0, %r4 \n"
+ " vspltisb %v13, 1 \n"
+ " li %r9, 8 \n"
+ " vmrghb %v10, %v10, %v10 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " vpkuhum %v7, %v10, %v10 \n"
+ " srawi %r6, %r6, 1 \n"
+ " lvsl %v10, %r5, %r4 \n"
+ " vaddubm %v6, %v7, %v13 \n"
+ " vperm %v0, %v11, %v12, %v7 \n"
+ " addi %r6, %r6, -1 \n"
+ " vmrghb %v10, %v10, %v10 \n"
+ " lvx %v9, 0, %r3 \n"
+ " mtctr %r6 \n"
+ " vperm %v1, %v11, %v12, %v6 \n"
+ " add %r4, %r4, %r5 \n"
+ " vpkuhum %v8, %v10, %v10 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " vaddubm %v13, %v8, %v13 \n"
+ " vavgub %v10, %v9, %v0 \n"
+ "._L61: \n"
+ " li %r9, 8 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v9, %r5, %r3 \n"
+ " stvewx %v10, 0, %r3 \n"
+ " vperm %v1, %v11, %v12, %v13 \n"
+ " vperm %v0, %v11, %v12, %v8 \n"
+ " li %r9, 4 \n"
+ " stvewx %v10, %r9, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " li %r9, 8 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " vavgub %v10, %v9, %v0 \n"
+ " lvx %v11, 0, %r4 \n"
+ " add %r3, %r3, %r5 \n"
+ " vperm %v1, %v11, %v12, %v6 \n"
+ " lvx %v9, %r5, %r3 \n"
+ " vperm %v0, %v11, %v12, %v7 \n"
+ " stvewx %v10, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " stvewx %v10, %r9, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " add %r3, %r3, %r5 \n"
+ " vavgub %v10, %v9, %v0 \n"
+ " bdnz ._L61 \n"
+ " li %r9, 8 \n"
+ " lvx %v12, %r9, %r4 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v9, %r5, %r3 \n"
+ " vperm %v1, %v11, %v12, %v13 \n"
+ " stvewx %v10, 0, %r3 \n"
+ " vperm %v0, %v11, %v12, %v8 \n"
+ " li %r9, 4 \n"
+ " stvewx %v10, %r9, %r3 \n"
+ " vavgub %v0, %v0, %v1 \n"
+ " add %r3, %r3, %r5 \n"
+ " vavgub %v10, %v9, %v0 \n"
+ " stvewx %v10, 0, %r3 \n"
+ " stvewx %v10, %r9, %r3 \n"
+ );
+}
+
+static void MC_avg_y16_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " li %r9, 15 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " lvsl %v9, 0, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v11, %v13, %v1, %v9 \n"
+ " li %r11, 15 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r11, %r4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " vperm %v10, %v13, %v1, %v9 \n"
+ " addi %r6, %r6, -1 \n"
+ " lvx %v12, 0, %r3 \n"
+ " mtctr %r6 \n"
+ " vavgub %v0, %v11, %v10 \n"
+ " add %r9, %r5, %r5 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v12, %v0 \n"
+ "._L66: \n"
+ " li %r11, 15 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r11, %r4 \n"
+ " lvx %v12, %r5, %r3 \n"
+ " vperm %v11, %v13, %v1, %v9 \n"
+ " stvx %v0, 0, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v11, %v10 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r11, %r4 \n"
+ " vavgub %v0, %v12, %v0 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v12, %r9, %r3 \n"
+ " vperm %v10, %v13, %v1, %v9 \n"
+ " stvx %v0, %r5, %r3 \n"
+ " vavgub %v0, %v11, %v10 \n"
+ " add %r3, %r3, %r9 \n"
+ " vavgub %v0, %v12, %v0 \n"
+ " bdnz ._L66 \n"
+ " lvx %v1, %r11, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v12, %r5, %r3 \n"
+ " vperm %v11, %v13, %v1, %v9 \n"
+ " stvx %v0, 0, %r3 \n"
+ " vavgub %v0, %v11, %v10 \n"
+ " vavgub %v0, %v12, %v0 \n"
+ " stvx %v0, %r5, %r3 \n"
+ );
+}
+
+static void MC_avg_y8_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v12, 0, %r4 \n"
+ " lvsl %v9, %r5, %r4 \n"
+ " vmrghb %v12, %v12, %v12 \n"
+ " li %r9, 7 \n"
+ " lvx %v11, 0, %r4 \n"
+ " vmrghb %v9, %v9, %v9 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " vpkuhum %v7, %v12, %v12 \n"
+ " add %r4, %r4, %r5 \n"
+ " vpkuhum %v8, %v9, %v9 \n"
+ " vperm %v12, %v11, %v13, %v7 \n"
+ " srawi %r6, %r6, 1 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v9, %v11, %v13, %v8 \n"
+ " lvx %v10, 0, %r3 \n"
+ " mtctr %r6 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v0, %v12, %v9 \n"
+ " vavgub %v1, %v10, %v0 \n"
+ "._L71: \n"
+ " li %r9, 7 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v10, %r5, %r3 \n"
+ " stvewx %v1, 0, %r3 \n"
+ " vperm %v12, %v11, %v13, %v7 \n"
+ " li %r9, 4 \n"
+ " stvewx %v1, %r9, %r3 \n"
+ " vavgub %v0, %v12, %v9 \n"
+ " add %r4, %r4, %r5 \n"
+ " li %r9, 7 \n"
+ " vavgub %v1, %v10, %v0 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " lvx %v11, 0, %r4 \n"
+ " add %r3, %r3, %r5 \n"
+ " vperm %v9, %v11, %v13, %v8 \n"
+ " lvx %v10, %r5, %r3 \n"
+ " stvewx %v1, 0, %r3 \n"
+ " vavgub %v0, %v12, %v9 \n"
+ " li %r9, 4 \n"
+ " stvewx %v1, %r9, %r3 \n"
+ " add %r4, %r4, %r5 \n"
+ " vavgub %v1, %v10, %v0 \n"
+ " add %r3, %r3, %r5 \n"
+ " bdnz ._L71 \n"
+ " li %r9, 7 \n"
+ " lvx %v13, %r9, %r4 \n"
+ " lvx %v11, 0, %r4 \n"
+ " lvx %v10, %r5, %r3 \n"
+ " vperm %v12, %v11, %v13, %v7 \n"
+ " stvewx %v1, 0, %r3 \n"
+ " li %r9, 4 \n"
+ " vavgub %v0, %v12, %v9 \n"
+ " stvewx %v1, %r9, %r3 \n"
+ " add %r3, %r3, %r5 \n"
+ " vavgub %v1, %v10, %v0 \n"
+ " stvewx %v1, 0, %r3 \n"
+ " stvewx %v1, %r9, %r3 \n"
+ );
+}
+
+static void MC_avg_xy16_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v4, 0, %r4 \n"
+ " vspltisb %v2, 1 \n"
+ " li %r9, 16 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " vaddubm %v3, %v4, %v2 \n"
+ " lvx %v13, 0, %r4 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v10, %v13, %v1, %v3 \n"
+ " li %r11, 16 \n"
+ " vperm %v11, %v13, %v1, %v4 \n"
+ " srawi %r6, %r6, 1 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r11, %r4 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v10, %v13, %v1, %v3 \n"
+ " lvx %v6, 0, %r3 \n"
+ " mtctr %r6 \n"
+ " vperm %v11, %v13, %v1, %v4 \n"
+ " add %r9, %r5, %r5 \n"
+ " add %r4, %r4, %r5 \n"
+ " vxor %v5, %v11, %v10 \n"
+ " vavgub %v7, %v11, %v10 \n"
+ " vor %v1, %v8, %v5 \n"
+ " vxor %v13, %v9, %v7 \n"
+ " vand %v1, %v2, %v1 \n"
+ " vavgub %v0, %v9, %v7 \n"
+ " vand %v1, %v1, %v13 \n"
+ " vsububm %v0, %v0, %v1 \n"
+ " vavgub %v12, %v6, %v0 \n"
+ "._L76: \n"
+ " li %r11, 16 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r11, %r4 \n"
+ " lvx %v6, %r5, %r3 \n"
+ " stvx %v12, 0, %r3 \n"
+ " vperm %v10, %v13, %v1, %v3 \n"
+ " vperm %v11, %v13, %v1, %v4 \n"
+ " add %r4, %r4, %r5 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v1, %r11, %r4 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v10, %v13, %v1, %v3 \n"
+ " vavgub %v12, %v9, %v7 \n"
+ " vperm %v11, %v13, %v1, %v4 \n"
+ " vor %v0, %v8, %v5 \n"
+ " vxor %v13, %v9, %v7 \n"
+ " vxor %v5, %v11, %v10 \n"
+ " vand %v0, %v2, %v0 \n"
+ " vavgub %v7, %v11, %v10 \n"
+ " vor %v1, %v8, %v5 \n"
+ " vand %v0, %v0, %v13 \n"
+ " vand %v1, %v2, %v1 \n"
+ " vxor %v13, %v9, %v7 \n"
+ " vsububm %v12, %v12, %v0 \n"
+ " vand %v1, %v1, %v13 \n"
+ " vavgub %v0, %v9, %v7 \n"
+ " vavgub %v12, %v6, %v12 \n"
+ " lvx %v6, %r9, %r3 \n"
+ " vsububm %v0, %v0, %v1 \n"
+ " stvx %v12, %r5, %r3 \n"
+ " vavgub %v12, %v6, %v0 \n"
+ " add %r3, %r3, %r9 \n"
+ " bdnz ._L76 \n"
+ " lvx %v1, %r11, %r4 \n"
+ " lvx %v13, 0, %r4 \n"
+ " lvx %v6, %r5, %r3 \n"
+ " vperm %v10, %v13, %v1, %v3 \n"
+ " stvx %v12, 0, %r3 \n"
+ " vperm %v11, %v13, %v1, %v4 \n"
+ " vxor %v8, %v11, %v10 \n"
+ " vavgub %v9, %v11, %v10 \n"
+ " vor %v0, %v8, %v5 \n"
+ " vxor %v13, %v9, %v7 \n"
+ " vand %v0, %v2, %v0 \n"
+ " vavgub %v1, %v9, %v7 \n"
+ " vand %v0, %v0, %v13 \n"
+ " vsububm %v1, %v1, %v0 \n"
+ " vavgub %v12, %v6, %v1 \n"
+ " stvx %v12, %r5, %r3 \n"
+ );
+}
+
+static void MC_avg_xy8_altivec (uint8_t * dest, uint8_t * ref,
+ int stride, int height)
+{
+ asm (" \n"
+ " lvsl %v2, 0, %r4 \n"
+ " vspltisb %v19, 1 \n"
+ " lvsl %v3, %r5, %r4 \n"
+ " vmrghb %v2, %v2, %v2 \n"
+ " li %r9, 16 \n"
+ " vmrghb %v3, %v3, %v3 \n"
+ " lvx %v9, 0, %r4 \n"
+ " vpkuhum %v2, %v2, %v2 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " vpkuhum %v3, %v3, %v3 \n"
+ " add %r4, %r4, %r5 \n"
+ " vaddubm %v18, %v2, %v19 \n"
+ " vperm %v11, %v9, %v1, %v2 \n"
+ " srawi %r6, %r6, 1 \n"
+ " vaddubm %v17, %v3, %v19 \n"
+ " addi %r6, %r6, -1 \n"
+ " vperm %v10, %v9, %v1, %v18 \n"
+ " lvx %v4, 0, %r3 \n"
+ " mtctr %r6 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " lvx %v9, 0, %r4 \n"
+ " vavgub %v8, %v11, %v10 \n"
+ " vxor %v7, %v11, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " vperm %v10, %v9, %v1, %v17 \n"
+ " vperm %v11, %v9, %v1, %v3 \n"
+ " vxor %v5, %v11, %v10 \n"
+ " vavgub %v6, %v11, %v10 \n"
+ " vor %v1, %v7, %v5 \n"
+ " vxor %v13, %v8, %v6 \n"
+ " vand %v1, %v19, %v1 \n"
+ " vavgub %v0, %v8, %v6 \n"
+ " vand %v1, %v1, %v13 \n"
+ " vsububm %v0, %v0, %v1 \n"
+ " vavgub %v13, %v4, %v0 \n"
+ "._L81: \n"
+ " li %r9, 16 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " lvx %v9, 0, %r4 \n"
+ " lvx %v4, %r5, %r3 \n"
+ " stvewx %v13, 0, %r3 \n"
+ " vperm %v10, %v9, %v1, %v18 \n"
+ " vperm %v11, %v9, %v1, %v2 \n"
+ " li %r9, 4 \n"
+ " stvewx %v13, %r9, %r3 \n"
+ " vxor %v7, %v11, %v10 \n"
+ " add %r4, %r4, %r5 \n"
+ " li %r9, 16 \n"
+ " vavgub %v8, %v11, %v10 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " vor %v0, %v7, %v5 \n"
+ " lvx %v9, 0, %r4 \n"
+ " vxor %v12, %v8, %v6 \n"
+ " vand %v0, %v19, %v0 \n"
+ " add %r3, %r3, %r5 \n"
+ " vperm %v10, %v9, %v1, %v17 \n"
+ " vavgub %v13, %v8, %v6 \n"
+ " li %r9, 4 \n"
+ " vperm %v11, %v9, %v1, %v3 \n"
+ " vand %v0, %v0, %v12 \n"
+ " add %r4, %r4, %r5 \n"
+ " vxor %v5, %v11, %v10 \n"
+ " vavgub %v6, %v11, %v10 \n"
+ " vor %v1, %v7, %v5 \n"
+ " vsububm %v13, %v13, %v0 \n"
+ " vxor %v0, %v8, %v6 \n"
+ " vand %v1, %v19, %v1 \n"
+ " vavgub %v13, %v4, %v13 \n"
+ " vand %v1, %v1, %v0 \n"
+ " lvx %v4, %r5, %r3 \n"
+ " vavgub %v0, %v8, %v6 \n"
+ " stvewx %v13, 0, %r3 \n"
+ " stvewx %v13, %r9, %r3 \n"
+ " vsububm %v0, %v0, %v1 \n"
+ " add %r3, %r3, %r5 \n"
+ " vavgub %v13, %v4, %v0 \n"
+ " bdnz ._L81 \n"
+ " li %r9, 16 \n"
+ " lvx %v1, %r9, %r4 \n"
+ " lvx %v9, 0, %r4 \n"
+ " lvx %v4, %r5, %r3 \n"
+ " vperm %v10, %v9, %v1, %v18 \n"
+ " stvewx %v13, 0, %r3 \n"
+ " vperm %v11, %v9, %v1, %v2 \n"
+ " li %r9, 4 \n"
+ " stvewx %v13, %r9, %r3 \n"
+ " vxor %v7, %v11, %v10 \n"
+ " add %r3, %r3, %r5 \n"
+ " vavgub %v8, %v11, %v10 \n"
+ " vor %v0, %v7, %v5 \n"
+ " vxor %v13, %v8, %v6 \n"
+ " vand %v0, %v19, %v0 \n"
+ " vavgub %v1, %v8, %v6 \n"
+ " vand %v0, %v0, %v13 \n"
+ " vsububm %v1, %v1, %v0 \n"
+ " vavgub %v13, %v4, %v1 \n"
+ " stvewx %v13, 0, %r3 \n"
+ " stvewx %v13, %r9, %r3 \n"
+ );
+}
+
+MOTION_COMP_EXTERN (altivec)
+
+#endif /* ENABLE_ALTIVEC */
+#endif /* ARCH_PPC */
+
+#else /* __ALTIVEC__ */
+
+#define vector_s16_t vector signed short
+#define vector_u16_t vector unsigned short
+#define vector_s8_t vector signed char
+#define vector_u8_t vector unsigned char
+#define vector_s32_t vector signed int
+#define vector_u32_t vector unsigned int
+
+void MC_put_o_16_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm, ref0, ref1, tmp;
+
+ perm = vec_lvsl (0, ref);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ tmp = vec_perm (ref0, ref1, perm);
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ vec_st (tmp, 0, dest);
+ tmp = vec_perm (ref0, ref1, perm);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ vec_st (tmp, stride, dest);
+ dest += 2*stride;
+ tmp = vec_perm (ref0, ref1, perm);
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ vec_st (tmp, 0, dest);
+ tmp = vec_perm (ref0, ref1, perm);
+ vec_st (tmp, stride, dest);
+}
+
+void MC_put_o_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm0, perm1, tmp0, tmp1, ref0, ref1;
+
+ tmp0 = vec_lvsl (0, ref);
+ tmp0 = vec_mergeh (tmp0, tmp0);
+ perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+ tmp1 = vec_lvsl (stride, ref);
+ tmp1 = vec_mergeh (tmp1, tmp1);
+ perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ tmp0 = vec_perm (ref0, ref1, perm0);
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_perm (ref0, ref1, perm1);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp0 = vec_perm (ref0, ref1, perm0);
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_perm (ref0, ref1, perm1);
+ vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_put_x_16_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t permA, permB, ref0, ref1, tmp;
+
+ permA = vec_lvsl (0, ref);
+ permB = vec_add (permA, vec_splat_u8 (1));
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ tmp = vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ vec_st (tmp, 0, dest);
+ tmp = vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ vec_st (tmp, stride, dest);
+ dest += 2*stride;
+ tmp = vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ vec_st (tmp, 0, dest);
+ tmp = vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB));
+ vec_st (tmp, stride, dest);
+}
+
+void MC_put_x_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm0A, perm0B, perm1A, perm1B, ones, tmp0, tmp1, ref0, ref1;
+
+ ones = vec_splat_u8 (1);
+ tmp0 = vec_lvsl (0, ref);
+ tmp0 = vec_mergeh (tmp0, tmp0);
+ perm0A = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+ perm0B = vec_add (perm0A, ones);
+ tmp1 = vec_lvsl (stride, ref);
+ tmp1 = vec_mergeh (tmp1, tmp1);
+ perm1A = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+ perm1B = vec_add (perm1A, ones);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (8, ref);
+ ref += stride;
+ tmp0 = vec_avg (vec_perm (ref0, ref1, perm0A),
+ vec_perm (ref0, ref1, perm0B));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (8, ref);
+ ref += stride;
+ vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_avg (vec_perm (ref0, ref1, perm1A),
+ vec_perm (ref0, ref1, perm1B));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (8, ref);
+ ref += stride;
+ vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp0 = vec_avg (vec_perm (ref0, ref1, perm0A),
+ vec_perm (ref0, ref1, perm0B));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (8, ref);
+ vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_avg (vec_perm (ref0, ref1, perm1A),
+ vec_perm (ref0, ref1, perm1B));
+ vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_put_y_16_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm, ref0, ref1, tmp0, tmp1, tmp;
+
+ perm = vec_lvsl (0, ref);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ tmp0 = vec_perm (ref0, ref1, perm);
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ tmp1 = vec_perm (ref0, ref1, perm);
+ tmp = vec_avg (tmp0, tmp1);
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ vec_st (tmp, 0, dest);
+ tmp0 = vec_perm (ref0, ref1, perm);
+ tmp = vec_avg (tmp0, tmp1);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ vec_st (tmp, stride, dest);
+ dest += 2*stride;
+ tmp1 = vec_perm (ref0, ref1, perm);
+ tmp = vec_avg (tmp0, tmp1);
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ vec_st (tmp, 0, dest);
+ tmp0 = vec_perm (ref0, ref1, perm);
+ tmp = vec_avg (tmp0, tmp1);
+ vec_st (tmp, stride, dest);
+}
+
+void MC_put_y_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm0, perm1, tmp0, tmp1, tmp, ref0, ref1;
+
+ tmp0 = vec_lvsl (0, ref);
+ tmp0 = vec_mergeh (tmp0, tmp0);
+ perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+ tmp1 = vec_lvsl (stride, ref);
+ tmp1 = vec_mergeh (tmp1, tmp1);
+ perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ tmp0 = vec_perm (ref0, ref1, perm0);
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ tmp1 = vec_perm (ref0, ref1, perm1);
+ tmp = vec_avg (tmp0, tmp1);
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp0 = vec_perm (ref0, ref1, perm0);
+ tmp = vec_avg (tmp0, tmp1);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_perm (ref0, ref1, perm1);
+ tmp = vec_avg (tmp0, tmp1);
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp0 = vec_perm (ref0, ref1, perm0);
+ tmp = vec_avg (tmp0, tmp1);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
+void MC_put_xy_16_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t permA, permB, ref0, ref1, A, B, avg0, avg1, xor0, xor1, tmp;
+ vector_u8_t ones;
+
+ ones = vec_splat_u8 (1);
+ permA = vec_lvsl (0, ref);
+ permB = vec_add (permA, ones);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg1 = vec_avg (A, B);
+ xor1 = vec_xor (A, B);
+ tmp = vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1)));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ vec_st (tmp, 0, dest);
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+ tmp = vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1)));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ vec_st (tmp, stride, dest);
+ dest += 2*stride;
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg1 = vec_avg (A, B);
+ xor1 = vec_xor (A, B);
+ tmp = vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1)));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ vec_st (tmp, 0, dest);
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+ tmp = vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1)));
+ vec_st (tmp, stride, dest);
+}
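
The xy (half-pel in both directions) cases need the rounded four-pixel average (a + b + c + d + 2) >> 2, but AltiVec only provides the pairwise vec_avg, which rounds upward. Nesting two vec_avg calls can therefore come out one too high; the vec_sub / vec_and / vec_or / vec_xor sequence above subtracts that extra 1 exactly when it occurs. A scalar sketch of the same correction, with illustrative names:

    /* avg2() mirrors vec_avg: (x + y + 1) >> 1, rounding up. */
    static unsigned avg2 (unsigned x, unsigned y)
    {
        return (x + y + 1) >> 1;
    }

    /* Equals (a + b + c + d + 2) >> 2 for 8-bit inputs: the nested average is
     * lowered by 1 when at least one pairwise sum was odd and the two rounded
     * halves avg0/avg1 have different parity -- the same per-byte test the
     * vector code performs with xor0, xor1 and avg0 ^ avg1. */
    static unsigned avg4 (unsigned a, unsigned b, unsigned c, unsigned d)
    {
        unsigned avg0 = avg2 (a, b);
        unsigned avg1 = avg2 (c, d);
        unsigned fix  = ((a ^ b) | (c ^ d)) & (avg0 ^ avg1) & 1;
        return avg2 (avg0, avg1) - fix;
    }
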
+
+void MC_put_xy_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm0A, perm0B, perm1A, perm1B, ref0, ref1, A, B;
+ vector_u8_t avg0, avg1, xor0, xor1, tmp, ones;
+
+ ones = vec_splat_u8 (1);
+ perm0A = vec_lvsl (0, ref);
+ perm0A = vec_mergeh (perm0A, perm0A);
+ perm0A = vec_pack ((vector_u16_t)perm0A, (vector_u16_t)perm0A);
+ perm0B = vec_add (perm0A, ones);
+ perm1A = vec_lvsl (stride, ref);
+ perm1A = vec_mergeh (perm1A, perm1A);
+ perm1A = vec_pack ((vector_u16_t)perm1A, (vector_u16_t)perm1A);
+ perm1B = vec_add (perm1A, ones);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ A = vec_perm (ref0, ref1, perm0A);
+ B = vec_perm (ref0, ref1, perm0B);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ A = vec_perm (ref0, ref1, perm1A);
+ B = vec_perm (ref0, ref1, perm1B);
+ avg1 = vec_avg (A, B);
+ xor1 = vec_xor (A, B);
+ tmp = vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1)));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ A = vec_perm (ref0, ref1, perm0A);
+ B = vec_perm (ref0, ref1, perm0B);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+ tmp = vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1)));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ A = vec_perm (ref0, ref1, perm1A);
+ B = vec_perm (ref0, ref1, perm1B);
+ avg1 = vec_avg (A, B);
+ xor1 = vec_xor (A, B);
+ tmp = vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1)));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ A = vec_perm (ref0, ref1, perm0A);
+ B = vec_perm (ref0, ref1, perm0B);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+ tmp = vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1)));
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
+#if 0
+void MC_put_xy_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t permA, permB, ref0, ref1, A, B, C, D, tmp, zero, ones;
+ vector_u16_t splat2, temp;
+
+ ones = vec_splat_u8 (1);
+ permA = vec_lvsl (0, ref);
+ permB = vec_add (permA, ones);
+
+ zero = vec_splat_u8 (0);
+ splat2 = vec_splat_u16 (2);
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ C = vec_perm (ref0, ref1, permA);
+ D = vec_perm (ref0, ref1, permB);
+
+ temp = vec_add (vec_add ((vector_u16_t)vec_mergeh (zero, A),
+ (vector_u16_t)vec_mergeh (zero, B)),
+ vec_add ((vector_u16_t)vec_mergeh (zero, C),
+ (vector_u16_t)vec_mergeh (zero, D)));
+ temp = vec_sr (vec_add (temp, splat2), splat2);
+ tmp = vec_pack (temp, temp);
+
+ vec_st (tmp, 0, dest);
+ dest += stride;
+ tmp = vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB));
+ } while (--height);
+}
+#endif
+
+void MC_avg_o_16_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm, ref0, ref1, tmp, prev;
+
+ perm = vec_lvsl (0, ref);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ prev = vec_ld (0, dest);
+ tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_st (tmp, 0, dest);
+ tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ prev = vec_ld (2*stride, dest);
+ vec_st (tmp, stride, dest);
+ dest += 2*stride;
+ tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ prev = vec_ld (stride, dest);
+ vec_st (tmp, 0, dest);
+ tmp = vec_avg (prev, vec_perm (ref0, ref1, perm));
+ vec_st (tmp, stride, dest);
+}
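
The MC_avg_* family differs from MC_put_* only in that it first reads the pixels already present in dest (prev) and folds them into the prediction with one more vec_avg, as used for bidirectionally predicted blocks. A scalar equivalent of what each stored byte computes, for reference only:

    /* Illustration only: per-byte effect of the MC_avg_* stores. */
    static void mc_avg_scalar (unsigned char * dest, const unsigned char * pred, int n)
    {
        int i;
        for (i = 0; i < n; i++)
            dest[i] = (dest[i] + pred[i] + 1) >> 1;   /* rounded blend with prior contents */
    }
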
+
+void MC_avg_o_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm0, perm1, tmp0, tmp1, ref0, ref1, prev;
+
+ tmp0 = vec_lvsl (0, ref);
+ tmp0 = vec_mergeh (tmp0, tmp0);
+ perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+ tmp1 = vec_lvsl (stride, ref);
+ tmp1 = vec_mergeh (tmp1, tmp1);
+ perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ prev = vec_ld (0, dest);
+ tmp0 = vec_avg (prev, vec_perm (ref0, ref1, perm0));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_avg (prev, vec_perm (ref0, ref1, perm1));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp0 = vec_avg (prev, vec_perm (ref0, ref1, perm0));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_avg (prev, vec_perm (ref0, ref1, perm1));
+ vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_avg_x_16_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t permA, permB, ref0, ref1, tmp, prev;
+
+ permA = vec_lvsl (0, ref);
+ permB = vec_add (permA, vec_splat_u8 (1));
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ prev = vec_ld (0, dest);
+ ref += stride;
+ tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB)));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_st (tmp, 0, dest);
+ tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB)));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ prev = vec_ld (2*stride, dest);
+ vec_st (tmp, stride, dest);
+ dest += 2*stride;
+ tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB)));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ prev = vec_ld (stride, dest);
+ vec_st (tmp, 0, dest);
+ tmp = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, permA),
+ vec_perm (ref0, ref1, permB)));
+ vec_st (tmp, stride, dest);
+}
+
+void MC_avg_x_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm0A, perm0B, perm1A, perm1B, ones, tmp0, tmp1, ref0, ref1;
+ vector_u8_t prev;
+
+ ones = vec_splat_u8 (1);
+ tmp0 = vec_lvsl (0, ref);
+ tmp0 = vec_mergeh (tmp0, tmp0);
+ perm0A = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+ perm0B = vec_add (perm0A, ones);
+ tmp1 = vec_lvsl (stride, ref);
+ tmp1 = vec_mergeh (tmp1, tmp1);
+ perm1A = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+ perm1B = vec_add (perm1A, ones);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (8, ref);
+ prev = vec_ld (0, dest);
+ ref += stride;
+ tmp0 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm0A),
+ vec_perm (ref0, ref1, perm0B)));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (8, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm1A),
+ vec_perm (ref0, ref1, perm1B)));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (8, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp0 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm0A),
+ vec_perm (ref0, ref1, perm0B)));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (8, ref);
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp0, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp0, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_avg (prev, vec_avg (vec_perm (ref0, ref1, perm1A),
+ vec_perm (ref0, ref1, perm1B)));
+ vec_ste ((vector_u32_t)tmp1, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp1, 4, (unsigned int *)dest);
+}
+
+void MC_avg_y_16_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm, ref0, ref1, tmp0, tmp1, tmp, prev;
+
+ perm = vec_lvsl (0, ref);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ tmp0 = vec_perm (ref0, ref1, perm);
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ prev = vec_ld (0, dest);
+ tmp1 = vec_perm (ref0, ref1, perm);
+ tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_st (tmp, 0, dest);
+ tmp0 = vec_perm (ref0, ref1, perm);
+ tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ ref += stride;
+ prev = vec_ld (2*stride, dest);
+ vec_st (tmp, stride, dest);
+ dest += 2*stride;
+ tmp1 = vec_perm (ref0, ref1, perm);
+ tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (15, ref);
+ prev = vec_ld (stride, dest);
+ vec_st (tmp, 0, dest);
+ tmp0 = vec_perm (ref0, ref1, perm);
+ tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+ vec_st (tmp, stride, dest);
+}
+
+void MC_avg_y_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm0, perm1, tmp0, tmp1, tmp, ref0, ref1, prev;
+
+ tmp0 = vec_lvsl (0, ref);
+ tmp0 = vec_mergeh (tmp0, tmp0);
+ perm0 = vec_pack ((vector_u16_t)tmp0, (vector_u16_t)tmp0);
+ tmp1 = vec_lvsl (stride, ref);
+ tmp1 = vec_mergeh (tmp1, tmp1);
+ perm1 = vec_pack ((vector_u16_t)tmp1, (vector_u16_t)tmp1);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ tmp0 = vec_perm (ref0, ref1, perm0);
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ prev = vec_ld (0, dest);
+ tmp1 = vec_perm (ref0, ref1, perm1);
+ tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp0 = vec_perm (ref0, ref1, perm0);
+ tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp1 = vec_perm (ref0, ref1, perm1);
+ tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (7, ref);
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ tmp0 = vec_perm (ref0, ref1, perm0);
+ tmp = vec_avg (prev, vec_avg (tmp0, tmp1));
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
+void MC_avg_xy_16_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t permA, permB, ref0, ref1, A, B, avg0, avg1, xor0, xor1, tmp;
+ vector_u8_t ones, prev;
+
+ ones = vec_splat_u8 (1);
+ permA = vec_lvsl (0, ref);
+ permB = vec_add (permA, ones);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ prev = vec_ld (0, dest);
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg1 = vec_avg (A, B);
+ xor1 = vec_xor (A, B);
+ tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1))));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_st (tmp, 0, dest);
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+ tmp = vec_avg (prev,
+ vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1))));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ prev = vec_ld (2*stride, dest);
+ vec_st (tmp, stride, dest);
+ dest += 2*stride;
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg1 = vec_avg (A, B);
+ xor1 = vec_xor (A, B);
+ tmp = vec_avg (prev,
+ vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1))));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ prev = vec_ld (stride, dest);
+ vec_st (tmp, 0, dest);
+ A = vec_perm (ref0, ref1, permA);
+ B = vec_perm (ref0, ref1, permB);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+ tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1))));
+ vec_st (tmp, stride, dest);
+}
+
+void MC_avg_xy_8_altivec (unsigned char * dest, unsigned char * ref,
+ int stride, int height)
+{
+ vector_u8_t perm0A, perm0B, perm1A, perm1B, ref0, ref1, A, B;
+ vector_u8_t avg0, avg1, xor0, xor1, tmp, ones, prev;
+
+ ones = vec_splat_u8 (1);
+ perm0A = vec_lvsl (0, ref);
+ perm0A = vec_mergeh (perm0A, perm0A);
+ perm0A = vec_pack ((vector_u16_t)perm0A, (vector_u16_t)perm0A);
+ perm0B = vec_add (perm0A, ones);
+ perm1A = vec_lvsl (stride, ref);
+ perm1A = vec_mergeh (perm1A, perm1A);
+ perm1A = vec_pack ((vector_u16_t)perm1A, (vector_u16_t)perm1A);
+ perm1B = vec_add (perm1A, ones);
+
+ height = (height >> 1) - 1;
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ A = vec_perm (ref0, ref1, perm0A);
+ B = vec_perm (ref0, ref1, perm0B);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ prev = vec_ld (0, dest);
+ A = vec_perm (ref0, ref1, perm1A);
+ B = vec_perm (ref0, ref1, perm1B);
+ avg1 = vec_avg (A, B);
+ xor1 = vec_xor (A, B);
+ tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1))));
+
+ do {
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ A = vec_perm (ref0, ref1, perm0A);
+ B = vec_perm (ref0, ref1, perm0B);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+ tmp = vec_avg (prev,
+ vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1))));
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ ref += stride;
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ A = vec_perm (ref0, ref1, perm1A);
+ B = vec_perm (ref0, ref1, perm1B);
+ avg1 = vec_avg (A, B);
+ xor1 = vec_xor (A, B);
+ tmp = vec_avg (prev,
+ vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1))));
+ } while (--height);
+
+ ref0 = vec_ld (0, ref);
+ ref1 = vec_ld (16, ref);
+ prev = vec_ld (stride, dest);
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+ dest += stride;
+ A = vec_perm (ref0, ref1, perm0A);
+ B = vec_perm (ref0, ref1, perm0B);
+ avg0 = vec_avg (A, B);
+ xor0 = vec_xor (A, B);
+ tmp = vec_avg (prev, vec_sub (vec_avg (avg0, avg1),
+ vec_and (vec_and (ones, vec_or (xor0, xor1)),
+ vec_xor (avg0, avg1))));
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+}
+
+#endif /* __ALTIVEC__ */
diff --git a/src/libmpeg2/mpeg2_internal.h b/src/libmpeg2/mpeg2_internal.h
index 8e7ce0d11..d9bc71c62 100644
--- a/src/libmpeg2/mpeg2_internal.h
+++ b/src/libmpeg2/mpeg2_internal.h
@@ -170,6 +170,13 @@ void idct_block_copy_mmx (int16_t *block, uint8_t * dest, int stride);
void idct_block_add_mmx (int16_t *block, uint8_t * dest, int stride);
void idct_mmx_init (void);
+#ifdef ENABLE_ALTIVEC
+/* idct_altivec.c */
+void idct_block_copy_altivec (int16_t * block, uint8_t * dest, int stride);
+void idct_block_add_altivec (int16_t * block, uint8_t * dest, int stride);
+void idct_altivec_init (void);
+#endif
+
/* motion_comp.c */
void motion_comp_init (void);
@@ -191,6 +198,9 @@ extern mc_functions_t mc_functions_c;
extern mc_functions_t mc_functions_mmx;
extern mc_functions_t mc_functions_mmxext;
extern mc_functions_t mc_functions_3dnow;
+#ifdef ENABLE_ALTIVEC
+extern mc_functions_t mc_functions_altivec;
+#endif
extern mc_functions_t mc_functions_mlib;
/* slice.c */
diff --git a/src/xine-utils/cpu_accel.c b/src/xine-utils/cpu_accel.c
index 5fa5a45ed..41baa5d5e 100644
--- a/src/xine-utils/cpu_accel.c
+++ b/src/xine-utils/cpu_accel.c
@@ -121,33 +121,31 @@ static void sigill_handler (int n) {
uint32_t xine_mm_accel (void)
{
#ifdef ARCH_X86
- static int got_accel = 0;
static uint32_t accel;
- if (!got_accel) {
- got_accel = 1;
-
- accel = x86_accel ();
-
- /* test OS support for SSE */
- if( accel & MM_ACCEL_X86_SSE ) {
- if (setjmp(sigill_return)) {
- accel &= ~(MM_ACCEL_X86_SSE|MM_ACCEL_X86_SSE2);
- } else {
- signal (SIGILL, sigill_handler);
- __asm __volatile ("xorps %xmm0, %xmm0");
- signal (SIGILL, SIG_DFL);
- }
+ accel = x86_accel ();
+
+ /* test OS support for SSE */
+ if( accel & MM_ACCEL_X86_SSE ) {
+ if (setjmp(sigill_return)) {
+ accel &= ~(MM_ACCEL_X86_SSE|MM_ACCEL_X86_SSE2);
+ } else {
+ signal (SIGILL, sigill_handler);
+ __asm __volatile ("xorps %xmm0, %xmm0");
+ signal (SIGILL, SIG_DFL);
}
}
return accel;
-#else
+#endif
#ifdef HAVE_MLIB
return MM_ACCEL_MLIB;
-#else
- return 0;
+#endif
+#ifdef ARCH_PPC
+#ifdef ENABLE_ALTIVEC
+ return MM_ACCEL_PPC_ALTIVEC;
#endif
#endif
+ return 0;
}
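
With the flag exported from xine_mm_accel(), decoders can choose the AltiVec code paths at run time. A hedged sketch of such a check; the surrounding function and message are illustrative and not part of this patch:

    #include <stdio.h>
    #include "xineutils.h"

    /* Hypothetical caller: select the AltiVec motion compensation when available. */
    static int altivec_available (void)
    {
        if (xine_mm_accel () & MM_ACCEL_PPC_ALTIVEC) {
            fprintf (stderr, "Using altivec for motion compensation\n");
            return 1;    /* e.g. install mc_functions_altivec here */
        }
        return 0;
    }
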
diff --git a/src/xine-utils/xineutils.h b/src/xine-utils/xineutils.h
index a4a689f4a..634cf0c74 100644
--- a/src/xine-utils/xineutils.h
+++ b/src/xine-utils/xineutils.h
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
- * $Id: xineutils.h,v 1.4 2001/11/30 00:53:51 f1rmb Exp $
+ * $Id: xineutils.h,v 1.5 2001/12/05 15:12:03 guenter Exp $
*
*/
#ifndef XINEUTILS_H
@@ -49,6 +49,8 @@ extern "C" {
#define MM_ACCEL_X86_MMXEXT 0x20000000
#define MM_ACCEL_X86_SSE 0x10000000
#define MM_ACCEL_X86_SSE2 0x08000000
+/* powerpc accelerations */
+#define MM_ACCEL_PPC_ALTIVEC 0x04000000
/* x86 compat defines */
#define MM_MMX MM_ACCEL_X86_MMX
#define MM_3DNOW MM_ACCEL_X86_3DNOW