path: root/contrib/ffmpeg/libswscale
Diffstat (limited to 'contrib/ffmpeg/libswscale')
-rw-r--r--  contrib/ffmpeg/libswscale/Makefile                    |   14
-rw-r--r--  contrib/ffmpeg/libswscale/cs_test.c                   |  344
-rw-r--r--  contrib/ffmpeg/libswscale/internal_bfin.S             |  607
-rw-r--r--  contrib/ffmpeg/libswscale/rgb2rgb.c                   |  749
-rw-r--r--  contrib/ffmpeg/libswscale/rgb2rgb.h                   |   81
-rw-r--r--  contrib/ffmpeg/libswscale/rgb2rgb_template.c          | 4667
-rw-r--r--  contrib/ffmpeg/libswscale/swscale-example.c           |  328
-rw-r--r--  contrib/ffmpeg/libswscale/swscale.c                   | 4507
-rw-r--r--  contrib/ffmpeg/libswscale/swscale.h                   |  138
-rw-r--r--  contrib/ffmpeg/libswscale/swscale_altivec_template.c  |  906
-rw-r--r--  contrib/ffmpeg/libswscale/swscale_bfin.c              |   94
-rw-r--r--  contrib/ffmpeg/libswscale/swscale_internal.h          |  318
-rw-r--r--  contrib/ffmpeg/libswscale/swscale_template.c          | 5423
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb.c                   |  949
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_altivec.c           | 1352
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_bfin.c              |  207
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_init.c              |  412
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_mlib.c              |   88
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_template.c          |  744
-rw-r--r--  contrib/ffmpeg/libswscale/yuv2rgb_vis.c               |  206
20 files changed, 11407 insertions, 10727 deletions
diff --git a/contrib/ffmpeg/libswscale/Makefile b/contrib/ffmpeg/libswscale/Makefile
index a1c25a76a..14a65b92c 100644
--- a/contrib/ffmpeg/libswscale/Makefile
+++ b/contrib/ffmpeg/libswscale/Makefile
@@ -7,18 +7,24 @@ LIBMAJOR=$(SWSMAJOR)
EXTRALIBS := -L$(BUILD_ROOT)/libavutil -lavutil$(BUILDSUF) $(EXTRALIBS)
-OBJS= swscale.o rgb2rgb.o
+OBJS = rgb2rgb.o swscale.o
-OBJS-$(TARGET_ALTIVEC) += yuv2rgb_altivec.o
OBJS-$(CONFIG_GPL) += yuv2rgb.o
+OBJS-$(HAVE_ALTIVEC) += yuv2rgb_altivec.o
+
+OBJS-$(ARCH_BFIN) += swscale_bfin.o \
+ yuv2rgb_bfin.o \
+
+ASM_OBJS-$(ARCH_BFIN) += internal_bfin.o
HEADERS = swscale.h rgb2rgb.h
include ../common.mak
-cs_test: cs_test.o $(LIB)
+cs_test: cs_test.o $(LIBNAME)
-swscale-example: swscale-example.o $(LIB) -lm
+swscale-example: swscale-example.o $(LIBNAME)
+swscale-example: EXTRALIBS += -lm
clean::
rm -f cs_test swscale-example
diff --git a/contrib/ffmpeg/libswscale/cs_test.c b/contrib/ffmpeg/libswscale/cs_test.c
index cd0100618..2bb74865b 100644
--- a/contrib/ffmpeg/libswscale/cs_test.c
+++ b/contrib/ffmpeg/libswscale/cs_test.c
@@ -15,7 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
@@ -31,6 +31,7 @@
#define srcByte 0x55
#define dstByte 0xBB
+#define FUNC(s,d,n) {s,d,#n,n}
static int cpu_caps;
@@ -59,245 +60,116 @@ static char *args_parse(int argc, char *argv[])
int main(int argc, char **argv)
{
- int i, funcNum;
- uint8_t *srcBuffer= (uint8_t*)av_malloc(SIZE);
- uint8_t *dstBuffer= (uint8_t*)av_malloc(SIZE);
- int failedNum=0;
- int passedNum=0;
-
- av_log(NULL, AV_LOG_INFO, "memory corruption test ...\n");
- args_parse(argc, argv);
- av_log(NULL, AV_LOG_INFO, "CPU capabilities forced to %x\n", cpu_caps);
- sws_rgb2rgb_init(cpu_caps);
-
- for(funcNum=0; funcNum<100; funcNum++){
- int width;
- int failed=0;
- int srcBpp=0;
- int dstBpp=0;
+ int i, funcNum;
+ uint8_t *srcBuffer= (uint8_t*)av_malloc(SIZE);
+ uint8_t *dstBuffer= (uint8_t*)av_malloc(SIZE);
+ int failedNum=0;
+ int passedNum=0;
- av_log(NULL, AV_LOG_INFO,".");
- memset(srcBuffer, srcByte, SIZE);
+ av_log(NULL, AV_LOG_INFO, "memory corruption test ...\n");
+ args_parse(argc, argv);
+ av_log(NULL, AV_LOG_INFO, "CPU capabilities forced to %x\n", cpu_caps);
+ sws_rgb2rgb_init(cpu_caps);
- for(width=32; width<64; width++){
- int dstOffset;
- for(dstOffset=128; dstOffset<196; dstOffset+=4){
- int srcOffset;
- memset(dstBuffer, dstByte, SIZE);
+ for(funcNum=0; ; funcNum++){
+ struct func_info_s {
+ int src_bpp;
+ int dst_bpp;
+ char *name;
+ void (*func)(const uint8_t *src, uint8_t *dst, long src_size);
+ } func_info[] = {
+ FUNC(2, 2, rgb15to16),
+ FUNC(2, 3, rgb15to24),
+ FUNC(2, 4, rgb15to32),
+ FUNC(2, 3, rgb16to24),
+ FUNC(2, 4, rgb16to32),
+ FUNC(3, 2, rgb24to15),
+ FUNC(3, 2, rgb24to16),
+ FUNC(3, 4, rgb24to32),
+ FUNC(4, 2, rgb32to15),
+ FUNC(4, 2, rgb32to16),
+ FUNC(4, 3, rgb32to24),
+ FUNC(2, 2, rgb16to15),
+ FUNC(2, 2, rgb15tobgr15),
+ FUNC(2, 2, rgb15tobgr16),
+ FUNC(2, 3, rgb15tobgr24),
+ FUNC(2, 4, rgb15tobgr32),
+ FUNC(2, 2, rgb16tobgr15),
+ FUNC(2, 2, rgb16tobgr16),
+ FUNC(2, 3, rgb16tobgr24),
+ FUNC(2, 4, rgb16tobgr32),
+ FUNC(3, 2, rgb24tobgr15),
+ FUNC(3, 2, rgb24tobgr16),
+ FUNC(3, 3, rgb24tobgr24),
+ FUNC(3, 4, rgb24tobgr32),
+ FUNC(4, 2, rgb32tobgr15),
+ FUNC(4, 2, rgb32tobgr16),
+ FUNC(4, 3, rgb32tobgr24),
+ FUNC(4, 4, rgb32tobgr32),
+ FUNC(0, 0, NULL)
+ };
+ int width;
+ int failed=0;
+ int srcBpp=0;
+ int dstBpp=0;
- for(srcOffset=128; srcOffset<196; srcOffset+=4){
- uint8_t *src= srcBuffer+srcOffset;
- uint8_t *dst= dstBuffer+dstOffset;
- char *name=NULL;
-
- if(failed) break; //don't fill the screen with shit ...
+ if (!func_info[funcNum].func) break;
- switch(funcNum){
- case 0:
- srcBpp=2;
- dstBpp=2;
- name="rgb15to16";
- rgb15to16(src, dst, width*srcBpp);
- break;
- case 1:
- srcBpp=2;
- dstBpp=3;
- name="rgb15to24";
- rgb15to24(src, dst, width*srcBpp);
- break;
- case 2:
- srcBpp=2;
- dstBpp=4;
- name="rgb15to32";
- rgb15to32(src, dst, width*srcBpp);
- break;
- case 3:
- srcBpp=2;
- dstBpp=3;
- name="rgb16to24";
- rgb16to24(src, dst, width*srcBpp);
- break;
- case 4:
- srcBpp=2;
- dstBpp=4;
- name="rgb16to32";
- rgb16to32(src, dst, width*srcBpp);
- break;
- case 5:
- srcBpp=3;
- dstBpp=2;
- name="rgb24to15";
- rgb24to15(src, dst, width*srcBpp);
- break;
- case 6:
- srcBpp=3;
- dstBpp=2;
- name="rgb24to16";
- rgb24to16(src, dst, width*srcBpp);
- break;
- case 7:
- srcBpp=3;
- dstBpp=4;
- name="rgb24to32";
- rgb24to32(src, dst, width*srcBpp);
- break;
- case 8:
- srcBpp=4;
- dstBpp=2;
- name="rgb32to15";
- //((*s++) << TGA_SHIFT32) | TGA_ALPHA32;
- rgb32to15(src, dst, width*srcBpp);
- break;
- case 9:
- srcBpp=4;
- dstBpp=2;
- name="rgb32to16";
- rgb32to16(src, dst, width*srcBpp);
- break;
- case 10:
- srcBpp=4;
- dstBpp=3;
- name="rgb32to24";
- rgb32to24(src, dst, width*srcBpp);
- break;
- case 11:
- srcBpp=2;
- dstBpp=2;
- name="rgb16to15";
- rgb16to15(src, dst, width*srcBpp);
- break;
-
- case 14:
- srcBpp=2;
- dstBpp=2;
- name="rgb15tobgr15";
- rgb15tobgr15(src, dst, width*srcBpp);
- break;
- case 15:
- srcBpp=2;
- dstBpp=2;
- name="rgb15tobgr16";
- rgb15tobgr16(src, dst, width*srcBpp);
- break;
- case 16:
- srcBpp=2;
- dstBpp=3;
- name="rgb15tobgr24";
- rgb15tobgr24(src, dst, width*srcBpp);
- break;
- case 17:
- srcBpp=2;
- dstBpp=4;
- name="rgb15tobgr32";
- rgb15tobgr32(src, dst, width*srcBpp);
- break;
- case 18:
- srcBpp=2;
- dstBpp=2;
- name="rgb16tobgr15";
- rgb16tobgr15(src, dst, width*srcBpp);
- break;
- case 19:
- srcBpp=2;
- dstBpp=2;
- name="rgb16tobgr16";
- rgb16tobgr16(src, dst, width*srcBpp);
- break;
- case 20:
- srcBpp=2;
- dstBpp=3;
- name="rgb16tobgr24";
- rgb16tobgr24(src, dst, width*srcBpp);
- break;
- case 21:
- srcBpp=2;
- dstBpp=4;
- name="rgb16tobgr32";
- rgb16tobgr32(src, dst, width*srcBpp);
- break;
- case 22:
- srcBpp=3;
- dstBpp=2;
- name="rgb24tobgr15";
- rgb24tobgr15(src, dst, width*srcBpp);
- break;
- case 23:
- srcBpp=3;
- dstBpp=2;
- name="rgb24tobgr16";
- rgb24tobgr16(src, dst, width*srcBpp);
- break;
- case 24:
- srcBpp=3;
- dstBpp=3;
- name="rgb24tobgr24";
- rgb24tobgr24(src, dst, width*srcBpp);
- break;
- case 25:
- srcBpp=3;
- dstBpp=4;
- name="rgb24tobgr32";
- rgb24tobgr32(src, dst, width*srcBpp);
- break;
- case 26:
- srcBpp=4;
- dstBpp=2;
- name="rgb32tobgr15";
- rgb32tobgr15(src, dst, width*srcBpp);
- break;
- case 27:
- srcBpp=4;
- dstBpp=2;
- name="rgb32tobgr16";
- rgb32tobgr16(src, dst, width*srcBpp);
- break;
- case 28:
- srcBpp=4;
- dstBpp=3;
- name="rgb32tobgr24";
- rgb32tobgr24(src, dst, width*srcBpp);
- break;
- case 29:
- srcBpp=4;
- dstBpp=4;
- name="rgb32tobgr32";
- rgb32tobgr32(src, dst, width*srcBpp);
- break;
+ av_log(NULL, AV_LOG_INFO,".");
+ memset(srcBuffer, srcByte, SIZE);
- }
- if(!srcBpp) break;
+ for(width=63; width>0; width--){
+ int dstOffset;
+ for(dstOffset=128; dstOffset<196; dstOffset+=4){
+ int srcOffset;
+ memset(dstBuffer, dstByte, SIZE);
- for(i=0; i<SIZE; i++){
- if(srcBuffer[i]!=srcByte){
- av_log(NULL, AV_LOG_INFO, "src damaged at %d w:%d src:%d dst:%d %s\n",
- i, width, srcOffset, dstOffset, name);
- failed=1;
- break;
- }
- }
- for(i=0; i<dstOffset; i++){
- if(dstBuffer[i]!=dstByte){
- av_log(NULL, AV_LOG_INFO, "dst damaged at %d w:%d src:%d dst:%d %s\n",
- i, width, srcOffset, dstOffset, name);
- failed=1;
- break;
- }
- }
- for(i=dstOffset + width*dstBpp; i<SIZE; i++){
- if(dstBuffer[i]!=dstByte){
- av_log(NULL, AV_LOG_INFO, "dst damaged at %d w:%d src:%d dst:%d %s\n",
- i, width, srcOffset, dstOffset, name);
- failed=1;
- break;
- }
- }
- }
- }
- }
- if(failed) failedNum++;
- else if(srcBpp) passedNum++;
- }
-
- av_log(NULL, AV_LOG_INFO, "%d converters passed, %d converters randomly overwrote memory\n", passedNum, failedNum);
- return failedNum;
+ for(srcOffset=128; srcOffset<196; srcOffset+=4){
+ uint8_t *src= srcBuffer+srcOffset;
+ uint8_t *dst= dstBuffer+dstOffset;
+ char *name=NULL;
+
+ if(failed) break; //don't fill the screen with shit ...
+
+ srcBpp = func_info[funcNum].src_bpp;
+ dstBpp = func_info[funcNum].dst_bpp;
+ name = func_info[funcNum].name;
+
+ func_info[funcNum].func(src, dst, width*srcBpp);
+
+ if(!srcBpp) break;
+
+ for(i=0; i<SIZE; i++){
+ if(srcBuffer[i]!=srcByte){
+ av_log(NULL, AV_LOG_INFO, "src damaged at %d w:%d src:%d dst:%d %s\n",
+ i, width, srcOffset, dstOffset, name);
+ failed=1;
+ break;
+ }
+ }
+ for(i=0; i<dstOffset; i++){
+ if(dstBuffer[i]!=dstByte){
+ av_log(NULL, AV_LOG_INFO, "dst damaged at %d w:%d src:%d dst:%d %s\n",
+ i, width, srcOffset, dstOffset, name);
+ failed=1;
+ break;
+ }
+ }
+ for(i=dstOffset + width*dstBpp; i<SIZE; i++){
+ if(dstBuffer[i]!=dstByte){
+ av_log(NULL, AV_LOG_INFO, "dst damaged at %d w:%d src:%d dst:%d %s\n",
+ i, width, srcOffset, dstOffset, name);
+ failed=1;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if(failed) failedNum++;
+ else if(srcBpp) passedNum++;
+ }
+
+ av_log(NULL, AV_LOG_INFO, "\n%d converters passed, %d converters randomly overwrote memory\n", passedNum, failedNum);
+ return failedNum;
}
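
The rewritten cs_test.c replaces the long per-converter switch with a FUNC() macro and a sentinel-terminated func_info[] table that main() walks until it hits the NULL entry. A minimal standalone sketch of that table-driven pattern follows; dummy_conv and the buffer sizes are hypothetical stand-ins, while the real table lists the rgb15to16/rgb24tobgr24/... pointers from rgb2rgb.h.

    /* Sketch of the table-driven dispatch used by the new cs_test.c. */
    #include <stdint.h>
    #include <stdio.h>

    typedef void (*conv_fn)(const uint8_t *src, uint8_t *dst, long src_size);

    /* Hypothetical stand-in for a converter such as rgb15to16. */
    static void dummy_conv(const uint8_t *src, uint8_t *dst, long src_size)
    {
        long i;
        for (i = 0; i < src_size; i++)
            dst[i] = src[i];
    }

    #define FUNC(s,d,n) {s,d,#n,n}   /* same trick as the patch: #n stringifies the name */

    struct func_info_s {
        int src_bpp;
        int dst_bpp;
        const char *name;
        conv_fn func;
    };

    int main(void)
    {
        static const struct func_info_s func_info[] = {
            FUNC(2, 2, dummy_conv),   /* src bytes/pixel, dst bytes/pixel, converter */
            FUNC(0, 0, NULL)          /* sentinel: func==NULL ends the loop */
        };
        uint8_t src[64] = {0}, dst[64] = {0};
        int i;

        for (i = 0; func_info[i].func; i++) {
            func_info[i].func(src, dst, 16 * func_info[i].src_bpp);
            printf("ran %s\n", func_info[i].name);
        }
        return 0;
    }
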
diff --git a/contrib/ffmpeg/libswscale/internal_bfin.S b/contrib/ffmpeg/libswscale/internal_bfin.S
new file mode 100644
index 000000000..2d5c61a14
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/internal_bfin.S
@@ -0,0 +1,607 @@
+/*
+ * Copyright (C) 2007 Marc Hoffman <marc.hoffman@analog.com>
+ * April 20, 2007
+ *
+ * Blackfin Video Color Space Converters Operations
+ * convert I420 YV12 to RGB in various formats,
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+/*
+ YUV420 to RGB565 conversion. This routine takes a YUV 420 planar macroblock
+ and converts it to RGB565. R:5 bits, G:6 bits, B:5 bits.. packed into shorts
+
+
+ The following calculation is used for the conversion:
+
+ r = clipz((y-oy)*cy + crv*(v-128))
+ g = clipz((y-oy)*cy + cgv*(v-128) + cgu*(u-128))
+ b = clipz((y-oy)*cy + cbu*(u-128))
+
+ y,u,v are pre scaled by a factor of 4 i.e. left shifted to gain precision.
+
+
+  New factorization to eliminate the truncation error which was
+  occurring due to the byteop3p.
+
+
+ 1) use the byteop16m to subtract quad bytes; we use this in U8, so
+    the offsets then need to be renormalized to 8 bits.
+
+ 2) scale operands up by a factor of 4 not 8 because Blackfin
+ multiplies include a shift.
+
+ 3) compute into the accumulators cy*yx0, cy*yx1
+
+ 4) compute each of the linear equations
+ r = clipz((y-oy)*cy + crv*(v-128))
+
+ g = clipz((y-oy)*cy + cgv*(v-128) + cgu*(u-128))
+
+ b = clipz((y-oy)*cy + cbu*(u-128))
+
+    reuse of the accumulators requires that we actually multiply
+    twice, once with addition and the second time with a subtraction.
+
+    because of this we need to compute the equations in the order R, B,
+    then G, saving the writes for B in the case of 24/32 bit color
+    formats.
+
+ api: yuv2rgb_kind (uint8_t *Y, uint8_t *U, uint8_t *V, int *out,
+ int dW, uint32_t *coeffs);
+
+ A B
+ --- ---
+ i2 = cb i3 = cr
+ i1 = coeff i0 = y
+
+ Where coeffs have the following layout in memory.
+
+ uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv;
+
+ coeffs is a pointer to oy.
+
+ the {rgb} masks are only utilized by the 565 packing algorithm. Note the data
+ replication is used to simplify the internal algorithms for the dual mac architecture
+ of BlackFin.
+
+ All routines are exported with _ff_bfin_ as a symbol prefix
+
+ rough performance gain compared against -O3:
+
+ 2779809/1484290 187.28%
+
+  which translates to ~33c/pel to ~57c/pel for the reference vs 17.5
+  c/pel for the optimized implementations. Not sure why there is such a
+  huge variation in the reference code on Blackfin; it probably has
+  to do with the memory system.
+
+*/
+
+#define mL3 .text
+#ifdef __FDPIC__
+#define mL1 .l1.text
+#else
+#define mL1 mL3
+#endif
+#define MEM mL1
+
+#define DEFUN(fname,where,interface) \
+ .section where; \
+ .global _ff_bfin_ ## fname; \
+ .type _ff_bfin_ ## fname, STT_FUNC; \
+ .align 8; \
+ _ff_bfin_ ## fname
+
+#define DEFUN_END(fname) \
+ .size _ff_bfin_ ## fname, . - _ff_bfin_ ## fname
+
+
+.text
+
+#define COEFF_LEN 11*4
+#define COEFF_REL_CY_OFF 4*4
+
+#define ARG_OUT 20
+#define ARG_W 24
+#define ARG_COEFF 28
+
+DEFUN(yuv2rgb565_line,MEM,
+ (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)):
+ link 0;
+ [--sp] = (r7:4);
+ p1 = [fp+ARG_OUT];
+ r3 = [fp+ARG_W];
+
+ i0 = r0;
+ i2 = r1;
+ i3 = r2;
+
+ r0 = [fp+ARG_COEFF];
+ i1 = r0;
+ b1 = i1;
+ l1 = COEFF_LEN;
+ m0 = COEFF_REL_CY_OFF;
+ p0 = r3;
+
+ r0 = [i0++]; // 2Y
+ r1.l = w[i2++]; // 2u
+ r1.h = w[i3++]; // 2v
+ p0 = p0>>2;
+
+ lsetup (.L0565, .L1565) lc0 = p0;
+
+ /*
+ uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv
+ r0 -- used to load 4ys
+ r1 -- used to load 2us,2vs
+ r4 -- y3,y2
+ r5 -- y1,y0
+ r6 -- u1,u0
+ r7 -- v1,v0
+ */
+ r2=[i1++]; // oy
+.L0565:
+ /*
+ rrrrrrrr gggggggg bbbbbbbb
+ 5432109876543210
+ bbbbb >>3
+ gggggggg <<3
+ rrrrrrrr <<8
+ rrrrrggggggbbbbb
+ */
+ (r4,r5) = byteop16m (r1:0, r3:2) || r3=[i1++]; // oc
+ (r7,r6) = byteop16m (r1:0, r3:2) (r);
+ r5 = r5 << 2 (v); // y1,y0
+ r4 = r4 << 2 (v); // y3,y2
+ r6 = r6 << 2 (v) || r0=[i1++]; // u1,u0, r0=zero
+ r7 = r7 << 2 (v) || r1=[i1++]; // v1,v0 r1=cy
+ /* Y' = y*cy */
+ a1 = r1.h*r5.h, a0 = r1.l*r5.l || r1=[i1++]; // crv
+
+ /* R = Y+ crv*(Cr-128) */
+ r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+ a1 -= r1.h*r7.l, a0 -= r1.l*r7.l || r5=[i1++]; // rmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu
+ r2 = r2 >> 3 (v);
+ r3 = r2 & r5;
+
+ /* B = Y+ cbu*(Cb-128) */
+ r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l);
+ a1 -= r1.h*r6.l, a0 -= r1.l*r6.l || r5=[i1++]; // bmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu
+ r2 = r2 << 8 (v);
+ r2 = r2 & r5;
+ r3 = r3 | r2;
+
+ /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+ a1 += r1.h*r6.l, a0 += r1.l*r6.l || r1=[i1++]; // cgv
+ r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+ r2 = byteop3p(r3:2, r1:0)(LO) || r5=[i1++m0]; // gmask
+ r2 = r2 << 3 (v);
+ r2 = r2 & r5;
+ r3 = r3 | r2;
+ [p1++]=r3 || r1=[i1++]; // cy
+
+ /* Y' = y*cy */
+
+ a1 = r1.h*r4.h, a0 = r1.l*r4.l || r1=[i1++]; // crv
+
+ /* R = Y+ crv*(Cr-128) */
+ r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h);
+ a1 -= r1.h*r7.h, a0 -= r1.l*r7.h || r5=[i1++]; // rmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu
+ r2 = r2 >> 3 (v);
+ r3 = r2 & r5;
+
+ /* B = Y+ cbu*(Cb-128) */
+ r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h);
+ a1 -= r1.h*r6.h, a0 -= r1.l*r6.h || r5=[i1++]; // bmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu
+ r2 = r2 << 8 (v);
+ r2 = r2 & r5;
+ r3 = r3 | r2;
+
+ /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+ a1 += r1.h*r6.h, a0 += r1.l*r6.h || r1=[i1++]; // cgv
+ r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h) || r5=[i1++]; // gmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r0 = [i0++]; // 2Y
+ r2 = r2 << 3 (v) || r1.l = w[i2++]; // 2u
+ r2 = r2 & r5;
+ r3 = r3 | r2;
+ [p1++]=r3 || r1.h = w[i3++]; // 2v
+.L1565: r2=[i1++]; // oy
+
+ l1 = 0;
+
+ (r7:4) = [sp++];
+ unlink;
+ rts;
+DEFUN_END(yuv2rgb565_line)
+
+DEFUN(yuv2rgb555_line,MEM,
+ (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)):
+ link 0;
+ [--sp] = (r7:4);
+ p1 = [fp+ARG_OUT];
+ r3 = [fp+ARG_W];
+
+ i0 = r0;
+ i2 = r1;
+ i3 = r2;
+
+ r0 = [fp+ARG_COEFF];
+ i1 = r0;
+ b1 = i1;
+ l1 = COEFF_LEN;
+ m0 = COEFF_REL_CY_OFF;
+ p0 = r3;
+
+ r0 = [i0++]; // 2Y
+ r1.l = w[i2++]; // 2u
+ r1.h = w[i3++]; // 2v
+ p0 = p0>>2;
+
+ lsetup (.L0555, .L1555) lc0 = p0;
+
+ /*
+ uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv
+ r0 -- used to load 4ys
+ r1 -- used to load 2us,2vs
+ r4 -- y3,y2
+ r5 -- y1,y0
+ r6 -- u1,u0
+ r7 -- v1,v0
+ */
+ r2=[i1++]; // oy
+.L0555:
+ /*
+ rrrrrrrr gggggggg bbbbbbbb
+ 5432109876543210
+ bbbbb >>3
+ gggggggg <<2
+ rrrrrrrr <<7
+ xrrrrrgggggbbbbb
+ */
+
+ (r4,r5) = byteop16m (r1:0, r3:2) || r3=[i1++]; // oc
+ (r7,r6) = byteop16m (r1:0, r3:2) (r);
+ r5 = r5 << 2 (v); // y1,y0
+ r4 = r4 << 2 (v); // y3,y2
+ r6 = r6 << 2 (v) || r0=[i1++]; // u1,u0, r0=zero
+ r7 = r7 << 2 (v) || r1=[i1++]; // v1,v0 r1=cy
+ /* Y' = y*cy */
+ a1 = r1.h*r5.h, a0 = r1.l*r5.l || r1=[i1++]; // crv
+
+ /* R = Y+ crv*(Cr-128) */
+ r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+ a1 -= r1.h*r7.l, a0 -= r1.l*r7.l || r5=[i1++]; // rmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu
+ r2 = r2 >> 3 (v);
+ r3 = r2 & r5;
+
+ /* B = Y+ cbu*(Cb-128) */
+ r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l);
+ a1 -= r1.h*r6.l, a0 -= r1.l*r6.l || r5=[i1++]; // bmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu
+ r2 = r2 << 7 (v);
+ r2 = r2 & r5;
+ r3 = r3 | r2;
+
+ /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+ a1 += r1.h*r6.l, a0 += r1.l*r6.l || r1=[i1++]; // cgv
+ r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+ r2 = byteop3p(r3:2, r1:0)(LO) || r5=[i1++m0]; // gmask
+ r2 = r2 << 2 (v);
+ r2 = r2 & r5;
+ r3 = r3 | r2;
+ [p1++]=r3 || r1=[i1++]; // cy
+
+ /* Y' = y*cy */
+
+ a1 = r1.h*r4.h, a0 = r1.l*r4.l || r1=[i1++]; // crv
+
+ /* R = Y+ crv*(Cr-128) */
+ r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h);
+ a1 -= r1.h*r7.h, a0 -= r1.l*r7.h || r5=[i1++]; // rmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu
+ r2 = r2 >> 3 (v);
+ r3 = r2 & r5;
+
+ /* B = Y+ cbu*(Cb-128) */
+ r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h);
+ a1 -= r1.h*r6.h, a0 -= r1.l*r6.h || r5=[i1++]; // bmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu
+ r2 = r2 << 7 (v);
+ r2 = r2 & r5;
+ r3 = r3 | r2;
+
+ /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+ a1 += r1.h*r6.h, a0 += r1.l*r6.h || r1=[i1++]; // cgv
+ r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h) || r5=[i1++]; // gmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r0=[i0++]; // 4Y
+ r2 = r2 << 2 (v) || r1.l=w[i2++]; // 2u
+ r2 = r2 & r5;
+ r3 = r3 | r2;
+ [p1++]=r3 || r1.h=w[i3++]; // 2v
+
+.L1555: r2=[i1++]; // oy
+
+ l1 = 0;
+
+ (r7:4) = [sp++];
+ unlink;
+ rts;
+DEFUN_END(yuv2rgb555_line)
+
+DEFUN(yuv2rgb24_line,MEM,
+ (uint8_t *Y, uint8_t *U, uint8_t *V, int *out, int dW, uint32_t *coeffs)):
+ link 0;
+ [--sp] = (r7:4);
+ p1 = [fp+ARG_OUT];
+ r3 = [fp+ARG_W];
+ p2 = p1;
+ p2 += 3;
+
+ i0 = r0;
+ i2 = r1;
+ i3 = r2;
+
+ r0 = [fp+ARG_COEFF]; // coeff buffer
+ i1 = r0;
+ b1 = i1;
+ l1 = COEFF_LEN;
+ m0 = COEFF_REL_CY_OFF;
+ p0 = r3;
+
+ r0 = [i0++]; // 2Y
+ r1.l = w[i2++]; // 2u
+ r1.h = w[i3++]; // 2v
+ p0 = p0>>2;
+
+ lsetup (.L0888, .L1888) lc0 = p0;
+
+ /*
+ uint32_t oy,oc,zero,cy,crv,rmask,cbu,bmask,cgu,cgv
+ r0 -- used to load 4ys
+ r1 -- used to load 2us,2vs
+ r4 -- y3,y2
+ r5 -- y1,y0
+ r6 -- u1,u0
+ r7 -- v1,v0
+ */
+ r2=[i1++]; // oy
+.L0888:
+ (r4,r5) = byteop16m (r1:0, r3:2) || r3=[i1++]; // oc
+ (r7,r6) = byteop16m (r1:0, r3:2) (r);
+ r5 = r5 << 2 (v); // y1,y0
+ r4 = r4 << 2 (v); // y3,y2
+ r6 = r6 << 2 (v) || r0=[i1++]; // u1,u0, r0=zero
+ r7 = r7 << 2 (v) || r1=[i1++]; // v1,v0 r1=cy
+
+ /* Y' = y*cy */
+ a1 = r1.h*r5.h, a0 = r1.l*r5.l || r1=[i1++]; // crv
+
+ /* R = Y+ crv*(Cr-128) */
+ r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+ a1 -= r1.h*r7.l, a0 -= r1.l*r7.l || r5=[i1++]; // rmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu
+ r2=r2>>16 || B[p1++]=r2;
+ B[p2++]=r2;
+
+ /* B = Y+ cbu*(Cb-128) */
+ r2.h = (a1 += r1.h*r6.l), r2.l = (a0 += r1.l*r6.l);
+ a1 -= r1.h*r6.l, a0 -= r1.l*r6.l || r5=[i1++]; // bmask
+ r3 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu
+
+ /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+ a1 += r1.h*r6.l, a0 += r1.l*r6.l || r1=[i1++]; // cgv
+ r2.h = (a1 += r1.h*r7.l), r2.l = (a0 += r1.l*r7.l);
+ r2 = byteop3p(r3:2, r1:0)(LO) || r5=[i1++m0]; // gmask, oy,cy,zero
+
+ r2=r2>>16 || B[p1++]=r2;
+ B[p2++]=r2;
+
+ r3=r3>>16 || B[p1++]=r3;
+ B[p2++]=r3 || r1=[i1++]; // cy
+
+ p1+=3;
+ p2+=3;
+ /* Y' = y*cy */
+ a1 = r1.h*r4.h, a0 = r1.l*r4.l || r1=[i1++]; // crv
+
+ /* R = Y+ crv*(Cr-128) */
+ r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h);
+ a1 -= r1.h*r7.h, a0 -= r1.l*r7.h || r5=[i1++]; // rmask
+ r2 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cbu
+ r2=r2>>16 || B[p1++]=r2;
+ B[p2++]=r2;
+
+ /* B = Y+ cbu*(Cb-128) */
+ r2.h = (a1 += r1.h*r6.h), r2.l = (a0 += r1.l*r6.h);
+ a1 -= r1.h*r6.h, a0 -= r1.l*r6.h || r5=[i1++]; // bmask
+ r3 = byteop3p(r3:2, r1:0)(LO) || r1=[i1++]; // cgu
+
+ /* G = Y+ cgu*(Cb-128)+cgv*(Cr-128) */
+ a1 += r1.h*r6.h, a0 += r1.l*r6.h || r1=[i1++]; // cgv
+ r2.h = (a1 += r1.h*r7.h), r2.l = (a0 += r1.l*r7.h);
+ r2 = byteop3p(r3:2, r1:0)(LO) || r5=[i1++]; // gmask
+ r2=r2>>16 || B[p1++]=r2 || r0 = [i0++]; // 4y
+ B[p2++]=r2 || r1.l = w[i2++]; // 2u
+ r3=r3>>16 || B[p1++]=r3 || r1.h = w[i3++]; // 2v
+ B[p2++]=r3 || r2=[i1++]; // oy
+
+ p1+=3;
+.L1888: p2+=3;
+
+ l1 = 0;
+
+ (r7:4) = [sp++];
+ unlink;
+ rts;
+DEFUN_END(yuv2rgb24_line)
+
+
+
+#define ARG_vdst 20
+#define ARG_width 24
+#define ARG_height 28
+#define ARG_lumStride 32
+#define ARG_chromStride 36
+#define ARG_srcStride 40
+
+DEFUN(uyvytoyv12, mL3, (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)):
+ link 0;
+ [--sp] = (r7:4,p5:4);
+
+ p0 = r1; // Y top even
+
+ i2 = r2; // *u
+ r2 = [fp + ARG_vdst];
+ i3 = r2; // *v
+
+ r1 = [fp + ARG_srcStride];
+ r2 = r0 + r1;
+ r1 += -8; // i0,i1 is pre read need to correct
+ m0 = r1;
+
+ i0 = r0; // uyvy_T even
+ i1 = r2; // uyvy_B odd
+
+ p2 = [fp + ARG_lumStride];
+ p1 = p0 + p2; // Y bot odd
+
+ p5 = [fp + ARG_width];
+ p4 = [fp + ARG_height];
+ r0 = p5;
+ p4 = p4 >> 1;
+ p5 = p5 >> 2;
+
+ r2 = [fp + ARG_chromStride];
+ r0 = r0 >> 1;
+ r2 = r2 - r0;
+ m1 = r2;
+
+ /* I0,I1 - src input line pointers
+ * p0,p1 - luma output line pointers
+ * I2 - dstU
+ * I3 - dstV
+ */
+
+ lsetup (0f, 1f) lc1 = p4; // H/2
+0: r0 = [i0++] || r2 = [i1++];
+ r1 = [i0++] || r3 = [i1++];
+ r4 = byteop1p(r1:0, r3:2);
+ r5 = byteop1p(r1:0, r3:2) (r);
+ lsetup (2f, 3f) lc0 = p5; // W/4
+2: r0 = r0 >> 8(v);
+ r1 = r1 >> 8(v);
+ r2 = r2 >> 8(v);
+ r3 = r3 >> 8(v);
+ r0 = bytepack(r0, r1);
+ r2 = bytepack(r2, r3) || [p0++] = r0; // yyyy
+ r6 = pack(r5.l, r4.l) || [p1++] = r2; // yyyy
+ r7 = pack(r5.h, r4.h) || r0 = [i0++] || r2 = [i1++];
+ r6 = bytepack(r6, r7) || r1 = [i0++] || r3 = [i1++];
+ r4 = byteop1p(r1:0, r3:2) || w[i2++] = r6.l; // uu
+3: r5 = byteop1p(r1:0, r3:2) (r) || w[i3++] = r6.h; // vv
+
+ i0 += m0;
+ i1 += m0;
+ i2 += m1;
+ i3 += m1;
+ p0 = p0 + p2;
+1: p1 = p1 + p2;
+
+ (r7:4,p5:4) = [sp++];
+ unlink;
+ rts;
+DEFUN_END(uyvytoyv12)
+
+DEFUN(yuyvtoyv12, mL3, (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)):
+ link 0;
+ [--sp] = (r7:4,p5:4);
+
+ p0 = r1; // Y top even
+
+ i2 = r2; // *u
+ r2 = [fp + ARG_vdst];
+ i3 = r2; // *v
+
+ r1 = [fp + ARG_srcStride];
+ r2 = r0 + r1;
+ r1 += -8; // i0,i1 is pre read need to correct
+ m0 = r1;
+
+ i0 = r0; // uyvy_T even
+ i1 = r2; // uyvy_B odd
+
+ p2 = [fp + ARG_lumStride];
+ p1 = p0 + p2; // Y bot odd
+
+ p5 = [fp + ARG_width];
+ p4 = [fp + ARG_height];
+ r0 = p5;
+ p4 = p4 >> 1;
+ p5 = p5 >> 2;
+
+ r2 = [fp + ARG_chromStride];
+ r0 = r0 >> 1;
+ r2 = r2 - r0;
+ m1 = r2;
+
+ /* I0,I1 - src input line pointers
+ * p0,p1 - luma output line pointers
+ * I2 - dstU
+ * I3 - dstV
+ */
+
+ lsetup (0f, 1f) lc1 = p4; // H/2
+0: r0 = [i0++] || r2 = [i1++];
+ r1 = [i0++] || r3 = [i1++];
+ r4 = bytepack(r0, r1);
+ r5 = bytepack(r2, r3);
+ lsetup (2f, 3f) lc0 = p5; // W/4
+2: r0 = r0 >> 8(v) || [p0++] = r4; // yyyy-even
+ r1 = r1 >> 8(v) || [p1++] = r5; // yyyy-odd
+ r2 = r2 >> 8(v);
+ r3 = r3 >> 8(v);
+ r4 = byteop1p(r1:0, r3:2);
+ r5 = byteop1p(r1:0, r3:2) (r);
+ r6 = pack(r5.l, r4.l);
+ r7 = pack(r5.h, r4.h) || r0 = [i0++] || r2 = [i1++];
+ r6 = bytepack(r6, r7) || r1 = [i0++] || r3 = [i1++];
+ r4 = bytepack(r0, r1) || w[i2++] = r6.l; // uu
+3: r5 = bytepack(r2, r3) || w[i3++] = r6.h; // vv
+
+ i0 += m0;
+ i1 += m0;
+ i2 += m1;
+ i3 += m1;
+ p0 = p0 + p2;
+1: p1 = p1 + p2;
+
+ (r7:4,p5:4) = [sp++];
+ unlink;
+ rts;
+DEFUN_END(yuyvtoyv12)
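
The header comment at the top of internal_bfin.S spells out the per-pixel equations and the 565 packing that the Blackfin loops vectorize. A scalar C sketch of that math follows; clipz() clamping to [0,255] and the >>16 fixed-point scale are assumptions for illustration, since the assembly instead uses Blackfin fractional multiplies and the oy/oc/cy/crv/cbu/cgu/cgv coefficient block described above.

    /* Scalar sketch of the math that yuv2rgb565_line vectorizes two pixels
     * at a time.  Coefficient names follow the comment above; the >>16
     * scale is illustrative, not what the assembly actually loads. */
    #include <stdint.h>

    static int clipz(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    uint16_t yuv_to_rgb565(int y, int u, int v,
                           int oy, int cy, int crv, int cgu, int cgv, int cbu)
    {
        int luma = (y - oy) * cy;                        /* Y' = (y-oy)*cy */
        int r = clipz((luma + crv * (v - 128)) >> 16);   /* R = Y' + crv*(Cr-128) */
        int g = clipz((luma + cgu * (u - 128)
                            + cgv * (v - 128)) >> 16);   /* G = Y' + cgu*(Cb-128) + cgv*(Cr-128) */
        int b = clipz((luma + cbu * (u - 128)) >> 16);   /* B = Y' + cbu*(Cb-128) */

        /* 565 packing per the "bbbbb >>3 / gggggggg <<3 / rrrrrrrr <<8" diagram:
         * 5 bits red, 6 bits green, 5 bits blue in one short. */
        return (uint16_t)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
    }
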
diff --git a/contrib/ffmpeg/libswscale/rgb2rgb.c b/contrib/ffmpeg/libswscale/rgb2rgb.c
index a938abfc9..faf3e95c6 100644
--- a/contrib/ffmpeg/libswscale/rgb2rgb.c
+++ b/contrib/ffmpeg/libswscale/rgb2rgb.c
@@ -1,5 +1,4 @@
/*
- *
* rgb2rgb.c, Software RGB to RGB convertor
* pluralize by Software PAL8 to RGB convertor
* Software YUV to YUV convertor
@@ -21,8 +20,8 @@
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
* the C code (not assembly, mmx, ...) of this file can be used
* under the LGPL license too
*/
@@ -33,24 +32,21 @@
#include "swscale_internal.h"
#include "x86_cpu.h"
#include "bswap.h"
-#ifdef USE_FASTMEMCPY
-#include "libvo/fastmemcpy.h"
-#endif
#define FAST_BGR2YV12 // use 7 bit coeffs instead of 15bit
-void (*rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb24to16)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb24to15)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb32to16)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb32to15)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb15to24)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb15to32)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb16to24)(const uint8_t *src,uint8_t *dst,long src_size);
-void (*rgb16to32)(const uint8_t *src,uint8_t *dst,long src_size);
+void (*rgb24to32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32to24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15to24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16to24)(const uint8_t *src, uint8_t *dst, long src_size);
+void (*rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size);
//void (*rgb24tobgr32)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
@@ -61,83 +57,70 @@ void (*rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
void (*rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride);
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride);
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride);
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
- long width, long height,
- long lumStride, long chromStride, long srcStride);
+ long width, long height,
+ long lumStride, long chromStride, long srcStride);
void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
- long width, long height,
- long lumStride, long chromStride, long srcStride);
+ long width, long height,
+ long lumStride, long chromStride, long srcStride);
void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height,
- long srcStride, long dstStride);
+ long srcStride, long dstStride);
void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst,
- long width, long height, long src1Stride,
- long src2Stride, long dstStride);
+ long width, long height, long src1Stride,
+ long src2Stride, long dstStride);
void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
- uint8_t *dst1, uint8_t *dst2,
- long width, long height,
- long srcStride1, long srcStride2,
- long dstStride1, long dstStride2);
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long dstStride1, long dstStride2);
void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
- uint8_t *dst,
- long width, long height,
- long srcStride1, long srcStride2,
- long srcStride3, long dstStride);
+ uint8_t *dst,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long srcStride3, long dstStride);
#if defined(ARCH_X86) && defined(CONFIG_GPL)
-static const uint64_t mmx_null __attribute__((aligned(8))) = 0x0000000000000000ULL;
-static const uint64_t mmx_one __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
-static const uint64_t mask32b attribute_used __attribute__((aligned(8))) = 0x000000FF000000FFULL;
-static const uint64_t mask32g attribute_used __attribute__((aligned(8))) = 0x0000FF000000FF00ULL;
-static const uint64_t mask32r attribute_used __attribute__((aligned(8))) = 0x00FF000000FF0000ULL;
-static const uint64_t mask32 __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
-static const uint64_t mask3216br __attribute__((aligned(8)))=0x00F800F800F800F8ULL;
-static const uint64_t mask3216g __attribute__((aligned(8)))=0x0000FC000000FC00ULL;
-static const uint64_t mask3215g __attribute__((aligned(8)))=0x0000F8000000F800ULL;
-static const uint64_t mul3216 __attribute__((aligned(8))) = 0x2000000420000004ULL;
-static const uint64_t mul3215 __attribute__((aligned(8))) = 0x2000000820000008ULL;
-static const uint64_t mask24b attribute_used __attribute__((aligned(8))) = 0x00FF0000FF0000FFULL;
-static const uint64_t mask24g attribute_used __attribute__((aligned(8))) = 0xFF0000FF0000FF00ULL;
-static const uint64_t mask24r attribute_used __attribute__((aligned(8))) = 0x0000FF0000FF0000ULL;
-static const uint64_t mask24l __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
-static const uint64_t mask24h __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
-static const uint64_t mask24hh __attribute__((aligned(8))) = 0xffff000000000000ULL;
-static const uint64_t mask24hhh __attribute__((aligned(8))) = 0xffffffff00000000ULL;
-static const uint64_t mask24hhhh __attribute__((aligned(8))) = 0xffffffffffff0000ULL;
-static const uint64_t mask15b __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
-static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
-static const uint64_t mask15s __attribute__((aligned(8))) = 0xFFE0FFE0FFE0FFE0ULL;
-static const uint64_t mask15g __attribute__((aligned(8))) = 0x03E003E003E003E0ULL;
-static const uint64_t mask15r __attribute__((aligned(8))) = 0x7C007C007C007C00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_null) = 0x0000000000000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_one) = 0xFFFFFFFFFFFFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32b) = 0x000000FF000000FFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32g) = 0x0000FF000000FF00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32r) = 0x00FF000000FF0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask32) = 0x00FFFFFF00FFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3216br) = 0x00F800F800F800F8ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3216g) = 0x0000FC000000FC00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask3215g) = 0x0000F8000000F800ULL;
+DECLARE_ASM_CONST(8, uint64_t, mul3216) = 0x2000000420000004ULL;
+DECLARE_ASM_CONST(8, uint64_t, mul3215) = 0x2000000820000008ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24b) = 0x00FF0000FF0000FFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24g) = 0xFF0000FF0000FF00ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24r) = 0x0000FF0000FF0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24l) = 0x0000000000FFFFFFULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24h) = 0x0000FFFFFF000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hh) = 0xffff000000000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hhh) = 0xffffffff00000000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask24hhhh) = 0xffffffffffff0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15b) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
+DECLARE_ASM_CONST(8, uint64_t, mask15rg) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
+DECLARE_ASM_CONST(8, uint64_t, mask15s) = 0xFFE0FFE0FFE0FFE0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15g) = 0x03E003E003E003E0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask15r) = 0x7C007C007C007C00ULL;
#define mask16b mask15b
-static const uint64_t mask16g __attribute__((aligned(8))) = 0x07E007E007E007E0ULL;
-static const uint64_t mask16r __attribute__((aligned(8))) = 0xF800F800F800F800ULL;
-static const uint64_t red_16mask __attribute__((aligned(8))) = 0x0000f8000000f800ULL;
-static const uint64_t green_16mask __attribute__((aligned(8)))= 0x000007e0000007e0ULL;
-static const uint64_t blue_16mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
-static const uint64_t red_15mask __attribute__((aligned(8))) = 0x00007c000000f800ULL;
-static const uint64_t green_15mask __attribute__((aligned(8)))= 0x000003e0000007e0ULL;
-static const uint64_t blue_15mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
-
-#ifdef FAST_BGR2YV12
-static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000000210041000DULL;
-static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
-static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
-#else
-static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000020E540830C8BULL;
-static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
-static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
-#endif
-static const uint64_t bgr2YOffset attribute_used __attribute__((aligned(8))) = 0x1010101010101010ULL;
-static const uint64_t bgr2UVOffset attribute_used __attribute__((aligned(8)))= 0x8080808080808080ULL;
-static const uint64_t w1111 attribute_used __attribute__((aligned(8))) = 0x0001000100010001ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask16g) = 0x07E007E007E007E0ULL;
+DECLARE_ASM_CONST(8, uint64_t, mask16r) = 0xF800F800F800F800ULL;
+DECLARE_ASM_CONST(8, uint64_t, red_16mask) = 0x0000f8000000f800ULL;
+DECLARE_ASM_CONST(8, uint64_t, green_16mask) = 0x000007e0000007e0ULL;
+DECLARE_ASM_CONST(8, uint64_t, blue_16mask) = 0x0000001f0000001fULL;
+DECLARE_ASM_CONST(8, uint64_t, red_15mask) = 0x00007c0000007c00ULL;
+DECLARE_ASM_CONST(8, uint64_t, green_15mask) = 0x000003e0000003e0ULL;
+DECLARE_ASM_CONST(8, uint64_t, blue_15mask) = 0x0000001f0000001fULL;
#if 0
static volatile uint64_t __attribute__((aligned(8))) b5Dither;
@@ -146,12 +129,12 @@ static volatile uint64_t __attribute__((aligned(8))) g6Dither;
static volatile uint64_t __attribute__((aligned(8))) r5Dither;
static uint64_t __attribute__((aligned(8))) dither4[2]={
- 0x0103010301030103LL,
- 0x0200020002000200LL,};
+ 0x0103010301030103LL,
+ 0x0200020002000200LL,};
static uint64_t __attribute__((aligned(8))) dither8[2]={
- 0x0602060206020602LL,
- 0x0004000400040004LL,};
+ 0x0602060206020602LL,
+ 0x0004000400040004LL,};
#endif
#endif /* defined(ARCH_X86) */
@@ -215,129 +198,15 @@ static uint64_t __attribute__((aligned(8))) dither8[2]={
void sws_rgb2rgb_init(int flags){
#if (defined(HAVE_MMX2) || defined(HAVE_3DNOW) || defined(HAVE_MMX)) && defined(CONFIG_GPL)
- if(flags & SWS_CPU_CAPS_MMX2){
- rgb15to16= rgb15to16_MMX2;
- rgb15to24= rgb15to24_MMX2;
- rgb15to32= rgb15to32_MMX2;
- rgb16to24= rgb16to24_MMX2;
- rgb16to32= rgb16to32_MMX2;
- rgb16to15= rgb16to15_MMX2;
- rgb24to16= rgb24to16_MMX2;
- rgb24to15= rgb24to15_MMX2;
- rgb24to32= rgb24to32_MMX2;
- rgb32to16= rgb32to16_MMX2;
- rgb32to15= rgb32to15_MMX2;
- rgb32to24= rgb32to24_MMX2;
- rgb24tobgr15= rgb24tobgr15_MMX2;
- rgb24tobgr16= rgb24tobgr16_MMX2;
- rgb24tobgr24= rgb24tobgr24_MMX2;
- rgb32tobgr32= rgb32tobgr32_MMX2;
- rgb32tobgr16= rgb32tobgr16_MMX2;
- rgb32tobgr15= rgb32tobgr15_MMX2;
- yv12toyuy2= yv12toyuy2_MMX2;
- yv12touyvy= yv12touyvy_MMX2;
- yuv422ptoyuy2= yuv422ptoyuy2_MMX2;
- yuy2toyv12= yuy2toyv12_MMX2;
-// uyvytoyv12= uyvytoyv12_MMX2;
-// yvu9toyv12= yvu9toyv12_MMX2;
- planar2x= planar2x_MMX2;
- rgb24toyv12= rgb24toyv12_MMX2;
- interleaveBytes= interleaveBytes_MMX2;
- vu9_to_vu12= vu9_to_vu12_MMX2;
- yvu9_to_yuy2= yvu9_to_yuy2_MMX2;
- }else if(flags & SWS_CPU_CAPS_3DNOW){
- rgb15to16= rgb15to16_3DNOW;
- rgb15to24= rgb15to24_3DNOW;
- rgb15to32= rgb15to32_3DNOW;
- rgb16to24= rgb16to24_3DNOW;
- rgb16to32= rgb16to32_3DNOW;
- rgb16to15= rgb16to15_3DNOW;
- rgb24to16= rgb24to16_3DNOW;
- rgb24to15= rgb24to15_3DNOW;
- rgb24to32= rgb24to32_3DNOW;
- rgb32to16= rgb32to16_3DNOW;
- rgb32to15= rgb32to15_3DNOW;
- rgb32to24= rgb32to24_3DNOW;
- rgb24tobgr15= rgb24tobgr15_3DNOW;
- rgb24tobgr16= rgb24tobgr16_3DNOW;
- rgb24tobgr24= rgb24tobgr24_3DNOW;
- rgb32tobgr32= rgb32tobgr32_3DNOW;
- rgb32tobgr16= rgb32tobgr16_3DNOW;
- rgb32tobgr15= rgb32tobgr15_3DNOW;
- yv12toyuy2= yv12toyuy2_3DNOW;
- yv12touyvy= yv12touyvy_3DNOW;
- yuv422ptoyuy2= yuv422ptoyuy2_3DNOW;
- yuy2toyv12= yuy2toyv12_3DNOW;
-// uyvytoyv12= uyvytoyv12_3DNOW;
-// yvu9toyv12= yvu9toyv12_3DNOW;
- planar2x= planar2x_3DNOW;
- rgb24toyv12= rgb24toyv12_3DNOW;
- interleaveBytes= interleaveBytes_3DNOW;
- vu9_to_vu12= vu9_to_vu12_3DNOW;
- yvu9_to_yuy2= yvu9_to_yuy2_3DNOW;
- }else if(flags & SWS_CPU_CAPS_MMX){
- rgb15to16= rgb15to16_MMX;
- rgb15to24= rgb15to24_MMX;
- rgb15to32= rgb15to32_MMX;
- rgb16to24= rgb16to24_MMX;
- rgb16to32= rgb16to32_MMX;
- rgb16to15= rgb16to15_MMX;
- rgb24to16= rgb24to16_MMX;
- rgb24to15= rgb24to15_MMX;
- rgb24to32= rgb24to32_MMX;
- rgb32to16= rgb32to16_MMX;
- rgb32to15= rgb32to15_MMX;
- rgb32to24= rgb32to24_MMX;
- rgb24tobgr15= rgb24tobgr15_MMX;
- rgb24tobgr16= rgb24tobgr16_MMX;
- rgb24tobgr24= rgb24tobgr24_MMX;
- rgb32tobgr32= rgb32tobgr32_MMX;
- rgb32tobgr16= rgb32tobgr16_MMX;
- rgb32tobgr15= rgb32tobgr15_MMX;
- yv12toyuy2= yv12toyuy2_MMX;
- yv12touyvy= yv12touyvy_MMX;
- yuv422ptoyuy2= yuv422ptoyuy2_MMX;
- yuy2toyv12= yuy2toyv12_MMX;
-// uyvytoyv12= uyvytoyv12_MMX;
-// yvu9toyv12= yvu9toyv12_MMX;
- planar2x= planar2x_MMX;
- rgb24toyv12= rgb24toyv12_MMX;
- interleaveBytes= interleaveBytes_MMX;
- vu9_to_vu12= vu9_to_vu12_MMX;
- yvu9_to_yuy2= yvu9_to_yuy2_MMX;
- }else
+ if (flags & SWS_CPU_CAPS_MMX2)
+ rgb2rgb_init_MMX2();
+ else if (flags & SWS_CPU_CAPS_3DNOW)
+ rgb2rgb_init_3DNOW();
+ else if (flags & SWS_CPU_CAPS_MMX)
+ rgb2rgb_init_MMX();
+ else
#endif /* defined(HAVE_MMX2) || defined(HAVE_3DNOW) || defined(HAVE_MMX) */
- {
- rgb15to16= rgb15to16_C;
- rgb15to24= rgb15to24_C;
- rgb15to32= rgb15to32_C;
- rgb16to24= rgb16to24_C;
- rgb16to32= rgb16to32_C;
- rgb16to15= rgb16to15_C;
- rgb24to16= rgb24to16_C;
- rgb24to15= rgb24to15_C;
- rgb24to32= rgb24to32_C;
- rgb32to16= rgb32to16_C;
- rgb32to15= rgb32to15_C;
- rgb32to24= rgb32to24_C;
- rgb24tobgr15= rgb24tobgr15_C;
- rgb24tobgr16= rgb24tobgr16_C;
- rgb24tobgr24= rgb24tobgr24_C;
- rgb32tobgr32= rgb32tobgr32_C;
- rgb32tobgr16= rgb32tobgr16_C;
- rgb32tobgr15= rgb32tobgr15_C;
- yv12toyuy2= yv12toyuy2_C;
- yv12touyvy= yv12touyvy_C;
- yuv422ptoyuy2= yuv422ptoyuy2_C;
- yuy2toyv12= yuy2toyv12_C;
-// uyvytoyv12= uyvytoyv12_C;
-// yvu9toyv12= yvu9toyv12_C;
- planar2x= planar2x_C;
- rgb24toyv12= rgb24toyv12_C;
- interleaveBytes= interleaveBytes_C;
- vu9_to_vu12= vu9_to_vu12_C;
- yvu9_to_yuy2= yvu9_to_yuy2_C;
- }
+ rgb2rgb_init_C();
}
/**
@@ -345,49 +214,49 @@ void sws_rgb2rgb_init(int flags){
*/
void palette8torgb32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
- long i;
+ long i;
/*
- for(i=0; i<num_pixels; i++)
- ((unsigned *)dst)[i] = ((unsigned *)palette)[ src[i] ];
+ for (i=0; i<num_pixels; i++)
+ ((unsigned *)dst)[i] = ((unsigned *)palette)[src[i]];
*/
- for(i=0; i<num_pixels; i++)
- {
- #ifdef WORDS_BIGENDIAN
- dst[3]= palette[ src[i]*4+2 ];
- dst[2]= palette[ src[i]*4+1 ];
- dst[1]= palette[ src[i]*4+0 ];
- #else
- //FIXME slow?
- dst[0]= palette[ src[i]*4+2 ];
- dst[1]= palette[ src[i]*4+1 ];
- dst[2]= palette[ src[i]*4+0 ];
- //dst[3]= 0; /* do we need this cleansing? */
- #endif
- dst+= 4;
- }
+ for (i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ dst[3]= palette[src[i]*4+2];
+ dst[2]= palette[src[i]*4+1];
+ dst[1]= palette[src[i]*4+0];
+ #else
+ //FIXME slow?
+ dst[0]= palette[src[i]*4+2];
+ dst[1]= palette[src[i]*4+1];
+ dst[2]= palette[src[i]*4+0];
+ //dst[3]= 0; /* do we need this cleansing? */
+ #endif
+ dst+= 4;
+ }
}
void palette8tobgr32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
- long i;
- for(i=0; i<num_pixels; i++)
- {
- #ifdef WORDS_BIGENDIAN
- dst[3]= palette[ src[i]*4+0 ];
- dst[2]= palette[ src[i]*4+1 ];
- dst[1]= palette[ src[i]*4+2 ];
- #else
- //FIXME slow?
- dst[0]= palette[ src[i]*4+0 ];
- dst[1]= palette[ src[i]*4+1 ];
- dst[2]= palette[ src[i]*4+2 ];
- //dst[3]= 0; /* do we need this cleansing? */
- #endif
-
- dst+= 4;
- }
+ long i;
+ for (i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ dst[3]= palette[src[i]*4+0];
+ dst[2]= palette[src[i]*4+1];
+ dst[1]= palette[src[i]*4+2];
+ #else
+ //FIXME slow?
+ dst[0]= palette[src[i]*4+0];
+ dst[1]= palette[src[i]*4+1];
+ dst[2]= palette[src[i]*4+2];
+ //dst[3]= 0; /* do we need this cleansing? */
+ #endif
+
+ dst+= 4;
+ }
}
/**
@@ -395,38 +264,38 @@ void palette8tobgr32(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
*/
void palette8torgb24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
- long i;
+ long i;
/*
- writes 1 byte o much and might cause alignment issues on some architectures?
- for(i=0; i<num_pixels; i++)
- ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
+ writes 1 byte o much and might cause alignment issues on some architectures?
+ for (i=0; i<num_pixels; i++)
+ ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[src[i]];
*/
- for(i=0; i<num_pixels; i++)
- {
- //FIXME slow?
- dst[0]= palette[ src[i]*4+2 ];
- dst[1]= palette[ src[i]*4+1 ];
- dst[2]= palette[ src[i]*4+0 ];
- dst+= 3;
- }
+ for (i=0; i<num_pixels; i++)
+ {
+ //FIXME slow?
+ dst[0]= palette[src[i]*4+2];
+ dst[1]= palette[src[i]*4+1];
+ dst[2]= palette[src[i]*4+0];
+ dst+= 3;
+ }
}
void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
- long i;
+ long i;
/*
- writes 1 byte o much and might cause alignment issues on some architectures?
- for(i=0; i<num_pixels; i++)
- ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
+ writes 1 byte o much and might cause alignment issues on some architectures?
+ for (i=0; i<num_pixels; i++)
+ ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[src[i]];
*/
- for(i=0; i<num_pixels; i++)
- {
- //FIXME slow?
- dst[0]= palette[ src[i]*4+0 ];
- dst[1]= palette[ src[i]*4+1 ];
- dst[2]= palette[ src[i]*4+2 ];
- dst+= 3;
- }
+ for (i=0; i<num_pixels; i++)
+ {
+ //FIXME slow?
+ dst[0]= palette[src[i]*4+0];
+ dst[1]= palette[src[i]*4+1];
+ dst[2]= palette[src[i]*4+2];
+ dst+= 3;
+ }
}
/**
@@ -434,15 +303,15 @@ void palette8tobgr24(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
*/
void palette8torgb16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
- long i;
- for(i=0; i<num_pixels; i++)
- ((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
+ long i;
+ for (i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = ((uint16_t *)palette)[src[i]];
}
void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
- long i;
- for(i=0; i<num_pixels; i++)
- ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
+ long i;
+ for (i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[src[i]]);
}
/**
@@ -450,216 +319,216 @@ void palette8tobgr16(const uint8_t *src, uint8_t *dst, long num_pixels, const ui
*/
void palette8torgb15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
- long i;
- for(i=0; i<num_pixels; i++)
- ((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
+ long i;
+ for (i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = ((uint16_t *)palette)[src[i]];
}
void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette)
{
- long i;
- for(i=0; i<num_pixels; i++)
- ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
+ long i;
+ for (i=0; i<num_pixels; i++)
+ ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[src[i]]);
}
void rgb32tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
{
- long i;
- long num_pixels = src_size >> 2;
- for(i=0; i<num_pixels; i++)
- {
- #ifdef WORDS_BIGENDIAN
- /* RGB32 (= A,B,G,R) -> BGR24 (= B,G,R) */
- dst[3*i + 0] = src[4*i + 1];
- dst[3*i + 1] = src[4*i + 2];
- dst[3*i + 2] = src[4*i + 3];
- #else
- dst[3*i + 0] = src[4*i + 2];
- dst[3*i + 1] = src[4*i + 1];
- dst[3*i + 2] = src[4*i + 0];
- #endif
- }
+ long i;
+ long num_pixels = src_size >> 2;
+ for (i=0; i<num_pixels; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ /* RGB32 (= A,B,G,R) -> BGR24 (= B,G,R) */
+ dst[3*i + 0] = src[4*i + 1];
+ dst[3*i + 1] = src[4*i + 2];
+ dst[3*i + 2] = src[4*i + 3];
+ #else
+ dst[3*i + 0] = src[4*i + 2];
+ dst[3*i + 1] = src[4*i + 1];
+ dst[3*i + 2] = src[4*i + 0];
+ #endif
+ }
}
void rgb24tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
{
- long i;
- for(i=0; 3*i<src_size; i++)
- {
- #ifdef WORDS_BIGENDIAN
- /* RGB24 (= R,G,B) -> BGR32 (= A,R,G,B) */
- dst[4*i + 0] = 0;
- dst[4*i + 1] = src[3*i + 0];
- dst[4*i + 2] = src[3*i + 1];
- dst[4*i + 3] = src[3*i + 2];
- #else
- dst[4*i + 0] = src[3*i + 2];
- dst[4*i + 1] = src[3*i + 1];
- dst[4*i + 2] = src[3*i + 0];
- dst[4*i + 3] = 0;
- #endif
- }
+ long i;
+ for (i=0; 3*i<src_size; i++)
+ {
+ #ifdef WORDS_BIGENDIAN
+ /* RGB24 (= R,G,B) -> BGR32 (= A,R,G,B) */
+ dst[4*i + 0] = 0;
+ dst[4*i + 1] = src[3*i + 0];
+ dst[4*i + 2] = src[3*i + 1];
+ dst[4*i + 3] = src[3*i + 2];
+ #else
+ dst[4*i + 0] = src[3*i + 2];
+ dst[4*i + 1] = src[3*i + 1];
+ dst[4*i + 2] = src[3*i + 0];
+ dst[4*i + 3] = 0;
+ #endif
+ }
}
void rgb16tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint16_t *end;
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (uint16_t *)src;
- end = s + src_size/2;
- while(s < end)
- {
- register uint16_t bgr;
- bgr = *s++;
- #ifdef WORDS_BIGENDIAN
- *d++ = 0;
- *d++ = (bgr&0x1F)<<3;
- *d++ = (bgr&0x7E0)>>3;
- *d++ = (bgr&0xF800)>>8;
- #else
- *d++ = (bgr&0xF800)>>8;
- *d++ = (bgr&0x7E0)>>3;
- *d++ = (bgr&0x1F)<<3;
- *d++ = 0;
- #endif
- }
+ const uint16_t *end;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ #ifdef WORDS_BIGENDIAN
+ *d++ = 0;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ #else
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = 0;
+ #endif
+ }
}
void rgb16tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint16_t *end;
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (const uint16_t *)src;
- end = s + src_size/2;
- while(s < end)
- {
- register uint16_t bgr;
- bgr = *s++;
- *d++ = (bgr&0xF800)>>8;
- *d++ = (bgr&0x7E0)>>3;
- *d++ = (bgr&0x1F)<<3;
- }
+ const uint16_t *end;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
+ }
}
void rgb16tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
{
- long i;
- long num_pixels = src_size >> 1;
-
- for(i=0; i<num_pixels; i++)
- {
- unsigned b,g,r;
- register uint16_t rgb;
- rgb = src[2*i];
- r = rgb&0x1F;
- g = (rgb&0x7E0)>>5;
- b = (rgb&0xF800)>>11;
- dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
- }
+ long i;
+ long num_pixels = src_size >> 1;
+
+ for (i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = src[2*i];
+ r = rgb&0x1F;
+ g = (rgb&0x7E0)>>5;
+ b = (rgb&0xF800)>>11;
+ dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
+ }
}
void rgb16tobgr15(const uint8_t *src, uint8_t *dst, long src_size)
{
- long i;
- long num_pixels = src_size >> 1;
-
- for(i=0; i<num_pixels; i++)
- {
- unsigned b,g,r;
- register uint16_t rgb;
- rgb = src[2*i];
- r = rgb&0x1F;
- g = (rgb&0x7E0)>>5;
- b = (rgb&0xF800)>>11;
- dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
- }
+ long i;
+ long num_pixels = src_size >> 1;
+
+ for (i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = src[2*i];
+ r = rgb&0x1F;
+ g = (rgb&0x7E0)>>5;
+ b = (rgb&0xF800)>>11;
+ dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
+ }
}
void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint16_t *end;
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (const uint16_t *)src;
- end = s + src_size/2;
- while(s < end)
- {
- register uint16_t bgr;
- bgr = *s++;
- #ifdef WORDS_BIGENDIAN
- *d++ = 0;
- *d++ = (bgr&0x1F)<<3;
- *d++ = (bgr&0x3E0)>>2;
- *d++ = (bgr&0x7C00)>>7;
- #else
- *d++ = (bgr&0x7C00)>>7;
- *d++ = (bgr&0x3E0)>>2;
- *d++ = (bgr&0x1F)<<3;
- *d++ = 0;
- #endif
- }
+ const uint16_t *end;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ #ifdef WORDS_BIGENDIAN
+ *d++ = 0;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ #else
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = 0;
+ #endif
+ }
}
void rgb15tobgr24(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint16_t *end;
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (uint16_t *)src;
- end = s + src_size/2;
- while(s < end)
- {
- register uint16_t bgr;
- bgr = *s++;
- *d++ = (bgr&0x7C00)>>7;
- *d++ = (bgr&0x3E0)>>2;
- *d++ = (bgr&0x1F)<<3;
- }
+ const uint16_t *end;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
+ }
}
void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size)
{
- long i;
- long num_pixels = src_size >> 1;
-
- for(i=0; i<num_pixels; i++)
- {
- unsigned b,g,r;
- register uint16_t rgb;
- rgb = src[2*i];
- r = rgb&0x1F;
- g = (rgb&0x3E0)>>5;
- b = (rgb&0x7C00)>>10;
- dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
- }
+ long i;
+ long num_pixels = src_size >> 1;
+
+ for (i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = src[2*i];
+ r = rgb&0x1F;
+ g = (rgb&0x3E0)>>5;
+ b = (rgb&0x7C00)>>10;
+ dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
+ }
}
void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size)
{
- long i;
- long num_pixels = src_size >> 1;
-
- for(i=0; i<num_pixels; i++)
- {
- unsigned b,g,r;
- register uint16_t rgb;
- rgb = src[2*i];
- r = rgb&0x1F;
- g = (rgb&0x3E0)>>5;
- b = (rgb&0x7C00)>>10;
- dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
- }
+ long i;
+ long num_pixels = src_size >> 1;
+
+ for (i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint16_t rgb;
+ rgb = src[2*i];
+ r = rgb&0x1F;
+ g = (rgb&0x3E0)>>5;
+ b = (rgb&0x7C00)>>10;
+ dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
+ }
}
void rgb8tobgr8(const uint8_t *src, uint8_t *dst, long src_size)
{
- long i;
- long num_pixels = src_size;
- for(i=0; i<num_pixels; i++)
- {
- unsigned b,g,r;
- register uint8_t rgb;
- rgb = src[i];
- r = (rgb&0x07);
- g = (rgb&0x38)>>3;
- b = (rgb&0xC0)>>6;
- dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6);
- }
+ long i;
+ long num_pixels = src_size;
+ for (i=0; i<num_pixels; i++)
+ {
+ unsigned b,g,r;
+ register uint8_t rgb;
+ rgb = src[i];
+ r = (rgb&0x07);
+ g = (rgb&0x38)>>3;
+ b = (rgb&0xC0)>>6;
+ dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6);
+ }
}
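
The sws_rgb2rgb_init() hunk above collapses three near-identical assignment blocks into per-ISA helpers (rgb2rgb_init_MMX2/3DNOW/MMX/C, presumably instantiated from rgb2rgb_template.c). A hedged sketch of that bind-once dispatch pattern, reduced to a single representative pointer with a stub body:

    /* Sketch of the function-pointer binding done by sws_rgb2rgb_init():
     * exported pointers are assigned once, based on the CPU capability
     * flags.  Only one pointer and a stub body are shown; the real init
     * helpers assign every pointer declared in rgb2rgb.h. */
    #include <stdint.h>

    void (*rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size);

    /* Stub standing in for rgb15to16_C (or the MMX/MMX2 variant). */
    static void rgb15to16_C_stub(const uint8_t *src, uint8_t *dst, long src_size)
    {
        long i;
        for (i = 0; i < src_size; i++)
            dst[i] = src[i];
    }

    static void rgb2rgb_init_C_sketch(void)
    {
        rgb15to16 = rgb15to16_C_stub;     /* the real helper binds the full set */
    }

    void sws_rgb2rgb_init_sketch(int flags)
    {
        (void)flags;                      /* real code tests SWS_CPU_CAPS_MMX2/3DNOW/MMX
                                             and calls rgb2rgb_init_MMX2() etc. first */
        rgb2rgb_init_C_sketch();          /* fall back to the plain C implementations */
    }
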
diff --git a/contrib/ffmpeg/libswscale/rgb2rgb.h b/contrib/ffmpeg/libswscale/rgb2rgb.h
index 4b5cc0a69..260732f73 100644
--- a/contrib/ffmpeg/libswscale/rgb2rgb.h
+++ b/contrib/ffmpeg/libswscale/rgb2rgb.h
@@ -1,5 +1,4 @@
/*
- *
* rgb2rgb.h, Software RGB to RGB convertor
* pluralize by Software PAL8 to RGB convertor
* Software YUV to YUV convertor
@@ -21,27 +20,27 @@
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef RGB2RGB_INCLUDED
-#define RGB2RGB_INCLUDED
+#ifndef FFMPEG_RGB2RGB_H
+#define FFMPEG_RGB2RGB_H
-// Note: do not fix the dependence on stdio.h
+#include <inttypes.h>
/* A full collection of rgb to rgb(bgr) convertors */
-extern void (*rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb24to16)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb24to15)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb32to16)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb32to15)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb15to24)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb15to32)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb16to24)(const uint8_t *src,uint8_t *dst,long src_size);
-extern void (*rgb16to32)(const uint8_t *src,uint8_t *dst,long src_size);
+extern void (*rgb24to32) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24to16) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb24to15) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32to24) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32to16) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb32to15) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb15to16) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb15to24) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb15to32) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb16to15) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb16to24) (const uint8_t *src, uint8_t *dst, long src_size);
+extern void (*rgb16to32) (const uint8_t *src, uint8_t *dst, long src_size);
extern void (*rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size);
extern void (*rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size);
extern void (*rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size);
@@ -59,7 +58,7 @@ extern void rgb15tobgr32(const uint8_t *src, uint8_t *dst, long src_size);
extern void rgb15tobgr24(const uint8_t *src, uint8_t *dst, long src_size);
extern void rgb15tobgr16(const uint8_t *src, uint8_t *dst, long src_size);
extern void rgb15tobgr15(const uint8_t *src, uint8_t *dst, long src_size);
-extern void rgb8tobgr8(const uint8_t *src, uint8_t *dst, long src_size);
+extern void rgb8tobgr8 (const uint8_t *src, uint8_t *dst, long src_size);
extern void palette8torgb32(const uint8_t *src, uint8_t *dst, long num_pixels, const uint8_t *palette);
@@ -85,16 +84,16 @@ extern void palette8tobgr15(const uint8_t *src, uint8_t *dst, long num_pixels, c
* problem for anyone then tell me, and ill fix it)
*/
extern void (*yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride);
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
/**
*
* width should be a multiple of 16
*/
extern void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride);
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
/**
*
@@ -102,8 +101,8 @@ extern void (*yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uin
* problem for anyone then tell me, and ill fix it)
*/
extern void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
- long width, long height,
- long lumStride, long chromStride, long srcStride);
+ long width, long height,
+ long lumStride, long chromStride, long srcStride);
/**
*
@@ -111,8 +110,8 @@ extern void (*yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint
* problem for anyone then tell me, and ill fix it)
*/
extern void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride);
+ long width, long height,
+ long lumStride, long chromStride, long dstStride);
/**
*
@@ -121,27 +120,27 @@ extern void (*yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_
* chrominance data is only taken from every secound line others are ignored FIXME write HQ version
*/
extern void (*rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
- long width, long height,
- long lumStride, long chromStride, long srcStride);
+ long width, long height,
+ long lumStride, long chromStride, long srcStride);
extern void (*planar2x)(const uint8_t *src, uint8_t *dst, long width, long height,
- long srcStride, long dstStride);
+ long srcStride, long dstStride);
extern void (*interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dst,
- long width, long height, long src1Stride,
- long src2Stride, long dstStride);
+ long width, long height, long src1Stride,
+ long src2Stride, long dstStride);
extern void (*vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
- uint8_t *dst1, uint8_t *dst2,
- long width, long height,
- long srcStride1, long srcStride2,
- long dstStride1, long dstStride2);
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long dstStride1, long dstStride2);
extern void (*yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
- uint8_t *dst,
- long width, long height,
- long srcStride1, long srcStride2,
- long srcStride3, long dstStride);
-
+ uint8_t *dst,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long srcStride3, long dstStride);
+
void sws_rgb2rgb_init(int flags);
-#endif
+#endif /* FFMPEG_RGB2RGB_H */
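
For orientation between the two files, here is a minimal sketch of how the converters declared in rgb2rgb.h are typically driven: sws_rgb2rgb_init() fills the function pointers according to CPU-capability flags, after which they can be called directly. The flag value 0 (plain C paths) and the buffer handling are illustrative assumptions, not part of this change.

    #include <stdint.h>
    #include <stdlib.h>
    #include "rgb2rgb.h"

    /* Illustrative use only: expand one packed RGB24 buffer to RGB32.
       src_size is in bytes of the source, i.e. width * 3 for one line. */
    static uint8_t *example_rgb24_to_rgb32(const uint8_t *rgb24, long width)
    {
        uint8_t *rgb32 = malloc(width * 4);   /* 4 output bytes per pixel */
        if (!rgb32)
            return NULL;
        sws_rgb2rgb_init(0);                  /* 0: assumed "plain C, no SIMD" flags value */
        rgb24to32(rgb24, rgb32, width * 3);   /* source size is given in bytes */
        return rgb32;
    }
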
diff --git a/contrib/ffmpeg/libswscale/rgb2rgb_template.c b/contrib/ffmpeg/libswscale/rgb2rgb_template.c
index d9511c955..4aa607d9a 100644
--- a/contrib/ffmpeg/libswscale/rgb2rgb_template.c
+++ b/contrib/ffmpeg/libswscale/rgb2rgb_template.c
@@ -1,5 +1,4 @@
/*
- *
* rgb2rgb.c, Software RGB to RGB convertor
* pluralize by Software PAL8 to RGB convertor
* Software YUV to YUV convertor
@@ -22,17 +21,17 @@
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * the C code (not assembly, mmx, ...) of this file can be used
- * under the LGPL license too
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * The C code (not assembly, mmx, ...) of this file can be used
+ * under the LGPL license.
*/
#include <stddef.h>
#include <inttypes.h> /* for __WORDSIZE */
#ifndef __WORDSIZE
-// #warning You have misconfigured system and probably will lose performance!
+// #warning You have a misconfigured system and will probably lose performance!
#define __WORDSIZE MP_WORDSIZE
#endif
@@ -53,11 +52,11 @@
#ifdef HAVE_3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
-#define PAVGB "pavgusb"
-#elif defined ( HAVE_MMX2 )
+#define PAVGB "pavgusb"
+#elif defined (HAVE_MMX2)
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
-#define PAVGB "pavgb"
+#define PAVGB "pavgb"
#else
#ifdef __APPLE__
#define PREFETCH "#"
@@ -69,7 +68,7 @@
#endif
#ifdef HAVE_3DNOW
-/* On K6 femms is faster of emms. On K7 femms is directly mapped on emms. */
+/* On K6 femms is faster than emms. On K7 femms is directly mapped on emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
@@ -83,836 +82,838 @@
#define SFENCE " # nop"
#endif
-static inline void RENAME(rgb24to32)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb24to32)(const uint8_t *src, uint8_t *dst, long src_size)
{
- uint8_t *dest = dst;
- const uint8_t *s = src;
- const uint8_t *end;
-#ifdef HAVE_MMX
- const uint8_t *mm_end;
-#endif
- end = s + src_size;
-#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- mm_end = end - 23;
- __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory");
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "punpckldq 3%1, %%mm0\n\t"
- "movd 6%1, %%mm1\n\t"
- "punpckldq 9%1, %%mm1\n\t"
- "movd 12%1, %%mm2\n\t"
- "punpckldq 15%1, %%mm2\n\t"
- "movd 18%1, %%mm3\n\t"
- "punpckldq 21%1, %%mm3\n\t"
- "pand %%mm7, %%mm0\n\t"
- "pand %%mm7, %%mm1\n\t"
- "pand %%mm7, %%mm2\n\t"
- "pand %%mm7, %%mm3\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm1, 8%0\n\t"
- MOVNTQ" %%mm2, 16%0\n\t"
- MOVNTQ" %%mm3, 24%0"
- :"=m"(*dest)
- :"m"(*s)
- :"memory");
- dest += 32;
- s += 24;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
-#ifdef WORDS_BIGENDIAN
- /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */
- *dest++ = 0;
- *dest++ = s[2];
- *dest++ = s[1];
- *dest++ = s[0];
- s+=3;
-#else
- *dest++ = *s++;
- *dest++ = *s++;
- *dest++ = *s++;
- *dest++ = 0;
-#endif
- }
+ uint8_t *dest = dst;
+ const uint8_t *s = src;
+ const uint8_t *end;
+ #ifdef HAVE_MMX
+ const uint8_t *mm_end;
+ #endif
+ end = s + src_size;
+ #ifdef HAVE_MMX
+ asm volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 23;
+ asm volatile("movq %0, %%mm7"::"m"(mask32):"memory");
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "punpckldq 3%1, %%mm0 \n\t"
+ "movd 6%1, %%mm1 \n\t"
+ "punpckldq 9%1, %%mm1 \n\t"
+ "movd 12%1, %%mm2 \n\t"
+ "punpckldq 15%1, %%mm2 \n\t"
+ "movd 18%1, %%mm3 \n\t"
+ "punpckldq 21%1, %%mm3 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm2, 16%0 \n\t"
+ MOVNTQ" %%mm3, 24%0"
+ :"=m"(*dest)
+ :"m"(*s)
+ :"memory");
+ dest += 32;
+ s += 24;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+ #endif
+ while (s < end)
+ {
+ #ifdef WORDS_BIGENDIAN
+ /* RGB24 (= R,G,B) -> RGB32 (= A,B,G,R) */
+ *dest++ = 0;
+ *dest++ = s[2];
+ *dest++ = s[1];
+ *dest++ = s[0];
+ s+=3;
+ #else
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = 0;
+ #endif
+ }
}
-static inline void RENAME(rgb32to24)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb32to24)(const uint8_t *src, uint8_t *dst, long src_size)
{
- uint8_t *dest = dst;
- const uint8_t *s = src;
- const uint8_t *end;
+ uint8_t *dest = dst;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- end = s + src_size;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- mm_end = end - 31;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq 8%1, %%mm1\n\t"
- "movq 16%1, %%mm4\n\t"
- "movq 24%1, %%mm5\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
- "movq %%mm4, %%mm6\n\t"
- "movq %%mm5, %%mm7\n\t"
- "psrlq $8, %%mm2\n\t"
- "psrlq $8, %%mm3\n\t"
- "psrlq $8, %%mm6\n\t"
- "psrlq $8, %%mm7\n\t"
- "pand %2, %%mm0\n\t"
- "pand %2, %%mm1\n\t"
- "pand %2, %%mm4\n\t"
- "pand %2, %%mm5\n\t"
- "pand %3, %%mm2\n\t"
- "pand %3, %%mm3\n\t"
- "pand %3, %%mm6\n\t"
- "pand %3, %%mm7\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm3, %%mm1\n\t"
- "por %%mm6, %%mm4\n\t"
- "por %%mm7, %%mm5\n\t"
-
- "movq %%mm1, %%mm2\n\t"
- "movq %%mm4, %%mm3\n\t"
- "psllq $48, %%mm2\n\t"
- "psllq $32, %%mm3\n\t"
- "pand %4, %%mm2\n\t"
- "pand %5, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "psrlq $16, %%mm1\n\t"
- "psrlq $32, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm3, %%mm1\n\t"
- "pand %6, %%mm5\n\t"
- "por %%mm5, %%mm4\n\t"
-
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm1, 8%0\n\t"
- MOVNTQ" %%mm4, 16%0"
- :"=m"(*dest)
- :"m"(*s),"m"(mask24l),
- "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
- :"memory");
- dest += 24;
- s += 32;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
+ asm volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 31;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm1 \n\t"
+ "movq 16%1, %%mm4 \n\t"
+ "movq 24%1, %%mm5 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+ "movq %%mm4, %%mm6 \n\t"
+ "movq %%mm5, %%mm7 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "psrlq $8, %%mm3 \n\t"
+ "psrlq $8, %%mm6 \n\t"
+ "psrlq $8, %%mm7 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm1 \n\t"
+ "pand %2, %%mm4 \n\t"
+ "pand %2, %%mm5 \n\t"
+ "pand %3, %%mm2 \n\t"
+ "pand %3, %%mm3 \n\t"
+ "pand %3, %%mm6 \n\t"
+ "pand %3, %%mm7 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm6, %%mm4 \n\t"
+ "por %%mm7, %%mm5 \n\t"
+
+ "movq %%mm1, %%mm2 \n\t"
+ "movq %%mm4, %%mm3 \n\t"
+ "psllq $48, %%mm2 \n\t"
+ "psllq $32, %%mm3 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "pand %5, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psrlq $16, %%mm1 \n\t"
+ "psrlq $32, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "pand %6, %%mm5 \n\t"
+ "por %%mm5, %%mm4 \n\t"
+
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm4, 16%0"
+ :"=m"(*dest)
+ :"m"(*s),"m"(mask24l),
+ "m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ dest += 24;
+ s += 32;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
#ifdef WORDS_BIGENDIAN
- /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */
- s++;
- dest[2] = *s++;
- dest[1] = *s++;
- dest[0] = *s++;
- dest += 3;
+ /* RGB32 (= A,B,G,R) -> RGB24 (= R,G,B) */
+ s++;
+ dest[2] = *s++;
+ dest[1] = *s++;
+ dest[0] = *s++;
+ dest += 3;
#else
- *dest++ = *s++;
- *dest++ = *s++;
- *dest++ = *s++;
- s++;
+ *dest++ = *s++;
+ *dest++ = *s++;
+ *dest++ = *s++;
+ s++;
#endif
- }
+ }
}
/*
Original by Strepto/Astral
ported to gcc & bugfixed : A'rpi
MMX2, 3DNOW optimization by Nick Kurshev
- 32bit c version, and and&add trick by Michael Niedermayer
+ 32 bit C version, and and&add trick by Michael Niedermayer
*/
-static inline void RENAME(rgb15to16)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, long src_size)
{
- register const uint8_t* s=src;
- register uint8_t* d=dst;
- register const uint8_t *end;
- const uint8_t *mm_end;
- end = s + src_size;
+ register const uint8_t* s=src;
+ register uint8_t* d=dst;
+ register const uint8_t *end;
+ const uint8_t *mm_end;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s));
- __asm __volatile("movq %0, %%mm4"::"m"(mask15s));
- mm_end = end - 15;
- while(s<mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq 8%1, %%mm2\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm2, %%mm3\n\t"
- "pand %%mm4, %%mm0\n\t"
- "pand %%mm4, %%mm2\n\t"
- "paddw %%mm1, %%mm0\n\t"
- "paddw %%mm3, %%mm2\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm2, 8%0"
- :"=m"(*d)
- :"m"(*s)
- );
- d+=16;
- s+=16;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ asm volatile(PREFETCH" %0"::"m"(*s));
+ asm volatile("movq %0, %%mm4"::"m"(mask15s));
+ mm_end = end - 15;
+ while (s<mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "pand %%mm4, %%mm0 \n\t"
+ "pand %%mm4, %%mm2 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm2, 8%0"
+ :"=m"(*d)
+ :"m"(*s)
+ );
+ d+=16;
+ s+=16;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
#endif
mm_end = end - 3;
- while(s < mm_end)
+ while (s < mm_end)
{
- register unsigned x= *((uint32_t *)s);
- *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
- d+=4;
- s+=4;
+ register unsigned x= *((uint32_t *)s);
+ *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
+ d+=4;
+ s+=4;
}
- if(s < end)
+ if (s < end)
{
- register unsigned short x= *((uint16_t *)s);
- *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
+ register unsigned short x= *((uint16_t *)s);
+ *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
}
}
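
The "and&add trick" credited in the comment above can be read off the scalar tail of this function: masking with 0x7FFF keeps blue in place, and adding the red/green field (0x7FE0) to itself shifts those two components left by one bit, which is the whole RGB555 to RGB565 repacking (the new green LSB stays zero). A standalone, purely illustrative sketch of the same idea:

    #include <stdint.h>

    /* One RGB555 word to RGB565 via the and&add trick:
       B stays at bits 0-4; adding the G|R field to itself moves
       G to bits 6-10 and R to bits 11-15 (bit 5 is left zero). */
    static uint16_t rgb15to16_pixel(uint16_t x)
    {
        return (x & 0x7FFF) + (x & 0x7FE0);
    }
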
-static inline void RENAME(rgb16to15)(const uint8_t *src,uint8_t *dst,long src_size)
+static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, long src_size)
{
- register const uint8_t* s=src;
- register uint8_t* d=dst;
- register const uint8_t *end;
- const uint8_t *mm_end;
- end = s + src_size;
+ register const uint8_t* s=src;
+ register uint8_t* d=dst;
+ register const uint8_t *end;
+ const uint8_t *mm_end;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s));
- __asm __volatile("movq %0, %%mm7"::"m"(mask15rg));
- __asm __volatile("movq %0, %%mm6"::"m"(mask15b));
- mm_end = end - 15;
- while(s<mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq 8%1, %%mm2\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm2, %%mm3\n\t"
- "psrlq $1, %%mm0\n\t"
- "psrlq $1, %%mm2\n\t"
- "pand %%mm7, %%mm0\n\t"
- "pand %%mm7, %%mm2\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm3\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm3, %%mm2\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm2, 8%0"
- :"=m"(*d)
- :"m"(*s)
- );
- d+=16;
- s+=16;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ asm volatile(PREFETCH" %0"::"m"(*s));
+ asm volatile("movq %0, %%mm7"::"m"(mask15rg));
+ asm volatile("movq %0, %%mm6"::"m"(mask15b));
+ mm_end = end - 15;
+ while (s<mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $1, %%mm0 \n\t"
+ "psrlq $1, %%mm2 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm3, %%mm2 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm2, 8%0"
+ :"=m"(*d)
+ :"m"(*s)
+ );
+ d+=16;
+ s+=16;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
#endif
mm_end = end - 3;
- while(s < mm_end)
+ while (s < mm_end)
{
- register uint32_t x= *((uint32_t *)s);
- *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
- s+=4;
- d+=4;
+ register uint32_t x= *((uint32_t *)s);
+ *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
+ s+=4;
+ d+=4;
}
- if(s < end)
+ if (s < end)
{
- register uint16_t x= *((uint16_t *)s);
- *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
- s+=2;
- d+=2;
+ register uint16_t x= *((uint16_t *)s);
+ *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
+ s+=2;
+ d+=2;
}
}
static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint8_t *s = src;
- const uint8_t *end;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- uint16_t *d = (uint16_t *)dst;
- end = s + src_size;
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
#ifdef HAVE_MMX
- mm_end = end - 15;
-#if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which cpus this is faster, on Athlon its slightly faster)
- asm volatile(
- "movq %3, %%mm5 \n\t"
- "movq %4, %%mm6 \n\t"
- "movq %5, %%mm7 \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 32(%1) \n\t"
- "movd (%1), %%mm0 \n\t"
- "movd 4(%1), %%mm3 \n\t"
- "punpckldq 8(%1), %%mm0 \n\t"
- "punpckldq 12(%1), %%mm3 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm3, %%mm4 \n\t"
- "pand %%mm6, %%mm0 \n\t"
- "pand %%mm6, %%mm3 \n\t"
- "pmaddwd %%mm7, %%mm0 \n\t"
- "pmaddwd %%mm7, %%mm3 \n\t"
- "pand %%mm5, %%mm1 \n\t"
- "pand %%mm5, %%mm4 \n\t"
- "por %%mm1, %%mm0 \n\t"
- "por %%mm4, %%mm3 \n\t"
- "psrld $5, %%mm0 \n\t"
- "pslld $11, %%mm3 \n\t"
- "por %%mm3, %%mm0 \n\t"
- MOVNTQ" %%mm0, (%0) \n\t"
- "add $16, %1 \n\t"
- "add $8, %0 \n\t"
- "cmp %2, %1 \n\t"
- " jb 1b \n\t"
- : "+r" (d), "+r"(s)
- : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
- );
+ mm_end = end - 15;
+#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
+ asm volatile(
+ "movq %3, %%mm5 \n\t"
+ "movq %4, %%mm6 \n\t"
+ "movq %5, %%mm7 \n\t"
+ "jmp 2f \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1) \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd 4(%1), %%mm3 \n\t"
+ "punpckldq 8(%1), %%mm0 \n\t"
+ "punpckldq 12(%1), %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "pmaddwd %%mm7, %%mm0 \n\t"
+ "pmaddwd %%mm7, %%mm3 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm5, %%mm4 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "psrld $5, %%mm0 \n\t"
+ "pslld $11, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, (%0) \n\t"
+ "add $16, %1 \n\t"
+ "add $8, %0 \n\t"
+ "2: \n\t"
+ "cmp %2, %1 \n\t"
+ " jb 1b \n\t"
+ : "+r" (d), "+r"(s)
+ : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
+ );
#else
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6\n\t"
- ::"m"(red_16mask),"m"(green_16mask));
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "movd 4%1, %%mm3\n\t"
- "punpckldq 8%1, %%mm0\n\t"
- "punpckldq 12%1, %%mm3\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm3, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "psrlq $3, %%mm0\n\t"
- "psrlq $3, %%mm3\n\t"
- "pand %2, %%mm0\n\t"
- "pand %2, %%mm3\n\t"
- "psrlq $5, %%mm1\n\t"
- "psrlq $5, %%mm4\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm4\n\t"
- "psrlq $8, %%mm2\n\t"
- "psrlq $8, %%mm5\n\t"
- "pand %%mm7, %%mm2\n\t"
- "pand %%mm7, %%mm5\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm5, %%mm3\n\t"
- "psllq $16, %%mm3\n\t"
- "por %%mm3, %%mm0\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
- d += 4;
- s += 16;
- }
-#endif
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- register int rgb = *(uint32_t*)s; s += 4;
- *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
- }
+ asm volatile(PREFETCH" %0"::"m"(*src):"memory");
+ asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 4%1, %%mm3 \n\t"
+ "punpckldq 8%1, %%mm0 \n\t"
+ "punpckldq 12%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psrlq $3, %%mm0 \n\t"
+ "psrlq $3, %%mm3 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm3 \n\t"
+ "psrlq $5, %%mm1 \n\t"
+ "psrlq $5, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "psrlq $8, %%mm5 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 16;
+ }
+#endif
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
+ }
}
static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint8_t *s = src;
- const uint8_t *end;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- uint16_t *d = (uint16_t *)dst;
- end = s + src_size;
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6\n\t"
- ::"m"(red_16mask),"m"(green_16mask));
- mm_end = end - 15;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "movd 4%1, %%mm3\n\t"
- "punpckldq 8%1, %%mm0\n\t"
- "punpckldq 12%1, %%mm3\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm3, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "psllq $8, %%mm0\n\t"
- "psllq $8, %%mm3\n\t"
- "pand %%mm7, %%mm0\n\t"
- "pand %%mm7, %%mm3\n\t"
- "psrlq $5, %%mm1\n\t"
- "psrlq $5, %%mm4\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm4\n\t"
- "psrlq $19, %%mm2\n\t"
- "psrlq $19, %%mm5\n\t"
- "pand %2, %%mm2\n\t"
- "pand %2, %%mm5\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm5, %%mm3\n\t"
- "psllq $16, %%mm3\n\t"
- "por %%mm3, %%mm0\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
- d += 4;
- s += 16;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- register int rgb = *(uint32_t*)s; s += 4;
- *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
- }
+ asm volatile(PREFETCH" %0"::"m"(*src):"memory");
+ asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 15;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 4%1, %%mm3 \n\t"
+ "punpckldq 8%1, %%mm0 \n\t"
+ "punpckldq 12%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psllq $8, %%mm0 \n\t"
+ "psllq $8, %%mm3 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "psrlq $5, %%mm1 \n\t"
+ "psrlq $5, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $19, %%mm2 \n\t"
+ "psrlq $19, %%mm5 \n\t"
+ "pand %2, %%mm2 \n\t"
+ "pand %2, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 16;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
+ }
}
static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint8_t *s = src;
- const uint8_t *end;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- uint16_t *d = (uint16_t *)dst;
- end = s + src_size;
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
#ifdef HAVE_MMX
- mm_end = end - 15;
-#if 1 //is faster only if multiplies are reasonable fast (FIXME figure out on which cpus this is faster, on Athlon its slightly faster)
- asm volatile(
- "movq %3, %%mm5 \n\t"
- "movq %4, %%mm6 \n\t"
- "movq %5, %%mm7 \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 32(%1) \n\t"
- "movd (%1), %%mm0 \n\t"
- "movd 4(%1), %%mm3 \n\t"
- "punpckldq 8(%1), %%mm0 \n\t"
- "punpckldq 12(%1), %%mm3 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm3, %%mm4 \n\t"
- "pand %%mm6, %%mm0 \n\t"
- "pand %%mm6, %%mm3 \n\t"
- "pmaddwd %%mm7, %%mm0 \n\t"
- "pmaddwd %%mm7, %%mm3 \n\t"
- "pand %%mm5, %%mm1 \n\t"
- "pand %%mm5, %%mm4 \n\t"
- "por %%mm1, %%mm0 \n\t"
- "por %%mm4, %%mm3 \n\t"
- "psrld $6, %%mm0 \n\t"
- "pslld $10, %%mm3 \n\t"
- "por %%mm3, %%mm0 \n\t"
- MOVNTQ" %%mm0, (%0) \n\t"
- "add $16, %1 \n\t"
- "add $8, %0 \n\t"
- "cmp %2, %1 \n\t"
- " jb 1b \n\t"
- : "+r" (d), "+r"(s)
- : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
- );
+ mm_end = end - 15;
+#if 1 //is faster only if multiplies are reasonably fast (FIXME figure out on which CPUs this is faster, on Athlon it is slightly faster)
+ asm volatile(
+ "movq %3, %%mm5 \n\t"
+ "movq %4, %%mm6 \n\t"
+ "movq %5, %%mm7 \n\t"
+ "jmp 2f \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1) \n\t"
+ "movd (%1), %%mm0 \n\t"
+ "movd 4(%1), %%mm3 \n\t"
+ "punpckldq 8(%1), %%mm0 \n\t"
+ "punpckldq 12(%1), %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "pmaddwd %%mm7, %%mm0 \n\t"
+ "pmaddwd %%mm7, %%mm3 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm5, %%mm4 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "psrld $6, %%mm0 \n\t"
+ "pslld $10, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, (%0) \n\t"
+ "add $16, %1 \n\t"
+ "add $8, %0 \n\t"
+ "2: \n\t"
+ "cmp %2, %1 \n\t"
+ " jb 1b \n\t"
+ : "+r" (d), "+r"(s)
+ : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
+ );
#else
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6\n\t"
- ::"m"(red_15mask),"m"(green_15mask));
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "movd 4%1, %%mm3\n\t"
- "punpckldq 8%1, %%mm0\n\t"
- "punpckldq 12%1, %%mm3\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm3, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "psrlq $3, %%mm0\n\t"
- "psrlq $3, %%mm3\n\t"
- "pand %2, %%mm0\n\t"
- "pand %2, %%mm3\n\t"
- "psrlq $6, %%mm1\n\t"
- "psrlq $6, %%mm4\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm4\n\t"
- "psrlq $9, %%mm2\n\t"
- "psrlq $9, %%mm5\n\t"
- "pand %%mm7, %%mm2\n\t"
- "pand %%mm7, %%mm5\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm5, %%mm3\n\t"
- "psllq $16, %%mm3\n\t"
- "por %%mm3, %%mm0\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
- d += 4;
- s += 16;
- }
-#endif
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- register int rgb = *(uint32_t*)s; s += 4;
- *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
- }
+ asm volatile(PREFETCH" %0"::"m"(*src):"memory");
+ asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 4%1, %%mm3 \n\t"
+ "punpckldq 8%1, %%mm0 \n\t"
+ "punpckldq 12%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psrlq $3, %%mm0 \n\t"
+ "psrlq $3, %%mm3 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm3 \n\t"
+ "psrlq $6, %%mm1 \n\t"
+ "psrlq $6, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $9, %%mm2 \n\t"
+ "psrlq $9, %%mm5 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 16;
+ }
+#endif
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
+ }
}
static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint8_t *s = src;
- const uint8_t *end;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- uint16_t *d = (uint16_t *)dst;
- end = s + src_size;
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6\n\t"
- ::"m"(red_15mask),"m"(green_15mask));
- mm_end = end - 15;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "movd 4%1, %%mm3\n\t"
- "punpckldq 8%1, %%mm0\n\t"
- "punpckldq 12%1, %%mm3\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm3, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "psllq $7, %%mm0\n\t"
- "psllq $7, %%mm3\n\t"
- "pand %%mm7, %%mm0\n\t"
- "pand %%mm7, %%mm3\n\t"
- "psrlq $6, %%mm1\n\t"
- "psrlq $6, %%mm4\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm4\n\t"
- "psrlq $19, %%mm2\n\t"
- "psrlq $19, %%mm5\n\t"
- "pand %2, %%mm2\n\t"
- "pand %2, %%mm5\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm5, %%mm3\n\t"
- "psllq $16, %%mm3\n\t"
- "por %%mm3, %%mm0\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
- d += 4;
- s += 16;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- register int rgb = *(uint32_t*)s; s += 4;
- *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
- }
+ asm volatile(PREFETCH" %0"::"m"(*src):"memory");
+ asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 15;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 4%1, %%mm3 \n\t"
+ "punpckldq 8%1, %%mm0 \n\t"
+ "punpckldq 12%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psllq $7, %%mm0 \n\t"
+ "psllq $7, %%mm3 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "psrlq $6, %%mm1 \n\t"
+ "psrlq $6, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $19, %%mm2 \n\t"
+ "psrlq $19, %%mm5 \n\t"
+ "pand %2, %%mm2 \n\t"
+ "pand %2, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 16;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ register int rgb = *(uint32_t*)s; s += 4;
+ *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
+ }
}
static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint8_t *s = src;
- const uint8_t *end;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- uint16_t *d = (uint16_t *)dst;
- end = s + src_size;
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6\n\t"
- ::"m"(red_16mask),"m"(green_16mask));
- mm_end = end - 11;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "movd 3%1, %%mm3\n\t"
- "punpckldq 6%1, %%mm0\n\t"
- "punpckldq 9%1, %%mm3\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm3, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "psrlq $3, %%mm0\n\t"
- "psrlq $3, %%mm3\n\t"
- "pand %2, %%mm0\n\t"
- "pand %2, %%mm3\n\t"
- "psrlq $5, %%mm1\n\t"
- "psrlq $5, %%mm4\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm4\n\t"
- "psrlq $8, %%mm2\n\t"
- "psrlq $8, %%mm5\n\t"
- "pand %%mm7, %%mm2\n\t"
- "pand %%mm7, %%mm5\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm5, %%mm3\n\t"
- "psllq $16, %%mm3\n\t"
- "por %%mm3, %%mm0\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
- d += 4;
- s += 12;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- const int b= *s++;
- const int g= *s++;
- const int r= *s++;
- *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
- }
+ asm volatile(PREFETCH" %0"::"m"(*src):"memory");
+ asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 11;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 3%1, %%mm3 \n\t"
+ "punpckldq 6%1, %%mm0 \n\t"
+ "punpckldq 9%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psrlq $3, %%mm0 \n\t"
+ "psrlq $3, %%mm3 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm3 \n\t"
+ "psrlq $5, %%mm1 \n\t"
+ "psrlq $5, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "psrlq $8, %%mm5 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ const int b = *s++;
+ const int g = *s++;
+ const int r = *s++;
+ *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
+ }
}
static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint8_t *s = src;
- const uint8_t *end;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- uint16_t *d = (uint16_t *)dst;
- end = s + src_size;
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6\n\t"
- ::"m"(red_16mask),"m"(green_16mask));
- mm_end = end - 15;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "movd 3%1, %%mm3\n\t"
- "punpckldq 6%1, %%mm0\n\t"
- "punpckldq 9%1, %%mm3\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm3, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "psllq $8, %%mm0\n\t"
- "psllq $8, %%mm3\n\t"
- "pand %%mm7, %%mm0\n\t"
- "pand %%mm7, %%mm3\n\t"
- "psrlq $5, %%mm1\n\t"
- "psrlq $5, %%mm4\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm4\n\t"
- "psrlq $19, %%mm2\n\t"
- "psrlq $19, %%mm5\n\t"
- "pand %2, %%mm2\n\t"
- "pand %2, %%mm5\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm5, %%mm3\n\t"
- "psllq $16, %%mm3\n\t"
- "por %%mm3, %%mm0\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
- d += 4;
- s += 12;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- const int r= *s++;
- const int g= *s++;
- const int b= *s++;
- *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
- }
+ asm volatile(PREFETCH" %0"::"m"(*src):"memory");
+ asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_16mask),"m"(green_16mask));
+ mm_end = end - 15;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 3%1, %%mm3 \n\t"
+ "punpckldq 6%1, %%mm0 \n\t"
+ "punpckldq 9%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psllq $8, %%mm0 \n\t"
+ "psllq $8, %%mm3 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "psrlq $5, %%mm1 \n\t"
+ "psrlq $5, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $19, %%mm2 \n\t"
+ "psrlq $19, %%mm5 \n\t"
+ "pand %2, %%mm2 \n\t"
+ "pand %2, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_16mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ const int r = *s++;
+ const int g = *s++;
+ const int b = *s++;
+ *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
+ }
}
static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint8_t *s = src;
- const uint8_t *end;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- uint16_t *d = (uint16_t *)dst;
- end = s + src_size;
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6\n\t"
- ::"m"(red_15mask),"m"(green_15mask));
- mm_end = end - 11;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "movd 3%1, %%mm3\n\t"
- "punpckldq 6%1, %%mm0\n\t"
- "punpckldq 9%1, %%mm3\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm3, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "psrlq $3, %%mm0\n\t"
- "psrlq $3, %%mm3\n\t"
- "pand %2, %%mm0\n\t"
- "pand %2, %%mm3\n\t"
- "psrlq $6, %%mm1\n\t"
- "psrlq $6, %%mm4\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm4\n\t"
- "psrlq $9, %%mm2\n\t"
- "psrlq $9, %%mm5\n\t"
- "pand %%mm7, %%mm2\n\t"
- "pand %%mm7, %%mm5\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm5, %%mm3\n\t"
- "psllq $16, %%mm3\n\t"
- "por %%mm3, %%mm0\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
- d += 4;
- s += 12;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- const int b= *s++;
- const int g= *s++;
- const int r= *s++;
- *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
- }
+ asm volatile(PREFETCH" %0"::"m"(*src):"memory");
+ asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 11;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 3%1, %%mm3 \n\t"
+ "punpckldq 6%1, %%mm0 \n\t"
+ "punpckldq 9%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psrlq $3, %%mm0 \n\t"
+ "psrlq $3, %%mm3 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm3 \n\t"
+ "psrlq $6, %%mm1 \n\t"
+ "psrlq $6, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $9, %%mm2 \n\t"
+ "psrlq $9, %%mm5 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "pand %%mm7, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ const int b = *s++;
+ const int g = *s++;
+ const int r = *s++;
+ *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
+ }
}
static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint8_t *s = src;
- const uint8_t *end;
+ const uint8_t *s = src;
+ const uint8_t *end;
#ifdef HAVE_MMX
- const uint8_t *mm_end;
+ const uint8_t *mm_end;
#endif
- uint16_t *d = (uint16_t *)dst;
- end = s + src_size;
+ uint16_t *d = (uint16_t *)dst;
+ end = s + src_size;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*src):"memory");
- __asm __volatile(
- "movq %0, %%mm7\n\t"
- "movq %1, %%mm6\n\t"
- ::"m"(red_15mask),"m"(green_15mask));
- mm_end = end - 15;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movd %1, %%mm0\n\t"
- "movd 3%1, %%mm3\n\t"
- "punpckldq 6%1, %%mm0\n\t"
- "punpckldq 9%1, %%mm3\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm3, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "psllq $7, %%mm0\n\t"
- "psllq $7, %%mm3\n\t"
- "pand %%mm7, %%mm0\n\t"
- "pand %%mm7, %%mm3\n\t"
- "psrlq $6, %%mm1\n\t"
- "psrlq $6, %%mm4\n\t"
- "pand %%mm6, %%mm1\n\t"
- "pand %%mm6, %%mm4\n\t"
- "psrlq $19, %%mm2\n\t"
- "psrlq $19, %%mm5\n\t"
- "pand %2, %%mm2\n\t"
- "pand %2, %%mm5\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm5, %%mm3\n\t"
- "psllq $16, %%mm3\n\t"
- "por %%mm3, %%mm0\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
- d += 4;
- s += 12;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- const int r= *s++;
- const int g= *s++;
- const int b= *s++;
- *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
- }
+ asm volatile(PREFETCH" %0"::"m"(*src):"memory");
+ asm volatile(
+ "movq %0, %%mm7 \n\t"
+ "movq %1, %%mm6 \n\t"
+ ::"m"(red_15mask),"m"(green_15mask));
+ mm_end = end - 15;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movd %1, %%mm0 \n\t"
+ "movd 3%1, %%mm3 \n\t"
+ "punpckldq 6%1, %%mm0 \n\t"
+ "punpckldq 9%1, %%mm3 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm3, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "psllq $7, %%mm0 \n\t"
+ "psllq $7, %%mm3 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm7, %%mm3 \n\t"
+ "psrlq $6, %%mm1 \n\t"
+ "psrlq $6, %%mm4 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "psrlq $19, %%mm2 \n\t"
+ "psrlq $19, %%mm5 \n\t"
+ "pand %2, %%mm2 \n\t"
+ "pand %2, %%mm5 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "psllq $16, %%mm3 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ :"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
+ d += 4;
+ s += 12;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ const int r = *s++;
+ const int g = *s++;
+ const int b = *s++;
+ *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
+ }
}
/*
- I use here less accurate approximation by simply
- left-shifting the input
- value and filling the low order bits with
- zeroes. This method improves png's
- compression but this scheme cannot reproduce white exactly, since it does not
- generate an all-ones maximum value; the net effect is to darken the
+ I use less accurate approximation here by simply left-shifting the input
+ value and filling the low order bits with zeroes. This method improves PNG
+ compression but this scheme cannot reproduce white exactly, since it does
+ not generate an all-ones maximum value; the net effect is to darken the
image slightly.
The better method should be "left bit replication":
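
For readers without the rest of the comment (the hunk ends here), "left bit replication" means copying the top bits of each 5-bit component into the vacated low bits, so a full-scale input maps to 0xFF rather than 0xF8. A hedged sketch of that alternative; it is not what the code in this change implements, which uses the plain left shift described above:

    #include <stdint.h>

    /* Expand the three 5-bit components of a 15-bit pixel to 8 bits by
       bit replication: (v << 3) | (v >> 2) maps 0x1F to 0xFF exactly. */
    static void rgb15_components(uint16_t px, uint8_t *c0, uint8_t *c1, uint8_t *c2)
    {
        uint8_t v0 =  px        & 0x1F;
        uint8_t v1 = (px >>  5) & 0x1F;
        uint8_t v2 = (px >> 10) & 0x1F;
        *c0 = (v0 << 3) | (v0 >> 2);
        *c1 = (v1 << 3) | (v1 >> 2);
        *c2 = (v2 << 3) | (v2 >> 2);
    }
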
@@ -931,1758 +932,1806 @@ static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, long s
*/
static inline void RENAME(rgb15to24)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint16_t *end;
+ const uint16_t *end;
#ifdef HAVE_MMX
- const uint16_t *mm_end;
+ const uint16_t *mm_end;
#endif
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (uint16_t *)src;
- end = s + src_size/2;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- mm_end = end - 7;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq %1, %%mm1\n\t"
- "movq %1, %%mm2\n\t"
- "pand %2, %%mm0\n\t"
- "pand %3, %%mm1\n\t"
- "pand %4, %%mm2\n\t"
- "psllq $3, %%mm0\n\t"
- "psrlq $2, %%mm1\n\t"
- "psrlq $7, %%mm2\n\t"
- "movq %%mm0, %%mm3\n\t"
- "movq %%mm1, %%mm4\n\t"
- "movq %%mm2, %%mm5\n\t"
- "punpcklwd %5, %%mm0\n\t"
- "punpcklwd %5, %%mm1\n\t"
- "punpcklwd %5, %%mm2\n\t"
- "punpckhwd %5, %%mm3\n\t"
- "punpckhwd %5, %%mm4\n\t"
- "punpckhwd %5, %%mm5\n\t"
- "psllq $8, %%mm1\n\t"
- "psllq $16, %%mm2\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm2, %%mm0\n\t"
- "psllq $8, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm5, %%mm3\n\t"
-
- "movq %%mm0, %%mm6\n\t"
- "movq %%mm3, %%mm7\n\t"
-
- "movq 8%1, %%mm0\n\t"
- "movq 8%1, %%mm1\n\t"
- "movq 8%1, %%mm2\n\t"
- "pand %2, %%mm0\n\t"
- "pand %3, %%mm1\n\t"
- "pand %4, %%mm2\n\t"
- "psllq $3, %%mm0\n\t"
- "psrlq $2, %%mm1\n\t"
- "psrlq $7, %%mm2\n\t"
- "movq %%mm0, %%mm3\n\t"
- "movq %%mm1, %%mm4\n\t"
- "movq %%mm2, %%mm5\n\t"
- "punpcklwd %5, %%mm0\n\t"
- "punpcklwd %5, %%mm1\n\t"
- "punpcklwd %5, %%mm2\n\t"
- "punpckhwd %5, %%mm3\n\t"
- "punpckhwd %5, %%mm4\n\t"
- "punpckhwd %5, %%mm5\n\t"
- "psllq $8, %%mm1\n\t"
- "psllq $16, %%mm2\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm2, %%mm0\n\t"
- "psllq $8, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm5, %%mm3\n\t"
-
- :"=m"(*d)
- :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
- :"memory");
- /* Borrowed 32 to 24 */
- __asm __volatile(
- "movq %%mm0, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "movq %%mm6, %%mm0\n\t"
- "movq %%mm7, %%mm1\n\t"
-
- "movq %%mm4, %%mm6\n\t"
- "movq %%mm5, %%mm7\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
-
- "psrlq $8, %%mm2\n\t"
- "psrlq $8, %%mm3\n\t"
- "psrlq $8, %%mm6\n\t"
- "psrlq $8, %%mm7\n\t"
- "pand %2, %%mm0\n\t"
- "pand %2, %%mm1\n\t"
- "pand %2, %%mm4\n\t"
- "pand %2, %%mm5\n\t"
- "pand %3, %%mm2\n\t"
- "pand %3, %%mm3\n\t"
- "pand %3, %%mm6\n\t"
- "pand %3, %%mm7\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm3, %%mm1\n\t"
- "por %%mm6, %%mm4\n\t"
- "por %%mm7, %%mm5\n\t"
-
- "movq %%mm1, %%mm2\n\t"
- "movq %%mm4, %%mm3\n\t"
- "psllq $48, %%mm2\n\t"
- "psllq $32, %%mm3\n\t"
- "pand %4, %%mm2\n\t"
- "pand %5, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "psrlq $16, %%mm1\n\t"
- "psrlq $32, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm3, %%mm1\n\t"
- "pand %6, %%mm5\n\t"
- "por %%mm5, %%mm4\n\t"
-
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm1, 8%0\n\t"
- MOVNTQ" %%mm4, 16%0"
-
- :"=m"(*d)
- :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
- :"memory");
- d += 24;
- s += 8;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- register uint16_t bgr;
- bgr = *s++;
- *d++ = (bgr&0x1F)<<3;
- *d++ = (bgr&0x3E0)>>2;
- *d++ = (bgr&0x7C00)>>7;
- }
+ asm volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 7;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ "movq %1, %%mm2 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %3, %%mm1 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "psllq $3, %%mm0 \n\t"
+ "psrlq $2, %%mm1 \n\t"
+ "psrlq $7, %%mm2 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "movq %%mm2, %%mm5 \n\t"
+ "punpcklwd %5, %%mm0 \n\t"
+ "punpcklwd %5, %%mm1 \n\t"
+ "punpcklwd %5, %%mm2 \n\t"
+ "punpckhwd %5, %%mm3 \n\t"
+ "punpckhwd %5, %%mm4 \n\t"
+ "punpckhwd %5, %%mm5 \n\t"
+ "psllq $8, %%mm1 \n\t"
+ "psllq $16, %%mm2 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psllq $8, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+
+ "movq %%mm0, %%mm6 \n\t"
+ "movq %%mm3, %%mm7 \n\t"
+
+ "movq 8%1, %%mm0 \n\t"
+ "movq 8%1, %%mm1 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %3, %%mm1 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "psllq $3, %%mm0 \n\t"
+ "psrlq $2, %%mm1 \n\t"
+ "psrlq $7, %%mm2 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "movq %%mm2, %%mm5 \n\t"
+ "punpcklwd %5, %%mm0 \n\t"
+ "punpcklwd %5, %%mm1 \n\t"
+ "punpcklwd %5, %%mm2 \n\t"
+ "punpckhwd %5, %%mm3 \n\t"
+ "punpckhwd %5, %%mm4 \n\t"
+ "punpckhwd %5, %%mm5 \n\t"
+ "psllq $8, %%mm1 \n\t"
+ "psllq $16, %%mm2 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psllq $8, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
+ :"memory");
+ /* Borrowed 32 to 24 */
+ asm volatile(
+ "movq %%mm0, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "movq %%mm6, %%mm0 \n\t"
+ "movq %%mm7, %%mm1 \n\t"
+
+ "movq %%mm4, %%mm6 \n\t"
+ "movq %%mm5, %%mm7 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+
+ "psrlq $8, %%mm2 \n\t"
+ "psrlq $8, %%mm3 \n\t"
+ "psrlq $8, %%mm6 \n\t"
+ "psrlq $8, %%mm7 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm1 \n\t"
+ "pand %2, %%mm4 \n\t"
+ "pand %2, %%mm5 \n\t"
+ "pand %3, %%mm2 \n\t"
+ "pand %3, %%mm3 \n\t"
+ "pand %3, %%mm6 \n\t"
+ "pand %3, %%mm7 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm6, %%mm4 \n\t"
+ "por %%mm7, %%mm5 \n\t"
+
+ "movq %%mm1, %%mm2 \n\t"
+ "movq %%mm4, %%mm3 \n\t"
+ "psllq $48, %%mm2 \n\t"
+ "psllq $32, %%mm3 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "pand %5, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psrlq $16, %%mm1 \n\t"
+ "psrlq $32, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "pand %6, %%mm5 \n\t"
+ "por %%mm5, %%mm4 \n\t"
+
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm4, 16%0"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ d += 24;
+ s += 8;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ }
}
static inline void RENAME(rgb16to24)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint16_t *end;
+ const uint16_t *end;
#ifdef HAVE_MMX
- const uint16_t *mm_end;
+ const uint16_t *mm_end;
#endif
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (const uint16_t *)src;
- end = s + src_size/2;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- mm_end = end - 7;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq %1, %%mm1\n\t"
- "movq %1, %%mm2\n\t"
- "pand %2, %%mm0\n\t"
- "pand %3, %%mm1\n\t"
- "pand %4, %%mm2\n\t"
- "psllq $3, %%mm0\n\t"
- "psrlq $3, %%mm1\n\t"
- "psrlq $8, %%mm2\n\t"
- "movq %%mm0, %%mm3\n\t"
- "movq %%mm1, %%mm4\n\t"
- "movq %%mm2, %%mm5\n\t"
- "punpcklwd %5, %%mm0\n\t"
- "punpcklwd %5, %%mm1\n\t"
- "punpcklwd %5, %%mm2\n\t"
- "punpckhwd %5, %%mm3\n\t"
- "punpckhwd %5, %%mm4\n\t"
- "punpckhwd %5, %%mm5\n\t"
- "psllq $8, %%mm1\n\t"
- "psllq $16, %%mm2\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm2, %%mm0\n\t"
- "psllq $8, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm5, %%mm3\n\t"
-
- "movq %%mm0, %%mm6\n\t"
- "movq %%mm3, %%mm7\n\t"
-
- "movq 8%1, %%mm0\n\t"
- "movq 8%1, %%mm1\n\t"
- "movq 8%1, %%mm2\n\t"
- "pand %2, %%mm0\n\t"
- "pand %3, %%mm1\n\t"
- "pand %4, %%mm2\n\t"
- "psllq $3, %%mm0\n\t"
- "psrlq $3, %%mm1\n\t"
- "psrlq $8, %%mm2\n\t"
- "movq %%mm0, %%mm3\n\t"
- "movq %%mm1, %%mm4\n\t"
- "movq %%mm2, %%mm5\n\t"
- "punpcklwd %5, %%mm0\n\t"
- "punpcklwd %5, %%mm1\n\t"
- "punpcklwd %5, %%mm2\n\t"
- "punpckhwd %5, %%mm3\n\t"
- "punpckhwd %5, %%mm4\n\t"
- "punpckhwd %5, %%mm5\n\t"
- "psllq $8, %%mm1\n\t"
- "psllq $16, %%mm2\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm2, %%mm0\n\t"
- "psllq $8, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm5, %%mm3\n\t"
- :"=m"(*d)
- :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
- :"memory");
- /* Borrowed 32 to 24 */
- __asm __volatile(
- "movq %%mm0, %%mm4\n\t"
- "movq %%mm3, %%mm5\n\t"
- "movq %%mm6, %%mm0\n\t"
- "movq %%mm7, %%mm1\n\t"
-
- "movq %%mm4, %%mm6\n\t"
- "movq %%mm5, %%mm7\n\t"
- "movq %%mm0, %%mm2\n\t"
- "movq %%mm1, %%mm3\n\t"
-
- "psrlq $8, %%mm2\n\t"
- "psrlq $8, %%mm3\n\t"
- "psrlq $8, %%mm6\n\t"
- "psrlq $8, %%mm7\n\t"
- "pand %2, %%mm0\n\t"
- "pand %2, %%mm1\n\t"
- "pand %2, %%mm4\n\t"
- "pand %2, %%mm5\n\t"
- "pand %3, %%mm2\n\t"
- "pand %3, %%mm3\n\t"
- "pand %3, %%mm6\n\t"
- "pand %3, %%mm7\n\t"
- "por %%mm2, %%mm0\n\t"
- "por %%mm3, %%mm1\n\t"
- "por %%mm6, %%mm4\n\t"
- "por %%mm7, %%mm5\n\t"
-
- "movq %%mm1, %%mm2\n\t"
- "movq %%mm4, %%mm3\n\t"
- "psllq $48, %%mm2\n\t"
- "psllq $32, %%mm3\n\t"
- "pand %4, %%mm2\n\t"
- "pand %5, %%mm3\n\t"
- "por %%mm2, %%mm0\n\t"
- "psrlq $16, %%mm1\n\t"
- "psrlq $32, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm3, %%mm1\n\t"
- "pand %6, %%mm5\n\t"
- "por %%mm5, %%mm4\n\t"
-
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm1, 8%0\n\t"
- MOVNTQ" %%mm4, 16%0"
-
- :"=m"(*d)
- :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
- :"memory");
- d += 24;
- s += 8;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- register uint16_t bgr;
- bgr = *s++;
- *d++ = (bgr&0x1F)<<3;
- *d++ = (bgr&0x7E0)>>3;
- *d++ = (bgr&0xF800)>>8;
- }
+ asm volatile(PREFETCH" %0"::"m"(*s):"memory");
+ mm_end = end - 7;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ "movq %1, %%mm2 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %3, %%mm1 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "psllq $3, %%mm0 \n\t"
+ "psrlq $3, %%mm1 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "movq %%mm2, %%mm5 \n\t"
+ "punpcklwd %5, %%mm0 \n\t"
+ "punpcklwd %5, %%mm1 \n\t"
+ "punpcklwd %5, %%mm2 \n\t"
+ "punpckhwd %5, %%mm3 \n\t"
+ "punpckhwd %5, %%mm4 \n\t"
+ "punpckhwd %5, %%mm5 \n\t"
+ "psllq $8, %%mm1 \n\t"
+ "psllq $16, %%mm2 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psllq $8, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+
+ "movq %%mm0, %%mm6 \n\t"
+ "movq %%mm3, %%mm7 \n\t"
+
+ "movq 8%1, %%mm0 \n\t"
+ "movq 8%1, %%mm1 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %3, %%mm1 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "psllq $3, %%mm0 \n\t"
+ "psrlq $3, %%mm1 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "movq %%mm2, %%mm5 \n\t"
+ "punpcklwd %5, %%mm0 \n\t"
+ "punpcklwd %5, %%mm1 \n\t"
+ "punpcklwd %5, %%mm2 \n\t"
+ "punpckhwd %5, %%mm3 \n\t"
+ "punpckhwd %5, %%mm4 \n\t"
+ "punpckhwd %5, %%mm5 \n\t"
+ "psllq $8, %%mm1 \n\t"
+ "psllq $16, %%mm2 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psllq $8, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
+ :"memory");
+ /* Borrowed 32 to 24 */
+ asm volatile(
+ "movq %%mm0, %%mm4 \n\t"
+ "movq %%mm3, %%mm5 \n\t"
+ "movq %%mm6, %%mm0 \n\t"
+ "movq %%mm7, %%mm1 \n\t"
+
+ "movq %%mm4, %%mm6 \n\t"
+ "movq %%mm5, %%mm7 \n\t"
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm3 \n\t"
+
+ "psrlq $8, %%mm2 \n\t"
+ "psrlq $8, %%mm3 \n\t"
+ "psrlq $8, %%mm6 \n\t"
+ "psrlq $8, %%mm7 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %2, %%mm1 \n\t"
+ "pand %2, %%mm4 \n\t"
+ "pand %2, %%mm5 \n\t"
+ "pand %3, %%mm2 \n\t"
+ "pand %3, %%mm3 \n\t"
+ "pand %3, %%mm6 \n\t"
+ "pand %3, %%mm7 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm6, %%mm4 \n\t"
+ "por %%mm7, %%mm5 \n\t"
+
+ "movq %%mm1, %%mm2 \n\t"
+ "movq %%mm4, %%mm3 \n\t"
+ "psllq $48, %%mm2 \n\t"
+ "psllq $32, %%mm3 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "pand %5, %%mm3 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psrlq $16, %%mm1 \n\t"
+ "psrlq $32, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "pand %6, %%mm5 \n\t"
+ "por %%mm5, %%mm4 \n\t"
+
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm4, 16%0"
+
+ :"=m"(*d)
+ :"m"(*s),"m"(mask24l),"m"(mask24h),"m"(mask24hh),"m"(mask24hhh),"m"(mask24hhhh)
+ :"memory");
+ d += 24;
+ s += 8;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ }
}
static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint16_t *end;
+ const uint16_t *end;
#ifdef HAVE_MMX
- const uint16_t *mm_end;
+ const uint16_t *mm_end;
#endif
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (const uint16_t *)src;
- end = s + src_size/2;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (const uint16_t *)src;
+ end = s + src_size/2;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory");
- mm_end = end - 3;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq %1, %%mm1\n\t"
- "movq %1, %%mm2\n\t"
- "pand %2, %%mm0\n\t"
- "pand %3, %%mm1\n\t"
- "pand %4, %%mm2\n\t"
- "psllq $3, %%mm0\n\t"
- "psrlq $2, %%mm1\n\t"
- "psrlq $7, %%mm2\n\t"
- "movq %%mm0, %%mm3\n\t"
- "movq %%mm1, %%mm4\n\t"
- "movq %%mm2, %%mm5\n\t"
- "punpcklwd %%mm7, %%mm0\n\t"
- "punpcklwd %%mm7, %%mm1\n\t"
- "punpcklwd %%mm7, %%mm2\n\t"
- "punpckhwd %%mm7, %%mm3\n\t"
- "punpckhwd %%mm7, %%mm4\n\t"
- "punpckhwd %%mm7, %%mm5\n\t"
- "psllq $8, %%mm1\n\t"
- "psllq $16, %%mm2\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm2, %%mm0\n\t"
- "psllq $8, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm5, %%mm3\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm3, 8%0\n\t"
- :"=m"(*d)
- :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
- :"memory");
- d += 16;
- s += 4;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
-#if 0 //slightly slower on athlon
- int bgr= *s++;
- *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9);
+ asm volatile(PREFETCH" %0"::"m"(*s):"memory");
+ asm volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
+ mm_end = end - 3;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ "movq %1, %%mm2 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %3, %%mm1 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "psllq $3, %%mm0 \n\t"
+ "psrlq $2, %%mm1 \n\t"
+ "psrlq $7, %%mm2 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "movq %%mm2, %%mm5 \n\t"
+ "punpcklwd %%mm7, %%mm0 \n\t"
+ "punpcklwd %%mm7, %%mm1 \n\t"
+ "punpcklwd %%mm7, %%mm2 \n\t"
+ "punpckhwd %%mm7, %%mm3 \n\t"
+ "punpckhwd %%mm7, %%mm4 \n\t"
+ "punpckhwd %%mm7, %%mm5 \n\t"
+ "psllq $8, %%mm1 \n\t"
+ "psllq $16, %%mm2 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psllq $8, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm3, 8%0 \n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
+ :"memory");
+ d += 16;
+ s += 4;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
+ while (s < end)
+ {
+#if 0 //slightly slower on Athlon
+ int bgr= *s++;
+ *((uint32_t*)d)++ = ((bgr&0x1F)<<3) + ((bgr&0x3E0)<<6) + ((bgr&0x7C00)<<9);
#else
- register uint16_t bgr;
- bgr = *s++;
+ register uint16_t bgr;
+ bgr = *s++;
#ifdef WORDS_BIGENDIAN
- *d++ = 0;
- *d++ = (bgr&0x7C00)>>7;
- *d++ = (bgr&0x3E0)>>2;
- *d++ = (bgr&0x1F)<<3;
+ *d++ = 0;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x1F)<<3;
#else
- *d++ = (bgr&0x1F)<<3;
- *d++ = (bgr&0x3E0)>>2;
- *d++ = (bgr&0x7C00)>>7;
- *d++ = 0;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x3E0)>>2;
+ *d++ = (bgr&0x7C00)>>7;
+ *d++ = 0;
#endif
#endif
- }
+ }
}
static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, long src_size)
{
- const uint16_t *end;
+ const uint16_t *end;
#ifdef HAVE_MMX
- const uint16_t *mm_end;
+ const uint16_t *mm_end;
#endif
- uint8_t *d = (uint8_t *)dst;
- const uint16_t *s = (uint16_t *)src;
- end = s + src_size/2;
+ uint8_t *d = (uint8_t *)dst;
+ const uint16_t *s = (uint16_t *)src;
+ end = s + src_size/2;
#ifdef HAVE_MMX
- __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
- __asm __volatile("pxor %%mm7,%%mm7\n\t":::"memory");
- mm_end = end - 3;
- while(s < mm_end)
- {
- __asm __volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq %1, %%mm1\n\t"
- "movq %1, %%mm2\n\t"
- "pand %2, %%mm0\n\t"
- "pand %3, %%mm1\n\t"
- "pand %4, %%mm2\n\t"
- "psllq $3, %%mm0\n\t"
- "psrlq $3, %%mm1\n\t"
- "psrlq $8, %%mm2\n\t"
- "movq %%mm0, %%mm3\n\t"
- "movq %%mm1, %%mm4\n\t"
- "movq %%mm2, %%mm5\n\t"
- "punpcklwd %%mm7, %%mm0\n\t"
- "punpcklwd %%mm7, %%mm1\n\t"
- "punpcklwd %%mm7, %%mm2\n\t"
- "punpckhwd %%mm7, %%mm3\n\t"
- "punpckhwd %%mm7, %%mm4\n\t"
- "punpckhwd %%mm7, %%mm5\n\t"
- "psllq $8, %%mm1\n\t"
- "psllq $16, %%mm2\n\t"
- "por %%mm1, %%mm0\n\t"
- "por %%mm2, %%mm0\n\t"
- "psllq $8, %%mm4\n\t"
- "psllq $16, %%mm5\n\t"
- "por %%mm4, %%mm3\n\t"
- "por %%mm5, %%mm3\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm3, 8%0\n\t"
- :"=m"(*d)
- :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
- :"memory");
- d += 16;
- s += 4;
- }
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#endif
- while(s < end)
- {
- register uint16_t bgr;
- bgr = *s++;
+ asm volatile(PREFETCH" %0"::"m"(*s):"memory");
+ asm volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
+ mm_end = end - 3;
+ while (s < mm_end)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq %1, %%mm1 \n\t"
+ "movq %1, %%mm2 \n\t"
+ "pand %2, %%mm0 \n\t"
+ "pand %3, %%mm1 \n\t"
+ "pand %4, %%mm2 \n\t"
+ "psllq $3, %%mm0 \n\t"
+ "psrlq $3, %%mm1 \n\t"
+ "psrlq $8, %%mm2 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "movq %%mm2, %%mm5 \n\t"
+ "punpcklwd %%mm7, %%mm0 \n\t"
+ "punpcklwd %%mm7, %%mm1 \n\t"
+ "punpcklwd %%mm7, %%mm2 \n\t"
+ "punpckhwd %%mm7, %%mm3 \n\t"
+ "punpckhwd %%mm7, %%mm4 \n\t"
+ "punpckhwd %%mm7, %%mm5 \n\t"
+ "psllq $8, %%mm1 \n\t"
+ "psllq $16, %%mm2 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "psllq $8, %%mm4 \n\t"
+ "psllq $16, %%mm5 \n\t"
+ "por %%mm4, %%mm3 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm3, 8%0 \n\t"
+ :"=m"(*d)
+ :"m"(*s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
+ :"memory");
+ d += 16;
+ s += 4;
+ }
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+#endif
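+    /* scalar tail: shift each 5- or 6-bit field so its MSB lands in bit 7 of
+       its own output byte (low bits stay zero) and write a zero alpha byte */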
+ while (s < end)
+ {
+ register uint16_t bgr;
+ bgr = *s++;
#ifdef WORDS_BIGENDIAN
- *d++ = 0;
- *d++ = (bgr&0xF800)>>8;
- *d++ = (bgr&0x7E0)>>3;
- *d++ = (bgr&0x1F)<<3;
+ *d++ = 0;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0x1F)<<3;
#else
- *d++ = (bgr&0x1F)<<3;
- *d++ = (bgr&0x7E0)>>3;
- *d++ = (bgr&0xF800)>>8;
- *d++ = 0;
+ *d++ = (bgr&0x1F)<<3;
+ *d++ = (bgr&0x7E0)>>3;
+ *d++ = (bgr&0xF800)>>8;
+ *d++ = 0;
#endif
- }
+ }
}
static inline void RENAME(rgb32tobgr32)(const uint8_t *src, uint8_t *dst, long src_size)
{
+ long idx = 15 - src_size;
+ uint8_t *s = (uint8_t *) src-idx, *d = dst-idx;
#ifdef HAVE_MMX
-/* TODO: unroll this loop */
- asm volatile (
- "xor %%"REG_a", %%"REG_a" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 32(%0, %%"REG_a") \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm0, %%mm2 \n\t"
- "pslld $16, %%mm0 \n\t"
- "psrld $16, %%mm1 \n\t"
- "pand "MANGLE(mask32r)", %%mm0 \n\t"
- "pand "MANGLE(mask32g)", %%mm2 \n\t"
- "pand "MANGLE(mask32b)", %%mm1 \n\t"
- "por %%mm0, %%mm2 \n\t"
- "por %%mm1, %%mm2 \n\t"
- MOVNTQ" %%mm2, (%1, %%"REG_a") \n\t"
- "add $8, %%"REG_a" \n\t"
- "cmp %2, %%"REG_a" \n\t"
- " jb 1b \n\t"
- :: "r" (src), "r"(dst), "r" (src_size-7)
- : "%"REG_a
- );
-
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-#else
- unsigned i;
- unsigned num_pixels = src_size >> 2;
- for(i=0; i<num_pixels; i++)
- {
-#ifdef WORDS_BIGENDIAN
- dst[4*i + 1] = src[4*i + 3];
- dst[4*i + 2] = src[4*i + 2];
- dst[4*i + 3] = src[4*i + 1];
-#else
- dst[4*i + 0] = src[4*i + 2];
- dst[4*i + 1] = src[4*i + 1];
- dst[4*i + 2] = src[4*i + 0];
-#endif
- }
-#endif
+ asm volatile(
+ "test %0, %0 \n\t"
+ "jns 2f \n\t"
+ PREFETCH" (%1, %0) \n\t"
+ "movq %3, %%mm7 \n\t"
+ "pxor %4, %%mm7 \n\t"
+ "movq %%mm7, %%mm6 \n\t"
+ "pxor %5, %%mm7 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1, %0) \n\t"
+ "movq (%1, %0), %%mm0 \n\t"
+ "movq 8(%1, %0), %%mm1 \n\t"
+# ifdef HAVE_MMX2
+ "pshufw $177, %%mm0, %%mm3 \n\t"
+ "pshufw $177, %%mm1, %%mm5 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm6, %%mm3 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm6, %%mm5 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ "por %%mm5, %%mm1 \n\t"
+# else
+ "movq %%mm0, %%mm2 \n\t"
+ "movq %%mm1, %%mm4 \n\t"
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm6, %%mm2 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm6, %%mm4 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "pslld $16, %%mm2 \n\t"
+ "psrld $16, %%mm3 \n\t"
+ "pslld $16, %%mm4 \n\t"
+ "psrld $16, %%mm5 \n\t"
+ "por %%mm2, %%mm0 \n\t"
+ "por %%mm4, %%mm1 \n\t"
+ "por %%mm3, %%mm0 \n\t"
+ "por %%mm5, %%mm1 \n\t"
+# endif
+ MOVNTQ" %%mm0, (%2, %0) \n\t"
+ MOVNTQ" %%mm1, 8(%2, %0) \n\t"
+ "add $16, %0 \n\t"
+ "js 1b \n\t"
+ SFENCE" \n\t"
+ EMMS" \n\t"
+ "2: \n\t"
+ : "+&r"(idx)
+ : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one)
+ : "memory");
+#endif
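+    /* scalar tail (and non-MMX fallback): swap R and B within each 32-bit
+       pixel; g keeps the bytes selected by 0xff00ff00, while the two bytes
+       selected by 0x00ff00ff are exchanged by the opposing 16-bit shifts */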
+ for (; idx<15; idx+=4) {
+ register int v = *(uint32_t *)&s[idx], g = v & 0xff00ff00;
+ v &= 0xff00ff;
+ *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16);
+ }
}
static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
- unsigned i;
+ unsigned i;
#ifdef HAVE_MMX
- long mmx_size= 23 - src_size;
- asm volatile (
- "movq "MANGLE(mask24r)", %%mm5 \n\t"
- "movq "MANGLE(mask24g)", %%mm6 \n\t"
- "movq "MANGLE(mask24b)", %%mm7 \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 32(%1, %%"REG_a") \n\t"
- "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
- "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
- "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
- "psllq $16, %%mm0 \n\t" // 00 BGR BGR
- "pand %%mm5, %%mm0 \n\t"
- "pand %%mm6, %%mm1 \n\t"
- "pand %%mm7, %%mm2 \n\t"
- "por %%mm0, %%mm1 \n\t"
- "por %%mm2, %%mm1 \n\t"
- "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
- MOVNTQ" %%mm1, (%2, %%"REG_a")\n\t" // RGB RGB RG
- "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
- "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
- "pand %%mm7, %%mm0 \n\t"
- "pand %%mm5, %%mm1 \n\t"
- "pand %%mm6, %%mm2 \n\t"
- "por %%mm0, %%mm1 \n\t"
- "por %%mm2, %%mm1 \n\t"
- "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
- MOVNTQ" %%mm1, 8(%2, %%"REG_a")\n\t" // B RGB RGB R
- "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
- "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
- "pand %%mm6, %%mm0 \n\t"
- "pand %%mm7, %%mm1 \n\t"
- "pand %%mm5, %%mm2 \n\t"
- "por %%mm0, %%mm1 \n\t"
- "por %%mm2, %%mm1 \n\t"
- MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t"
- "add $24, %%"REG_a" \n\t"
- " js 1b \n\t"
- : "+a" (mmx_size)
- : "r" (src-mmx_size), "r"(dst-mmx_size)
- );
-
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
-
- if(mmx_size==23) return; //finihsed, was multiple of 8
-
- src+= src_size;
- dst+= src_size;
- src_size= 23-mmx_size;
- src-= src_size;
- dst-= src_size;
-#endif
- for(i=0; i<src_size; i+=3)
- {
- register uint8_t x;
- x = src[i + 2];
- dst[i + 1] = src[i + 1];
- dst[i + 2] = src[i + 0];
- dst[i + 0] = x;
- }
+ long mmx_size= 23 - src_size;
+ asm volatile (
+ "test %%"REG_a", %%"REG_a" \n\t"
+ "jns 2f \n\t"
+ "movq "MANGLE(mask24r)", %%mm5 \n\t"
+ "movq "MANGLE(mask24g)", %%mm6 \n\t"
+ "movq "MANGLE(mask24b)", %%mm7 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a") \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
+ "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
+ "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
+ "psllq $16, %%mm0 \n\t" // 00 BGR BGR
+ "pand %%mm5, %%mm0 \n\t"
+ "pand %%mm6, %%mm1 \n\t"
+ "pand %%mm7, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
+ MOVNTQ" %%mm1, (%2, %%"REG_a") \n\t" // RGB RGB RG
+ "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
+ "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
+ "pand %%mm7, %%mm0 \n\t"
+ "pand %%mm5, %%mm1 \n\t"
+ "pand %%mm6, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
+ MOVNTQ" %%mm1, 8(%2, %%"REG_a") \n\t" // B RGB RGB R
+ "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
+ "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
+ "pand %%mm6, %%mm0 \n\t"
+ "pand %%mm7, %%mm1 \n\t"
+ "pand %%mm5, %%mm2 \n\t"
+ "por %%mm0, %%mm1 \n\t"
+ "por %%mm2, %%mm1 \n\t"
+ MOVNTQ" %%mm1, 16(%2, %%"REG_a") \n\t"
+ "add $24, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ "2: \n\t"
+ : "+a" (mmx_size)
+ : "r" (src-mmx_size), "r"(dst-mmx_size)
+ );
+
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
+
+    if (mmx_size==23) return; //finished, was multiple of 8
+
+ src+= src_size;
+ dst+= src_size;
+ src_size= 23-mmx_size;
+ src-= src_size;
+ dst-= src_size;
+#endif
+ for (i=0; i<src_size; i+=3)
+ {
+ register uint8_t x;
+ x = src[i + 2];
+ dst[i + 1] = src[i + 1];
+ dst[i + 2] = src[i + 0];
+ dst[i + 0] = x;
+ }
}
static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
+ long width, long height,
+ long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
{
- long y;
- const long chromWidth= width>>1;
- for(y=0; y<height; y++)
- {
+ long y;
+ const long chromWidth= width>>1;
+ for (y=0; y<height; y++)
+ {
#ifdef HAVE_MMX
-//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
- asm volatile(
- "xor %%"REG_a", %%"REG_a" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
- PREFETCH" 32(%2, %%"REG_a") \n\t"
- PREFETCH" 32(%3, %%"REG_a") \n\t"
- "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
- "movq %%mm0, %%mm2 \n\t" // U(0)
- "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
- "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
- "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
-
- "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
- "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
- "movq %%mm3, %%mm4 \n\t" // Y(0)
- "movq %%mm5, %%mm6 \n\t" // Y(8)
- "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
- "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
- "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
- "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
-
- MOVNTQ" %%mm3, (%0, %%"REG_a", 4)\n\t"
- MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
- MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4)\n\t"
- MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
-
- "add $8, %%"REG_a" \n\t"
- "cmp %4, %%"REG_a" \n\t"
- " jb 1b \n\t"
- ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
- : "%"REG_a
- );
+//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
+ PREFETCH" 32(%2, %%"REG_a") \n\t"
+ PREFETCH" 32(%3, %%"REG_a") \n\t"
+ "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
+ "movq %%mm0, %%mm2 \n\t" // U(0)
+ "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
+ "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
+
+ "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
+ "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
+ "movq %%mm3, %%mm4 \n\t" // Y(0)
+ "movq %%mm5, %%mm6 \n\t" // Y(8)
+ "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
+ "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
+ "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
+ "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
+
+ MOVNTQ" %%mm3, (%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
+ : "%"REG_a
+ );
#else
#if defined ARCH_ALPHA && defined HAVE_MVI
-#define pl2yuy2(n) \
- y1 = yc[n]; \
- y2 = yc2[n]; \
- u = uc[n]; \
- v = vc[n]; \
- asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \
- asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \
- asm("unpkbl %1, %0" : "=r"(u) : "r"(u)); \
- asm("unpkbl %1, %0" : "=r"(v) : "r"(v)); \
- yuv1 = (u << 8) + (v << 24); \
- yuv2 = yuv1 + y2; \
- yuv1 += y1; \
- qdst[n] = yuv1; \
- qdst2[n] = yuv2;
-
- int i;
- uint64_t *qdst = (uint64_t *) dst;
- uint64_t *qdst2 = (uint64_t *) (dst + dstStride);
- const uint32_t *yc = (uint32_t *) ysrc;
- const uint32_t *yc2 = (uint32_t *) (ysrc + lumStride);
- const uint16_t *uc = (uint16_t*) usrc, *vc = (uint16_t*) vsrc;
- for(i = 0; i < chromWidth; i += 8){
- uint64_t y1, y2, yuv1, yuv2;
- uint64_t u, v;
- /* Prefetch */
- asm("ldq $31,64(%0)" :: "r"(yc));
- asm("ldq $31,64(%0)" :: "r"(yc2));
- asm("ldq $31,64(%0)" :: "r"(uc));
- asm("ldq $31,64(%0)" :: "r"(vc));
-
- pl2yuy2(0);
- pl2yuy2(1);
- pl2yuy2(2);
- pl2yuy2(3);
-
- yc += 4;
- yc2 += 4;
- uc += 4;
- vc += 4;
- qdst += 4;
- qdst2 += 4;
- }
- y++;
- ysrc += lumStride;
- dst += dstStride;
+#define pl2yuy2(n) \
+ y1 = yc[n]; \
+ y2 = yc2[n]; \
+ u = uc[n]; \
+ v = vc[n]; \
+ asm("unpkbw %1, %0" : "=r"(y1) : "r"(y1)); \
+ asm("unpkbw %1, %0" : "=r"(y2) : "r"(y2)); \
+ asm("unpkbl %1, %0" : "=r"(u) : "r"(u)); \
+ asm("unpkbl %1, %0" : "=r"(v) : "r"(v)); \
+ yuv1 = (u << 8) + (v << 24); \
+ yuv2 = yuv1 + y2; \
+ yuv1 += y1; \
+ qdst[n] = yuv1; \
+ qdst2[n] = yuv2;
+
+ int i;
+ uint64_t *qdst = (uint64_t *) dst;
+ uint64_t *qdst2 = (uint64_t *) (dst + dstStride);
+ const uint32_t *yc = (uint32_t *) ysrc;
+ const uint32_t *yc2 = (uint32_t *) (ysrc + lumStride);
+ const uint16_t *uc = (uint16_t*) usrc, *vc = (uint16_t*) vsrc;
+ for (i = 0; i < chromWidth; i += 8){
+ uint64_t y1, y2, yuv1, yuv2;
+ uint64_t u, v;
+ /* Prefetch */
+ asm("ldq $31,64(%0)" :: "r"(yc));
+ asm("ldq $31,64(%0)" :: "r"(yc2));
+ asm("ldq $31,64(%0)" :: "r"(uc));
+ asm("ldq $31,64(%0)" :: "r"(vc));
+
+ pl2yuy2(0);
+ pl2yuy2(1);
+ pl2yuy2(2);
+ pl2yuy2(3);
+
+ yc += 4;
+ yc2 += 4;
+ uc += 4;
+ vc += 4;
+ qdst += 4;
+ qdst2 += 4;
+ }
+ y++;
+ ysrc += lumStride;
+ dst += dstStride;
#elif __WORDSIZE >= 64
- int i;
- uint64_t *ldst = (uint64_t *) dst;
- const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
- for(i = 0; i < chromWidth; i += 2){
- uint64_t k, l;
- k = yc[0] + (uc[0] << 8) +
- (yc[1] << 16) + (vc[0] << 24);
- l = yc[2] + (uc[1] << 8) +
- (yc[3] << 16) + (vc[1] << 24);
- *ldst++ = k + (l << 32);
- yc += 4;
- uc += 2;
- vc += 2;
- }
+ int i;
+ uint64_t *ldst = (uint64_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for (i = 0; i < chromWidth; i += 2){
+ uint64_t k, l;
+ k = yc[0] + (uc[0] << 8) +
+ (yc[1] << 16) + (vc[0] << 24);
+ l = yc[2] + (uc[1] << 8) +
+ (yc[3] << 16) + (vc[1] << 24);
+ *ldst++ = k + (l << 32);
+ yc += 4;
+ uc += 2;
+ vc += 2;
+ }
#else
- int i, *idst = (int32_t *) dst;
- const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
- for(i = 0; i < chromWidth; i++){
+ int i, *idst = (int32_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for (i = 0; i < chromWidth; i++){
#ifdef WORDS_BIGENDIAN
- *idst++ = (yc[0] << 24)+ (uc[0] << 16) +
- (yc[1] << 8) + (vc[0] << 0);
+ *idst++ = (yc[0] << 24)+ (uc[0] << 16) +
+ (yc[1] << 8) + (vc[0] << 0);
#else
- *idst++ = yc[0] + (uc[0] << 8) +
- (yc[1] << 16) + (vc[0] << 24);
-#endif
- yc += 2;
- uc++;
- vc++;
- }
-#endif
-#endif
- if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
- {
- usrc += chromStride;
- vsrc += chromStride;
- }
- ysrc += lumStride;
- dst += dstStride;
- }
+ *idst++ = yc[0] + (uc[0] << 8) +
+ (yc[1] << 16) + (vc[0] << 24);
+#endif
+ yc += 2;
+ uc++;
+ vc++;
+ }
+#endif
+#endif
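+    // advance the chroma pointers once every vertLumPerChroma luma lines
+    // (vertLumPerChroma is assumed to be a power of two, so the mask test
+    // below fires on the last luma line that shares the current chroma line)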
+ if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1)
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
#ifdef HAVE_MMX
-asm( EMMS" \n\t"
- SFENCE" \n\t"
+asm( EMMS" \n\t"
+ SFENCE" \n\t"
:::"memory");
#endif
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
- * problem for anyone then tell me, and ill fix it)
+ * Height should be a multiple of 2 and width should be a multiple of 16 (if
+ * this is a problem for anyone then tell me, and I will fix it).
*/
static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride)
+ long width, long height,
+ long lumStride, long chromStride, long dstStride)
{
- //FIXME interpolate chroma
- RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+ //FIXME interpolate chroma
+ RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}
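+/* A minimal usage sketch (hypothetical buffer names), assuming the public
+   yv12toyuy2 entry point declared in rgb2rgb.h has the same signature as this
+   template: pack a 640x480 YV12 frame into YUY2.  Both dimensions satisfy the
+   documented constraints (width a multiple of 16, height a multiple of 2).
+
+       yv12toyuy2(y_plane, u_plane, v_plane, packed_dst,
+                  640, 480,     // width, height
+                  640, 320,     // lumStride, chromStride
+                  640 * 2);     // dstStride: 2 bytes per packed pixel
+*/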
static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
+ long width, long height,
+ long lumStride, long chromStride, long dstStride, long vertLumPerChroma)
{
- long y;
- const long chromWidth= width>>1;
- for(y=0; y<height; y++)
- {
+ long y;
+ const long chromWidth= width>>1;
+ for (y=0; y<height; y++)
+ {
#ifdef HAVE_MMX
-//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
- asm volatile(
- "xor %%"REG_a", %%"REG_a" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
- PREFETCH" 32(%2, %%"REG_a") \n\t"
- PREFETCH" 32(%3, %%"REG_a") \n\t"
- "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
- "movq %%mm0, %%mm2 \n\t" // U(0)
- "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
- "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
- "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
-
- "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
- "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
- "movq %%mm0, %%mm4 \n\t" // Y(0)
- "movq %%mm2, %%mm6 \n\t" // Y(8)
- "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0)
- "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4)
- "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8)
- "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12)
-
- MOVNTQ" %%mm0, (%0, %%"REG_a", 4)\n\t"
- MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
- MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4)\n\t"
- MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
-
- "add $8, %%"REG_a" \n\t"
- "cmp %4, %%"REG_a" \n\t"
- " jb 1b \n\t"
- ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
- : "%"REG_a
- );
+//FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
+ PREFETCH" 32(%2, %%"REG_a") \n\t"
+ PREFETCH" 32(%3, %%"REG_a") \n\t"
+ "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
+ "movq %%mm0, %%mm2 \n\t" // U(0)
+ "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
+ "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
+
+ "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
+ "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
+ "movq %%mm0, %%mm4 \n\t" // Y(0)
+ "movq %%mm2, %%mm6 \n\t" // Y(8)
+ "punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0)
+ "punpckhbw %%mm3, %%mm4 \n\t" // YUYV YUYV(4)
+ "punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8)
+ "punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12)
+
+ MOVNTQ" %%mm0, (%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4) \n\t"
+ MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
+ : "%"REG_a
+ );
#else
-//FIXME adapt the alpha asm code from yv12->yuy2
+//FIXME adapt the Alpha ASM code from yv12->yuy2
#if __WORDSIZE >= 64
- int i;
- uint64_t *ldst = (uint64_t *) dst;
- const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
- for(i = 0; i < chromWidth; i += 2){
- uint64_t k, l;
- k = uc[0] + (yc[0] << 8) +
- (vc[0] << 16) + (yc[1] << 24);
- l = uc[1] + (yc[2] << 8) +
- (vc[1] << 16) + (yc[3] << 24);
- *ldst++ = k + (l << 32);
- yc += 4;
- uc += 2;
- vc += 2;
- }
+ int i;
+ uint64_t *ldst = (uint64_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for (i = 0; i < chromWidth; i += 2){
+ uint64_t k, l;
+ k = uc[0] + (yc[0] << 8) +
+ (vc[0] << 16) + (yc[1] << 24);
+ l = uc[1] + (yc[2] << 8) +
+ (vc[1] << 16) + (yc[3] << 24);
+ *ldst++ = k + (l << 32);
+ yc += 4;
+ uc += 2;
+ vc += 2;
+ }
#else
- int i, *idst = (int32_t *) dst;
- const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
- for(i = 0; i < chromWidth; i++){
+ int i, *idst = (int32_t *) dst;
+ const uint8_t *yc = ysrc, *uc = usrc, *vc = vsrc;
+ for (i = 0; i < chromWidth; i++){
#ifdef WORDS_BIGENDIAN
- *idst++ = (uc[0] << 24)+ (yc[0] << 16) +
- (vc[0] << 8) + (yc[1] << 0);
+ *idst++ = (uc[0] << 24)+ (yc[0] << 16) +
+ (vc[0] << 8) + (yc[1] << 0);
#else
- *idst++ = uc[0] + (yc[0] << 8) +
- (vc[0] << 16) + (yc[1] << 24);
-#endif
- yc += 2;
- uc++;
- vc++;
- }
-#endif
-#endif
- if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
- {
- usrc += chromStride;
- vsrc += chromStride;
- }
- ysrc += lumStride;
- dst += dstStride;
- }
+ *idst++ = uc[0] + (yc[0] << 8) +
+ (vc[0] << 16) + (yc[1] << 24);
+#endif
+ yc += 2;
+ uc++;
+ vc++;
+ }
+#endif
+#endif
+ if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1)
+ {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
+ }
#ifdef HAVE_MMX
-asm( EMMS" \n\t"
- SFENCE" \n\t"
+asm( EMMS" \n\t"
+ SFENCE" \n\t"
:::"memory");
#endif
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
- * problem for anyone then tell me, and ill fix it)
+ * Height should be a multiple of 2 and width should be a multiple of 16 (if
+ * this is a problem for anyone then tell me, and I will fix it).
*/
static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride)
+ long width, long height,
+ long lumStride, long chromStride, long dstStride)
{
- //FIXME interpolate chroma
- RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
+ //FIXME interpolate chroma
+ RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}
/**
- *
- * width should be a multiple of 16
+ * Width should be a multiple of 16.
*/
static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
- long width, long height,
- long lumStride, long chromStride, long dstStride)
+ long width, long height,
+ long lumStride, long chromStride, long dstStride)
{
- RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
+ RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
- * problem for anyone then tell me, and ill fix it)
+ * Height should be a multiple of 2 and width should be a multiple of 16 (if
+ * this is a problem for anyone then tell me, and I will fix it).
*/
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
- long width, long height,
- long lumStride, long chromStride, long srcStride)
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
{
- long y;
- const long chromWidth= width>>1;
- for(y=0; y<height; y+=2)
- {
+ long y;
+ const long chromWidth= width>>1;
+ for (y=0; y<height; y+=2)
+ {
#ifdef HAVE_MMX
- asm volatile(
- "xor %%"REG_a", %%"REG_a" \n\t"
- "pcmpeqw %%mm7, %%mm7 \n\t"
- "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
- "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
- "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
- "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
- "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
- "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
- "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
- "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
- "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
- "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
- "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
-
- MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t"
-
- "movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8)
- "movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12)
- "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
- "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
- "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
- "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
- "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
- "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
- "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
- "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
-
- MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t"
-
- "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
- "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
- "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
- "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
- "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
- "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
- "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
- "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
-
- MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
- MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
-
- "add $8, %%"REG_a" \n\t"
- "cmp %4, %%"REG_a" \n\t"
- " jb 1b \n\t"
- ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
- : "memory", "%"REG_a
- );
-
- ydst += lumStride;
- src += srcStride;
-
- asm volatile(
- "xor %%"REG_a", %%"REG_a" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
- "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
- "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
- "movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8)
- "movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12)
- "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
- "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
- "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
- "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
- "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
- "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
-
- MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t"
- MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t"
-
- "add $8, %%"REG_a" \n\t"
- "cmp %4, %%"REG_a" \n\t"
- " jb 1b \n\t"
-
- ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
- : "memory", "%"REG_a
- );
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
+ "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
+ "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
+ "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
+ "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
+ "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
+ "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
+
+ MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
+
+ "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(12)
+ "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
+ "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
+ "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
+ "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
+ "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
+ "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
+
+ "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
+ "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
+ "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
+ "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
+ "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
+ "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
+
+ MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
+ MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%"REG_a
+ );
+
+ ydst += lumStride;
+ src += srcStride;
+
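+        // second line of the pair: only the luma bytes are kept (this relies
+        // on %%mm7 still holding the 0x00FF mask set in the block above);
+        // the chroma samples of this line are simply discarded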
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
+ "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
+ "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
+ "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
+ "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
+ "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%"REG_a
+ );
#else
- long i;
- for(i=0; i<chromWidth; i++)
- {
- ydst[2*i+0] = src[4*i+0];
- udst[i] = src[4*i+1];
- ydst[2*i+1] = src[4*i+2];
- vdst[i] = src[4*i+3];
- }
- ydst += lumStride;
- src += srcStride;
-
- for(i=0; i<chromWidth; i++)
- {
- ydst[2*i+0] = src[4*i+0];
- ydst[2*i+1] = src[4*i+2];
- }
-#endif
- udst += chromStride;
- vdst += chromStride;
- ydst += lumStride;
- src += srcStride;
- }
+ long i;
+ for (i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+0];
+ udst[i] = src[4*i+1];
+ ydst[2*i+1] = src[4*i+2];
+ vdst[i] = src[4*i+3];
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for (i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+0];
+ ydst[2*i+1] = src[4*i+2];
+ }
+#endif
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
#ifdef HAVE_MMX
-asm volatile( EMMS" \n\t"
- SFENCE" \n\t"
- :::"memory");
+asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
#endif
}
static inline void RENAME(yvu9toyv12)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc,
- uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
- long width, long height, long lumStride, long chromStride)
+ uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height, long lumStride, long chromStride)
{
- /* Y Plane */
- memcpy(ydst, ysrc, width*height);
+ /* Y Plane */
+ memcpy(ydst, ysrc, width*height);
- /* XXX: implement upscaling for U,V */
+ /* XXX: implement upscaling for U,V */
}
static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, long srcWidth, long srcHeight, long srcStride, long dstStride)
{
- long x,y;
-
- dst[0]= src[0];
-
- // first line
- for(x=0; x<srcWidth-1; x++){
- dst[2*x+1]= (3*src[x] + src[x+1])>>2;
- dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
- }
- dst[2*srcWidth-1]= src[srcWidth-1];
-
+ long x,y;
+
+ dst[0]= src[0];
+
+ // first line
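+    // each pair of outputs between src[x] and src[x+1] is a 3:1 / 1:3 weighted
+    // average of the two neighbours (the weights sum to 4, hence the >>2)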
+ for (x=0; x<srcWidth-1; x++){
+ dst[2*x+1]= (3*src[x] + src[x+1])>>2;
+ dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
+ }
+ dst[2*srcWidth-1]= src[srcWidth-1];
+
dst+= dstStride;
- for(y=1; y<srcHeight; y++){
+ for (y=1; y<srcHeight; y++){
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- const long mmxSize= srcWidth&~15;
- asm volatile(
- "mov %4, %%"REG_a" \n\t"
- "1: \n\t"
- "movq (%0, %%"REG_a"), %%mm0 \n\t"
- "movq (%1, %%"REG_a"), %%mm1 \n\t"
- "movq 1(%0, %%"REG_a"), %%mm2 \n\t"
- "movq 1(%1, %%"REG_a"), %%mm3 \n\t"
- "movq -1(%0, %%"REG_a"), %%mm4 \n\t"
- "movq -1(%1, %%"REG_a"), %%mm5 \n\t"
- PAVGB" %%mm0, %%mm5 \n\t"
- PAVGB" %%mm0, %%mm3 \n\t"
- PAVGB" %%mm0, %%mm5 \n\t"
- PAVGB" %%mm0, %%mm3 \n\t"
- PAVGB" %%mm1, %%mm4 \n\t"
- PAVGB" %%mm1, %%mm2 \n\t"
- PAVGB" %%mm1, %%mm4 \n\t"
- PAVGB" %%mm1, %%mm2 \n\t"
- "movq %%mm5, %%mm7 \n\t"
- "movq %%mm4, %%mm6 \n\t"
- "punpcklbw %%mm3, %%mm5 \n\t"
- "punpckhbw %%mm3, %%mm7 \n\t"
- "punpcklbw %%mm2, %%mm4 \n\t"
- "punpckhbw %%mm2, %%mm6 \n\t"
+ const long mmxSize= srcWidth&~15;
+ asm volatile(
+ "mov %4, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "movq 1(%0, %%"REG_a"), %%mm2 \n\t"
+ "movq 1(%1, %%"REG_a"), %%mm3 \n\t"
+ "movq -1(%0, %%"REG_a"), %%mm4 \n\t"
+ "movq -1(%1, %%"REG_a"), %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm3 \n\t"
+ PAVGB" %%mm0, %%mm5 \n\t"
+ PAVGB" %%mm0, %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm1, %%mm2 \n\t"
+ "movq %%mm5, %%mm7 \n\t"
+ "movq %%mm4, %%mm6 \n\t"
+ "punpcklbw %%mm3, %%mm5 \n\t"
+ "punpckhbw %%mm3, %%mm7 \n\t"
+ "punpcklbw %%mm2, %%mm4 \n\t"
+ "punpckhbw %%mm2, %%mm6 \n\t"
#if 1
- MOVNTQ" %%mm5, (%2, %%"REG_a", 2)\n\t"
- MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2)\n\t"
- MOVNTQ" %%mm4, (%3, %%"REG_a", 2)\n\t"
- MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm5, (%2, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm4, (%3, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2) \n\t"
#else
- "movq %%mm5, (%2, %%"REG_a", 2) \n\t"
- "movq %%mm7, 8(%2, %%"REG_a", 2)\n\t"
- "movq %%mm4, (%3, %%"REG_a", 2) \n\t"
- "movq %%mm6, 8(%3, %%"REG_a", 2)\n\t"
-#endif
- "add $8, %%"REG_a" \n\t"
- " js 1b \n\t"
- :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
- "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
- "g" (-mmxSize)
- : "%"REG_a
-
- );
+ "movq %%mm5, (%2, %%"REG_a", 2) \n\t"
+ "movq %%mm7, 8(%2, %%"REG_a", 2) \n\t"
+ "movq %%mm4, (%3, %%"REG_a", 2) \n\t"
+ "movq %%mm6, 8(%3, %%"REG_a", 2) \n\t"
+#endif
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
+ "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
+ "g" (-mmxSize)
+ : "%"REG_a
+
+ );
#else
- const long mmxSize=1;
-#endif
- dst[0 ]= (3*src[0] + src[srcStride])>>2;
- dst[dstStride]= ( src[0] + 3*src[srcStride])>>2;
-
- for(x=mmxSize-1; x<srcWidth-1; x++){
- dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2;
- dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2;
- dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2;
- dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2;
- }
- dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2;
- dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;
-
- dst+=dstStride*2;
- src+=srcStride;
- }
-
- // last line
+ const long mmxSize=1;
+#endif
+ dst[0 ]= (3*src[0] + src[srcStride])>>2;
+ dst[dstStride]= ( src[0] + 3*src[srcStride])>>2;
+
+ for (x=mmxSize-1; x<srcWidth-1; x++){
+ dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2;
+ dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2;
+ dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2;
+ dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2;
+ }
+ dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2;
+ dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;
+
+ dst+=dstStride*2;
+ src+=srcStride;
+ }
+
+ // last line
#if 1
- dst[0]= src[0];
-
- for(x=0; x<srcWidth-1; x++){
- dst[2*x+1]= (3*src[x] + src[x+1])>>2;
- dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
- }
- dst[2*srcWidth-1]= src[srcWidth-1];
+ dst[0]= src[0];
+
+ for (x=0; x<srcWidth-1; x++){
+ dst[2*x+1]= (3*src[x] + src[x+1])>>2;
+ dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
+ }
+ dst[2*srcWidth-1]= src[srcWidth-1];
#else
- for(x=0; x<srcWidth; x++){
- dst[2*x+0]=
- dst[2*x+1]= src[x];
- }
+ for (x=0; x<srcWidth; x++){
+ dst[2*x+0]=
+ dst[2*x+1]= src[x];
+ }
#endif
#ifdef HAVE_MMX
-asm volatile( EMMS" \n\t"
- SFENCE" \n\t"
- :::"memory");
+asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
#endif
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
- * problem for anyone then tell me, and ill fix it)
- * chrominance data is only taken from every secound line others are ignored FIXME write HQ version
+ * Height should be a multiple of 2 and width should be a multiple of 16 (if
+ * this is a problem for anyone then tell me, and I will fix it).
+ * Chrominance data is only taken from every second line, others are ignored.
+ * FIXME: Write HQ version.
*/
static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
- long width, long height,
- long lumStride, long chromStride, long srcStride)
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
{
- long y;
- const long chromWidth= width>>1;
- for(y=0; y<height; y+=2)
- {
+ long y;
+ const long chromWidth= width>>1;
+ for (y=0; y<height; y+=2)
+ {
#ifdef HAVE_MMX
- asm volatile(
- "xorl %%eax, %%eax \n\t"
- "pcmpeqw %%mm7, %%mm7 \n\t"
- "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 64(%0, %%eax, 4) \n\t"
- "movq (%0, %%eax, 4), %%mm0 \n\t" // UYVY UYVY(0)
- "movq 8(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(4)
- "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
- "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
- "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
- "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4)
- "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
- "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
- "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
- "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
-
- MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t"
-
- "movq 16(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(8)
- "movq 24(%0, %%eax, 4), %%mm2 \n\t" // UYVY UYVY(12)
- "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
- "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
- "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
- "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12)
- "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
- "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
- "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
- "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
-
- MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t"
-
- "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
- "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
- "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
- "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
- "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
- "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
- "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
- "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
-
- MOVNTQ" %%mm0, (%3, %%eax) \n\t"
- MOVNTQ" %%mm2, (%2, %%eax) \n\t"
-
- "addl $8, %%eax \n\t"
- "cmpl %4, %%eax \n\t"
- " jb 1b \n\t"
- ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
- : "memory", "%eax"
- );
-
- ydst += lumStride;
- src += srcStride;
-
- asm volatile(
- "xorl %%eax, %%eax \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 64(%0, %%eax, 4) \n\t"
- "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
- "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4)
- "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8)
- "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12)
- "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
- "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
- "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
- "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
- "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
- "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
-
- MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t"
- MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t"
-
- "addl $8, %%eax \n\t"
- "cmpl %4, %%eax \n\t"
- " jb 1b \n\t"
-
- ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
- : "memory", "%eax"
- );
+ asm volatile(
+ "xorl %%eax, %%eax \n\t"
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%eax, 4) \n\t"
+ "movq (%0, %%eax, 4), %%mm0 \n\t" // UYVY UYVY(0)
+ "movq 8(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(4)
+ "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
+ "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
+ "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
+ "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4)
+ "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
+ "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
+
+ MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t"
+
+ "movq 16(%0, %%eax, 4), %%mm1 \n\t" // UYVY UYVY(8)
+ "movq 24(%0, %%eax, 4), %%mm2 \n\t" // UYVY UYVY(12)
+ "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
+ "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
+ "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
+ "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
+ "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t"
+
+ "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
+ "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
+ "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
+ "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
+ "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
+ "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
+ "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
+ "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
+
+ MOVNTQ" %%mm0, (%3, %%eax) \n\t"
+ MOVNTQ" %%mm2, (%2, %%eax) \n\t"
+
+ "addl $8, %%eax \n\t"
+ "cmpl %4, %%eax \n\t"
+ " jb 1b \n\t"
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%eax"
+ );
+
+ ydst += lumStride;
+ src += srcStride;
+
+ asm volatile(
+ "xorl %%eax, %%eax \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%eax, 4) \n\t"
+ "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4)
+ "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12)
+ "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
+ "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
+ "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
+ "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
+ "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
+ "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
+
+ MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t"
+ MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t"
+
+ "addl $8, %%eax \n\t"
+ "cmpl %4, %%eax \n\t"
+ " jb 1b \n\t"
+
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
+ : "memory", "%eax"
+ );
#else
- long i;
- for(i=0; i<chromWidth; i++)
- {
- udst[i] = src[4*i+0];
- ydst[2*i+0] = src[4*i+1];
- vdst[i] = src[4*i+2];
- ydst[2*i+1] = src[4*i+3];
- }
- ydst += lumStride;
- src += srcStride;
-
- for(i=0; i<chromWidth; i++)
- {
- ydst[2*i+0] = src[4*i+1];
- ydst[2*i+1] = src[4*i+3];
- }
-#endif
- udst += chromStride;
- vdst += chromStride;
- ydst += lumStride;
- src += srcStride;
- }
+ long i;
+ for (i=0; i<chromWidth; i++)
+ {
+ udst[i] = src[4*i+0];
+ ydst[2*i+0] = src[4*i+1];
+ vdst[i] = src[4*i+2];
+ ydst[2*i+1] = src[4*i+3];
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for (i=0; i<chromWidth; i++)
+ {
+ ydst[2*i+0] = src[4*i+1];
+ ydst[2*i+1] = src[4*i+3];
+ }
+#endif
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
#ifdef HAVE_MMX
-asm volatile( EMMS" \n\t"
- SFENCE" \n\t"
- :::"memory");
+asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
#endif
}
/**
- *
- * height should be a multiple of 2 and width should be a multiple of 2 (if this is a
- * problem for anyone then tell me, and ill fix it)
- * chrominance data is only taken from every secound line others are ignored in the C version FIXME write HQ version
+ * Height should be a multiple of 2 and width should be a multiple of 2 (if
+ * this is a problem for anyone then tell me, and I will fix it).
+ * Chrominance data is only taken from every second line,
+ * others are ignored in the C version.
+ * FIXME: Write HQ version.
*/
static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
- long width, long height,
- long lumStride, long chromStride, long srcStride)
+ long width, long height,
+ long lumStride, long chromStride, long srcStride)
{
- long y;
- const long chromWidth= width>>1;
+ long y;
+ const long chromWidth= width>>1;
#ifdef HAVE_MMX
- for(y=0; y<height-2; y+=2)
- {
- long i;
- for(i=0; i<2; i++)
- {
- asm volatile(
- "mov %2, %%"REG_a" \n\t"
- "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
- "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 64(%0, %%"REG_d") \n\t"
- "movd (%0, %%"REG_d"), %%mm0 \n\t"
- "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
- "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "pmaddwd %%mm6, %%mm0 \n\t"
- "pmaddwd %%mm6, %%mm1 \n\t"
- "pmaddwd %%mm6, %%mm2 \n\t"
- "pmaddwd %%mm6, %%mm3 \n\t"
+ for (y=0; y<height-2; y+=2)
+ {
+ long i;
+ for (i=0; i<2; i++)
+ {
+ asm volatile(
+ "mov %2, %%"REG_a" \n\t"
+ "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_d") \n\t"
+ "movd (%0, %%"REG_d"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
- "psrad $8, %%mm0 \n\t"
- "psrad $8, %%mm1 \n\t"
- "psrad $8, %%mm2 \n\t"
- "psrad $8, %%mm3 \n\t"
-#endif
- "packssdw %%mm1, %%mm0 \n\t"
- "packssdw %%mm3, %%mm2 \n\t"
- "pmaddwd %%mm5, %%mm0 \n\t"
- "pmaddwd %%mm5, %%mm2 \n\t"
- "packssdw %%mm2, %%mm0 \n\t"
- "psraw $7, %%mm0 \n\t"
-
- "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
- "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
- "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "pmaddwd %%mm6, %%mm4 \n\t"
- "pmaddwd %%mm6, %%mm1 \n\t"
- "pmaddwd %%mm6, %%mm2 \n\t"
- "pmaddwd %%mm6, %%mm3 \n\t"
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "packssdw %%mm2, %%mm0 \n\t"
+ "psraw $7, %%mm0 \n\t"
+
+ "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
- "psrad $8, %%mm4 \n\t"
- "psrad $8, %%mm1 \n\t"
- "psrad $8, %%mm2 \n\t"
- "psrad $8, %%mm3 \n\t"
-#endif
- "packssdw %%mm1, %%mm4 \n\t"
- "packssdw %%mm3, %%mm2 \n\t"
- "pmaddwd %%mm5, %%mm4 \n\t"
- "pmaddwd %%mm5, %%mm2 \n\t"
- "add $24, %%"REG_d" \n\t"
- "packssdw %%mm2, %%mm4 \n\t"
- "psraw $7, %%mm4 \n\t"
-
- "packuswb %%mm4, %%mm0 \n\t"
- "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
-
- MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
- "add $8, %%"REG_a" \n\t"
- " js 1b \n\t"
- : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
- : "%"REG_a, "%"REG_d
- );
- ydst += lumStride;
- src += srcStride;
- }
- src -= srcStride*2;
- asm volatile(
- "mov %4, %%"REG_a" \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
- "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
- "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
- "add %%"REG_d", %%"REG_d" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 64(%0, %%"REG_d") \n\t"
- PREFETCH" 64(%1, %%"REG_d") \n\t"
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "add $24, %%"REG_d" \n\t"
+ "packssdw %%mm2, %%mm4 \n\t"
+ "psraw $7, %%mm4 \n\t"
+
+ "packuswb %%mm4, %%mm0 \n\t"
+ "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
+
+ MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
+ : "%"REG_a, "%"REG_d
+ );
+ ydst += lumStride;
+ src += srcStride;
+ }
+ src -= srcStride*2;
+ asm volatile(
+ "mov %4, %%"REG_a" \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
+ "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
+ "add %%"REG_d", %%"REG_d" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_d") \n\t"
+ PREFETCH" 64(%1, %%"REG_d") \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- "movq (%0, %%"REG_d"), %%mm0 \n\t"
- "movq (%1, %%"REG_d"), %%mm1 \n\t"
- "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
- "movq 6(%1, %%"REG_d"), %%mm3 \n\t"
- PAVGB" %%mm1, %%mm0 \n\t"
- PAVGB" %%mm3, %%mm2 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm3 \n\t"
- "psrlq $24, %%mm0 \n\t"
- "psrlq $24, %%mm2 \n\t"
- PAVGB" %%mm1, %%mm0 \n\t"
- PAVGB" %%mm3, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
+ "movq (%0, %%"REG_d"), %%mm0 \n\t"
+ "movq (%1, %%"REG_d"), %%mm1 \n\t"
+ "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
+ "movq 6(%1, %%"REG_d"), %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm0 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm0 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
#else
- "movd (%0, %%"REG_d"), %%mm0 \n\t"
- "movd (%1, %%"REG_d"), %%mm1 \n\t"
- "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
- "movd 3(%1, %%"REG_d"), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "paddw %%mm1, %%mm0 \n\t"
- "paddw %%mm3, %%mm2 \n\t"
- "paddw %%mm2, %%mm0 \n\t"
- "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
- "movd 6(%1, %%"REG_d"), %%mm1 \n\t"
- "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
- "movd 9(%1, %%"REG_d"), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "paddw %%mm1, %%mm4 \n\t"
- "paddw %%mm3, %%mm2 \n\t"
- "paddw %%mm4, %%mm2 \n\t"
- "psrlw $2, %%mm0 \n\t"
- "psrlw $2, %%mm2 \n\t"
-#endif
- "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
- "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
-
- "pmaddwd %%mm0, %%mm1 \n\t"
- "pmaddwd %%mm2, %%mm3 \n\t"
- "pmaddwd %%mm6, %%mm0 \n\t"
- "pmaddwd %%mm6, %%mm2 \n\t"
+ "movd (%0, %%"REG_d"), %%mm0 \n\t"
+ "movd (%1, %%"REG_d"), %%mm1 \n\t"
+ "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 3(%1, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 6(%1, %%"REG_d"), %%mm1 \n\t"
+ "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 9(%1, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm4, %%mm2 \n\t"
+ "psrlw $2, %%mm0 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm0, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
- "psrad $8, %%mm0 \n\t"
- "psrad $8, %%mm1 \n\t"
- "psrad $8, %%mm2 \n\t"
- "psrad $8, %%mm3 \n\t"
-#endif
- "packssdw %%mm2, %%mm0 \n\t"
- "packssdw %%mm3, %%mm1 \n\t"
- "pmaddwd %%mm5, %%mm0 \n\t"
- "pmaddwd %%mm5, %%mm1 \n\t"
- "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
- "psraw $7, %%mm0 \n\t"
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
+ "psraw $7, %%mm0 \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
- "movq 12(%1, %%"REG_d"), %%mm1 \n\t"
- "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
- "movq 18(%1, %%"REG_d"), %%mm3 \n\t"
- PAVGB" %%mm1, %%mm4 \n\t"
- PAVGB" %%mm3, %%mm2 \n\t"
- "movq %%mm4, %%mm1 \n\t"
- "movq %%mm2, %%mm3 \n\t"
- "psrlq $24, %%mm4 \n\t"
- "psrlq $24, %%mm2 \n\t"
- PAVGB" %%mm1, %%mm4 \n\t"
- PAVGB" %%mm3, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
+ "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movq 12(%1, %%"REG_d"), %%mm1 \n\t"
+ "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
+ "movq 18(%1, %%"REG_d"), %%mm3 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "movq %%mm4, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm4 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB" %%mm1, %%mm4 \n\t"
+ PAVGB" %%mm3, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
#else
- "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
- "movd 12(%1, %%"REG_d"), %%mm1 \n\t"
- "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
- "movd 15(%1, %%"REG_d"), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "paddw %%mm1, %%mm4 \n\t"
- "paddw %%mm3, %%mm2 \n\t"
- "paddw %%mm2, %%mm4 \n\t"
- "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
- "movd 18(%1, %%"REG_d"), %%mm1 \n\t"
- "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
- "movd 21(%1, %%"REG_d"), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm5 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "paddw %%mm1, %%mm5 \n\t"
- "paddw %%mm3, %%mm2 \n\t"
- "paddw %%mm5, %%mm2 \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
- "psrlw $2, %%mm4 \n\t"
- "psrlw $2, %%mm2 \n\t"
-#endif
- "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
- "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
-
- "pmaddwd %%mm4, %%mm1 \n\t"
- "pmaddwd %%mm2, %%mm3 \n\t"
- "pmaddwd %%mm6, %%mm4 \n\t"
- "pmaddwd %%mm6, %%mm2 \n\t"
+ "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 12(%1, %%"REG_d"), %%mm1 \n\t"
+ "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 15(%1, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm4 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm2, %%mm4 \n\t"
+ "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
+ "movd 18(%1, %%"REG_d"), %%mm1 \n\t"
+ "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 21(%1, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "paddw %%mm1, %%mm5 \n\t"
+ "paddw %%mm3, %%mm2 \n\t"
+ "paddw %%mm5, %%mm2 \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm4, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
- "psrad $8, %%mm4 \n\t"
- "psrad $8, %%mm1 \n\t"
- "psrad $8, %%mm2 \n\t"
- "psrad $8, %%mm3 \n\t"
-#endif
- "packssdw %%mm2, %%mm4 \n\t"
- "packssdw %%mm3, %%mm1 \n\t"
- "pmaddwd %%mm5, %%mm4 \n\t"
- "pmaddwd %%mm5, %%mm1 \n\t"
- "add $24, %%"REG_d" \n\t"
- "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
- "psraw $7, %%mm4 \n\t"
-
- "movq %%mm0, %%mm1 \n\t"
- "punpckldq %%mm4, %%mm0 \n\t"
- "punpckhdq %%mm4, %%mm1 \n\t"
- "packsswb %%mm1, %%mm0 \n\t"
- "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
- "movd %%mm0, (%2, %%"REG_a") \n\t"
- "punpckhdq %%mm0, %%mm0 \n\t"
- "movd %%mm0, (%3, %%"REG_a") \n\t"
- "add $4, %%"REG_a" \n\t"
- " js 1b \n\t"
- : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
- : "%"REG_a, "%"REG_d
- );
-
- udst += chromStride;
- vdst += chromStride;
- src += srcStride*2;
- }
-
- asm volatile( EMMS" \n\t"
- SFENCE" \n\t"
- :::"memory");
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "add $24, %%"REG_d" \n\t"
+ "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
+ "psraw $7, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm1 \n\t"
+ "punpckldq %%mm4, %%mm0 \n\t"
+ "punpckhdq %%mm4, %%mm1 \n\t"
+ "packsswb %%mm1, %%mm0 \n\t"
+ "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
+ "movd %%mm0, (%2, %%"REG_a") \n\t"
+ "punpckhdq %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
+ : "%"REG_a, "%"REG_d
+ );
+
+ udst += chromStride;
+ vdst += chromStride;
+ src += srcStride*2;
+ }
+
+ asm volatile( EMMS" \n\t"
+ SFENCE" \n\t"
+ :::"memory");
#else
- y=0;
-#endif
- for(; y<height; y+=2)
- {
- long i;
- for(i=0; i<chromWidth; i++)
- {
- unsigned int b= src[6*i+0];
- unsigned int g= src[6*i+1];
- unsigned int r= src[6*i+2];
-
- unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
- unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128;
- unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128;
-
- udst[i] = U;
- vdst[i] = V;
- ydst[2*i] = Y;
-
- b= src[6*i+3];
- g= src[6*i+4];
- r= src[6*i+5];
-
- Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
- ydst[2*i+1] = Y;
- }
- ydst += lumStride;
- src += srcStride;
-
- for(i=0; i<chromWidth; i++)
- {
- unsigned int b= src[6*i+0];
- unsigned int g= src[6*i+1];
- unsigned int r= src[6*i+2];
-
- unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
-
- ydst[2*i] = Y;
-
- b= src[6*i+3];
- g= src[6*i+4];
- r= src[6*i+5];
-
- Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
- ydst[2*i+1] = Y;
- }
- udst += chromStride;
- vdst += chromStride;
- ydst += lumStride;
- src += srcStride;
- }
+ y=0;
+#endif
+ for (; y<height; y+=2)
+ {
+ long i;
+ for (i=0; i<chromWidth; i++)
+ {
+ unsigned int b = src[6*i+0];
+ unsigned int g = src[6*i+1];
+ unsigned int r = src[6*i+2];
+
+ unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ unsigned int V = ((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128;
+ unsigned int U = ((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128;
+
+ udst[i] = U;
+ vdst[i] = V;
+ ydst[2*i] = Y;
+
+ b = src[6*i+3];
+ g = src[6*i+4];
+ r = src[6*i+5];
+
+ Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ ydst[2*i+1] = Y;
+ }
+ ydst += lumStride;
+ src += srcStride;
+
+ for (i=0; i<chromWidth; i++)
+ {
+ unsigned int b = src[6*i+0];
+ unsigned int g = src[6*i+1];
+ unsigned int r = src[6*i+2];
+
+ unsigned int Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+
+ ydst[2*i] = Y;
+
+ b = src[6*i+3];
+ g = src[6*i+4];
+ r = src[6*i+5];
+
+ Y = ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
+ ydst[2*i+1] = Y;
+ }
+ udst += chromStride;
+ vdst += chromStride;
+ ydst += lumStride;
+ src += srcStride;
+ }
}
void RENAME(interleaveBytes)(uint8_t *src1, uint8_t *src2, uint8_t *dest,
- long width, long height, long src1Stride,
- long src2Stride, long dstStride){
- long h;
+ long width, long height, long src1Stride,
+ long src2Stride, long dstStride){
+ long h;
- for(h=0; h < height; h++)
- {
- long w;
+ for (h=0; h < height; h++)
+ {
+ long w;
#ifdef HAVE_MMX
#ifdef HAVE_SSE2
- asm(
- "xor %%"REG_a", %%"REG_a" \n\t"
- "1: \n\t"
- PREFETCH" 64(%1, %%"REG_a") \n\t"
- PREFETCH" 64(%2, %%"REG_a") \n\t"
- "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
- "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
- "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
- "punpcklbw %%xmm2, %%xmm0 \n\t"
- "punpckhbw %%xmm2, %%xmm1 \n\t"
- "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t"
- "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t"
- "add $16, %%"REG_a" \n\t"
- "cmp %3, %%"REG_a" \n\t"
- " jb 1b \n\t"
- ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
- : "memory", "%"REG_a""
- );
+ asm(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "1: \n\t"
+ PREFETCH" 64(%1, %%"REG_a") \n\t"
+ PREFETCH" 64(%2, %%"REG_a") \n\t"
+ "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
+ "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
+ "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
+ "punpcklbw %%xmm2, %%xmm0 \n\t"
+ "punpckhbw %%xmm2, %%xmm1 \n\t"
+ "movntdq %%xmm0, (%0, %%"REG_a", 2) \n\t"
+ "movntdq %%xmm1, 16(%0, %%"REG_a", 2) \n\t"
+ "add $16, %%"REG_a" \n\t"
+ "cmp %3, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
+ : "memory", "%"REG_a""
+ );
#else
- asm(
- "xor %%"REG_a", %%"REG_a" \n\t"
- "1: \n\t"
- PREFETCH" 64(%1, %%"REG_a") \n\t"
- PREFETCH" 64(%2, %%"REG_a") \n\t"
- "movq (%1, %%"REG_a"), %%mm0 \n\t"
- "movq 8(%1, %%"REG_a"), %%mm2 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm3 \n\t"
- "movq (%2, %%"REG_a"), %%mm4 \n\t"
- "movq 8(%2, %%"REG_a"), %%mm5 \n\t"
- "punpcklbw %%mm4, %%mm0 \n\t"
- "punpckhbw %%mm4, %%mm1 \n\t"
- "punpcklbw %%mm5, %%mm2 \n\t"
- "punpckhbw %%mm5, %%mm3 \n\t"
- MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t"
- MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t"
- MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t"
- MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t"
- "add $16, %%"REG_a" \n\t"
- "cmp %3, %%"REG_a" \n\t"
- " jb 1b \n\t"
- ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
- : "memory", "%"REG_a
- );
-#endif
- for(w= (width&(~15)); w < width; w++)
- {
- dest[2*w+0] = src1[w];
- dest[2*w+1] = src2[w];
- }
+ asm(
+ "xor %%"REG_a", %%"REG_a" \n\t"
+ "1: \n\t"
+ PREFETCH" 64(%1, %%"REG_a") \n\t"
+ PREFETCH" 64(%2, %%"REG_a") \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a"), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq (%2, %%"REG_a"), %%mm4 \n\t"
+ "movq 8(%2, %%"REG_a"), %%mm5 \n\t"
+ "punpcklbw %%mm4, %%mm0 \n\t"
+ "punpckhbw %%mm4, %%mm1 \n\t"
+ "punpcklbw %%mm5, %%mm2 \n\t"
+ "punpckhbw %%mm5, %%mm3 \n\t"
+ MOVNTQ" %%mm0, (%0, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2) \n\t"
+ MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2) \n\t"
+ "add $16, %%"REG_a" \n\t"
+ "cmp %3, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+ ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
+ : "memory", "%"REG_a
+ );
+#endif
+ for (w= (width&(~15)); w < width; w++)
+ {
+ dest[2*w+0] = src1[w];
+ dest[2*w+1] = src2[w];
+ }
#else
- for(w=0; w < width; w++)
- {
- dest[2*w+0] = src1[w];
- dest[2*w+1] = src2[w];
- }
+ for (w=0; w < width; w++)
+ {
+ dest[2*w+0] = src1[w];
+ dest[2*w+1] = src2[w];
+ }
#endif
- dest += dstStride;
+ dest += dstStride;
src1 += src1Stride;
src2 += src2Stride;
- }
+ }
#ifdef HAVE_MMX
- asm(
- EMMS" \n\t"
- SFENCE" \n\t"
- ::: "memory"
- );
+ asm(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
#endif
}
static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
- uint8_t *dst1, uint8_t *dst2,
- long width, long height,
- long srcStride1, long srcStride2,
- long dstStride1, long dstStride2)
+ uint8_t *dst1, uint8_t *dst2,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long dstStride1, long dstStride2)
{
long y,x,w,h;
w=width/2; h=height/2;
#ifdef HAVE_MMX
asm volatile(
- PREFETCH" %0\n\t"
- PREFETCH" %1\n\t"
- ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
-#endif
- for(y=0;y<h;y++){
- const uint8_t* s1=src1+srcStride1*(y>>1);
- uint8_t* d=dst1+dstStride1*y;
- x=0;
+ PREFETCH" %0 \n\t"
+ PREFETCH" %1 \n\t"
+ ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
+#endif
+ for (y=0;y<h;y++){
+ const uint8_t* s1=src1+srcStride1*(y>>1);
+ uint8_t* d=dst1+dstStride1*y;
+ x=0;
#ifdef HAVE_MMX
- for(;x<w-31;x+=32)
- {
- asm volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq 8%1, %%mm2\n\t"
- "movq 16%1, %%mm4\n\t"
- "movq 24%1, %%mm6\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm2, %%mm3\n\t"
- "movq %%mm4, %%mm5\n\t"
- "movq %%mm6, %%mm7\n\t"
- "punpcklbw %%mm0, %%mm0\n\t"
- "punpckhbw %%mm1, %%mm1\n\t"
- "punpcklbw %%mm2, %%mm2\n\t"
- "punpckhbw %%mm3, %%mm3\n\t"
- "punpcklbw %%mm4, %%mm4\n\t"
- "punpckhbw %%mm5, %%mm5\n\t"
- "punpcklbw %%mm6, %%mm6\n\t"
- "punpckhbw %%mm7, %%mm7\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm1, 8%0\n\t"
- MOVNTQ" %%mm2, 16%0\n\t"
- MOVNTQ" %%mm3, 24%0\n\t"
- MOVNTQ" %%mm4, 32%0\n\t"
- MOVNTQ" %%mm5, 40%0\n\t"
- MOVNTQ" %%mm6, 48%0\n\t"
- MOVNTQ" %%mm7, 56%0"
- :"=m"(d[2*x])
- :"m"(s1[x])
- :"memory");
- }
-#endif
- for(;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
+ for (;x<w-31;x+=32)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "movq 16%1, %%mm4 \n\t"
+ "movq 24%1, %%mm6 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "movq %%mm6, %%mm7 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpcklbw %%mm2, %%mm2 \n\t"
+ "punpckhbw %%mm3, %%mm3 \n\t"
+ "punpcklbw %%mm4, %%mm4 \n\t"
+ "punpckhbw %%mm5, %%mm5 \n\t"
+ "punpcklbw %%mm6, %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm7 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm2, 16%0 \n\t"
+ MOVNTQ" %%mm3, 24%0 \n\t"
+ MOVNTQ" %%mm4, 32%0 \n\t"
+ MOVNTQ" %%mm5, 40%0 \n\t"
+ MOVNTQ" %%mm6, 48%0 \n\t"
+ MOVNTQ" %%mm7, 56%0"
+ :"=m"(d[2*x])
+ :"m"(s1[x])
+ :"memory");
+ }
+#endif
+ for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
}
- for(y=0;y<h;y++){
- const uint8_t* s2=src2+srcStride2*(y>>1);
- uint8_t* d=dst2+dstStride2*y;
- x=0;
+ for (y=0;y<h;y++){
+ const uint8_t* s2=src2+srcStride2*(y>>1);
+ uint8_t* d=dst2+dstStride2*y;
+ x=0;
#ifdef HAVE_MMX
- for(;x<w-31;x+=32)
- {
- asm volatile(
- PREFETCH" 32%1\n\t"
- "movq %1, %%mm0\n\t"
- "movq 8%1, %%mm2\n\t"
- "movq 16%1, %%mm4\n\t"
- "movq 24%1, %%mm6\n\t"
- "movq %%mm0, %%mm1\n\t"
- "movq %%mm2, %%mm3\n\t"
- "movq %%mm4, %%mm5\n\t"
- "movq %%mm6, %%mm7\n\t"
- "punpcklbw %%mm0, %%mm0\n\t"
- "punpckhbw %%mm1, %%mm1\n\t"
- "punpcklbw %%mm2, %%mm2\n\t"
- "punpckhbw %%mm3, %%mm3\n\t"
- "punpcklbw %%mm4, %%mm4\n\t"
- "punpckhbw %%mm5, %%mm5\n\t"
- "punpcklbw %%mm6, %%mm6\n\t"
- "punpckhbw %%mm7, %%mm7\n\t"
- MOVNTQ" %%mm0, %0\n\t"
- MOVNTQ" %%mm1, 8%0\n\t"
- MOVNTQ" %%mm2, 16%0\n\t"
- MOVNTQ" %%mm3, 24%0\n\t"
- MOVNTQ" %%mm4, 32%0\n\t"
- MOVNTQ" %%mm5, 40%0\n\t"
- MOVNTQ" %%mm6, 48%0\n\t"
- MOVNTQ" %%mm7, 56%0"
- :"=m"(d[2*x])
- :"m"(s2[x])
- :"memory");
- }
-#endif
- for(;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
+ for (;x<w-31;x+=32)
+ {
+ asm volatile(
+ PREFETCH" 32%1 \n\t"
+ "movq %1, %%mm0 \n\t"
+ "movq 8%1, %%mm2 \n\t"
+ "movq 16%1, %%mm4 \n\t"
+ "movq 24%1, %%mm6 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "movq %%mm4, %%mm5 \n\t"
+ "movq %%mm6, %%mm7 \n\t"
+ "punpcklbw %%mm0, %%mm0 \n\t"
+ "punpckhbw %%mm1, %%mm1 \n\t"
+ "punpcklbw %%mm2, %%mm2 \n\t"
+ "punpckhbw %%mm3, %%mm3 \n\t"
+ "punpcklbw %%mm4, %%mm4 \n\t"
+ "punpckhbw %%mm5, %%mm5 \n\t"
+ "punpcklbw %%mm6, %%mm6 \n\t"
+ "punpckhbw %%mm7, %%mm7 \n\t"
+ MOVNTQ" %%mm0, %0 \n\t"
+ MOVNTQ" %%mm1, 8%0 \n\t"
+ MOVNTQ" %%mm2, 16%0 \n\t"
+ MOVNTQ" %%mm3, 24%0 \n\t"
+ MOVNTQ" %%mm4, 32%0 \n\t"
+ MOVNTQ" %%mm5, 40%0 \n\t"
+ MOVNTQ" %%mm6, 48%0 \n\t"
+ MOVNTQ" %%mm7, 56%0"
+ :"=m"(d[2*x])
+ :"m"(s2[x])
+ :"memory");
+ }
+#endif
+ for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
}
#ifdef HAVE_MMX
- asm(
- EMMS" \n\t"
- SFENCE" \n\t"
- ::: "memory"
- );
+ asm(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
#endif
}
static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
- uint8_t *dst,
- long width, long height,
- long srcStride1, long srcStride2,
- long srcStride3, long dstStride)
+ uint8_t *dst,
+ long width, long height,
+ long srcStride1, long srcStride2,
+ long srcStride3, long dstStride)
{
long y,x,w,h;
w=width/2; h=height;
- for(y=0;y<h;y++){
- const uint8_t* yp=src1+srcStride1*y;
- const uint8_t* up=src2+srcStride2*(y>>2);
- const uint8_t* vp=src3+srcStride3*(y>>2);
- uint8_t* d=dst+dstStride*y;
- x=0;
+ for (y=0;y<h;y++){
+ const uint8_t* yp=src1+srcStride1*y;
+ const uint8_t* up=src2+srcStride2*(y>>2);
+ const uint8_t* vp=src3+srcStride3*(y>>2);
+ uint8_t* d=dst+dstStride*y;
+ x=0;
#ifdef HAVE_MMX
- for(;x<w-7;x+=8)
- {
- asm volatile(
- PREFETCH" 32(%1, %0)\n\t"
- PREFETCH" 32(%2, %0)\n\t"
- PREFETCH" 32(%3, %0)\n\t"
- "movq (%1, %0, 4), %%mm0\n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
- "movq (%2, %0), %%mm1\n\t" /* U0U1U2U3U4U5U6U7 */
- "movq (%3, %0), %%mm2\n\t" /* V0V1V2V3V4V5V6V7 */
- "movq %%mm0, %%mm3\n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
- "movq %%mm1, %%mm4\n\t" /* U0U1U2U3U4U5U6U7 */
- "movq %%mm2, %%mm5\n\t" /* V0V1V2V3V4V5V6V7 */
- "punpcklbw %%mm1, %%mm1\n\t" /* U0U0 U1U1 U2U2 U3U3 */
- "punpcklbw %%mm2, %%mm2\n\t" /* V0V0 V1V1 V2V2 V3V3 */
- "punpckhbw %%mm4, %%mm4\n\t" /* U4U4 U5U5 U6U6 U7U7 */
- "punpckhbw %%mm5, %%mm5\n\t" /* V4V4 V5V5 V6V6 V7V7 */
-
- "movq %%mm1, %%mm6\n\t"
- "punpcklbw %%mm2, %%mm1\n\t" /* U0V0 U0V0 U1V1 U1V1*/
- "punpcklbw %%mm1, %%mm0\n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
- "punpckhbw %%mm1, %%mm3\n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
- MOVNTQ" %%mm0, (%4, %0, 8)\n\t"
- MOVNTQ" %%mm3, 8(%4, %0, 8)\n\t"
-
- "punpckhbw %%mm2, %%mm6\n\t" /* U2V2 U2V2 U3V3 U3V3*/
- "movq 8(%1, %0, 4), %%mm0\n\t"
- "movq %%mm0, %%mm3\n\t"
- "punpcklbw %%mm6, %%mm0\n\t" /* Y U2 Y V2 Y U2 Y V2*/
- "punpckhbw %%mm6, %%mm3\n\t" /* Y U3 Y V3 Y U3 Y V3*/
- MOVNTQ" %%mm0, 16(%4, %0, 8)\n\t"
- MOVNTQ" %%mm3, 24(%4, %0, 8)\n\t"
-
- "movq %%mm4, %%mm6\n\t"
- "movq 16(%1, %0, 4), %%mm0\n\t"
- "movq %%mm0, %%mm3\n\t"
- "punpcklbw %%mm5, %%mm4\n\t"
- "punpcklbw %%mm4, %%mm0\n\t" /* Y U4 Y V4 Y U4 Y V4*/
- "punpckhbw %%mm4, %%mm3\n\t" /* Y U5 Y V5 Y U5 Y V5*/
- MOVNTQ" %%mm0, 32(%4, %0, 8)\n\t"
- MOVNTQ" %%mm3, 40(%4, %0, 8)\n\t"
-
- "punpckhbw %%mm5, %%mm6\n\t"
- "movq 24(%1, %0, 4), %%mm0\n\t"
- "movq %%mm0, %%mm3\n\t"
- "punpcklbw %%mm6, %%mm0\n\t" /* Y U6 Y V6 Y U6 Y V6*/
- "punpckhbw %%mm6, %%mm3\n\t" /* Y U7 Y V7 Y U7 Y V7*/
- MOVNTQ" %%mm0, 48(%4, %0, 8)\n\t"
- MOVNTQ" %%mm3, 56(%4, %0, 8)\n\t"
-
- : "+r" (x)
- : "r"(yp), "r" (up), "r"(vp), "r"(d)
- :"memory");
- }
-#endif
- for(; x<w; x++)
- {
- const long x2= x<<2;
- d[8*x+0]=yp[x2];
- d[8*x+1]=up[x];
- d[8*x+2]=yp[x2+1];
- d[8*x+3]=vp[x];
- d[8*x+4]=yp[x2+2];
- d[8*x+5]=up[x];
- d[8*x+6]=yp[x2+3];
- d[8*x+7]=vp[x];
- }
+ for (;x<w-7;x+=8)
+ {
+ asm volatile(
+ PREFETCH" 32(%1, %0) \n\t"
+ PREFETCH" 32(%2, %0) \n\t"
+ PREFETCH" 32(%3, %0) \n\t"
+ "movq (%1, %0, 4), %%mm0 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
+ "movq (%2, %0), %%mm1 \n\t" /* U0U1U2U3U4U5U6U7 */
+ "movq (%3, %0), %%mm2 \n\t" /* V0V1V2V3V4V5V6V7 */
+ "movq %%mm0, %%mm3 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
+ "movq %%mm1, %%mm4 \n\t" /* U0U1U2U3U4U5U6U7 */
+ "movq %%mm2, %%mm5 \n\t" /* V0V1V2V3V4V5V6V7 */
+ "punpcklbw %%mm1, %%mm1 \n\t" /* U0U0 U1U1 U2U2 U3U3 */
+ "punpcklbw %%mm2, %%mm2 \n\t" /* V0V0 V1V1 V2V2 V3V3 */
+ "punpckhbw %%mm4, %%mm4 \n\t" /* U4U4 U5U5 U6U6 U7U7 */
+ "punpckhbw %%mm5, %%mm5 \n\t" /* V4V4 V5V5 V6V6 V7V7 */
+
+ "movq %%mm1, %%mm6 \n\t"
+ "punpcklbw %%mm2, %%mm1 \n\t" /* U0V0 U0V0 U1V1 U1V1*/
+ "punpcklbw %%mm1, %%mm0 \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
+ "punpckhbw %%mm1, %%mm3 \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
+ MOVNTQ" %%mm0, (%4, %0, 8) \n\t"
+ MOVNTQ" %%mm3, 8(%4, %0, 8) \n\t"
+
+ "punpckhbw %%mm2, %%mm6 \n\t" /* U2V2 U2V2 U3V3 U3V3*/
+ "movq 8(%1, %0, 4), %%mm0 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklbw %%mm6, %%mm0 \n\t" /* Y U2 Y V2 Y U2 Y V2*/
+ "punpckhbw %%mm6, %%mm3 \n\t" /* Y U3 Y V3 Y U3 Y V3*/
+ MOVNTQ" %%mm0, 16(%4, %0, 8) \n\t"
+ MOVNTQ" %%mm3, 24(%4, %0, 8) \n\t"
+
+ "movq %%mm4, %%mm6 \n\t"
+ "movq 16(%1, %0, 4), %%mm0 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklbw %%mm5, %%mm4 \n\t"
+ "punpcklbw %%mm4, %%mm0 \n\t" /* Y U4 Y V4 Y U4 Y V4*/
+ "punpckhbw %%mm4, %%mm3 \n\t" /* Y U5 Y V5 Y U5 Y V5*/
+ MOVNTQ" %%mm0, 32(%4, %0, 8) \n\t"
+ MOVNTQ" %%mm3, 40(%4, %0, 8) \n\t"
+
+ "punpckhbw %%mm5, %%mm6 \n\t"
+ "movq 24(%1, %0, 4), %%mm0 \n\t"
+ "movq %%mm0, %%mm3 \n\t"
+ "punpcklbw %%mm6, %%mm0 \n\t" /* Y U6 Y V6 Y U6 Y V6*/
+ "punpckhbw %%mm6, %%mm3 \n\t" /* Y U7 Y V7 Y U7 Y V7*/
+ MOVNTQ" %%mm0, 48(%4, %0, 8) \n\t"
+ MOVNTQ" %%mm3, 56(%4, %0, 8) \n\t"
+
+ : "+r" (x)
+ : "r"(yp), "r" (up), "r"(vp), "r"(d)
+ :"memory");
+ }
+#endif
+ for (; x<w; x++)
+ {
+ const long x2 = x<<2;
+ d[8*x+0] = yp[x2];
+ d[8*x+1] = up[x];
+ d[8*x+2] = yp[x2+1];
+ d[8*x+3] = vp[x];
+ d[8*x+4] = yp[x2+2];
+ d[8*x+5] = up[x];
+ d[8*x+6] = yp[x2+3];
+ d[8*x+7] = vp[x];
+ }
}
#ifdef HAVE_MMX
- asm(
- EMMS" \n\t"
- SFENCE" \n\t"
- ::: "memory"
- );
+ asm(
+ EMMS" \n\t"
+ SFENCE" \n\t"
+ ::: "memory"
+ );
#endif
}
+
+static inline void RENAME(rgb2rgb_init)(void){
+ rgb15to16 = RENAME(rgb15to16);
+ rgb15to24 = RENAME(rgb15to24);
+ rgb15to32 = RENAME(rgb15to32);
+ rgb16to24 = RENAME(rgb16to24);
+ rgb16to32 = RENAME(rgb16to32);
+ rgb16to15 = RENAME(rgb16to15);
+ rgb24to16 = RENAME(rgb24to16);
+ rgb24to15 = RENAME(rgb24to15);
+ rgb24to32 = RENAME(rgb24to32);
+ rgb32to16 = RENAME(rgb32to16);
+ rgb32to15 = RENAME(rgb32to15);
+ rgb32to24 = RENAME(rgb32to24);
+ rgb24tobgr15 = RENAME(rgb24tobgr15);
+ rgb24tobgr16 = RENAME(rgb24tobgr16);
+ rgb24tobgr24 = RENAME(rgb24tobgr24);
+ rgb32tobgr32 = RENAME(rgb32tobgr32);
+ rgb32tobgr16 = RENAME(rgb32tobgr16);
+ rgb32tobgr15 = RENAME(rgb32tobgr15);
+ yv12toyuy2 = RENAME(yv12toyuy2);
+ yv12touyvy = RENAME(yv12touyvy);
+ yuv422ptoyuy2 = RENAME(yuv422ptoyuy2);
+ yuy2toyv12 = RENAME(yuy2toyv12);
+// uyvytoyv12 = RENAME(uyvytoyv12);
+// yvu9toyv12 = RENAME(yvu9toyv12);
+ planar2x = RENAME(planar2x);
+ rgb24toyv12 = RENAME(rgb24toyv12);
+ interleaveBytes = RENAME(interleaveBytes);
+ vu9_to_vu12 = RENAME(vu9_to_vu12);
+ yvu9_to_yuy2 = RENAME(yvu9_to_yuy2);
+}
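
The RENAME(rgb2rgb_init) hook added above fills the global function pointers (rgb15to16, yuy2toyv12, ...) declared in rgb2rgb.h with this template's per-CPU implementations. Below is a minimal sketch of the idiom this relies on: the template is presumably compiled several times with RENAME pasting a different suffix onto each symbol, and the dispatcher picks one init at runtime. The suffixes, the have_mmx flag and the scale example are illustrative assumptions, not the actual rgb2rgb.c contents.

    /* Sketch of the RENAME()/function-pointer dispatch idiom (assumed names). */
    #include <stdint.h>
    #include <stdio.h>

    static void (*scale)(uint8_t *dst, const uint8_t *src, long n);

    #define RENAME(a) a ## _C                 /* first "instantiation": plain C */
    static void RENAME(scale)(uint8_t *dst, const uint8_t *src, long n)
    { while (n--) *dst++ = *src++; }
    static void RENAME(init)(void) { scale = RENAME(scale); }
    #undef RENAME

    #define RENAME(a) a ## _MMX               /* second "instantiation": would hold asm */
    static void RENAME(scale)(uint8_t *dst, const uint8_t *src, long n)
    { while (n--) *dst++ = *src++; }          /* stand-in for the SIMD body */
    static void RENAME(init)(void) { scale = RENAME(scale); }
    #undef RENAME

    int main(void)
    {
        int have_mmx = 0;                     /* would come from CPU detection */
        if (have_mmx) init_MMX(); else init_C();

        uint8_t in[4] = {1, 2, 3, 4}, out[4];
        scale(out, in, 4);
        printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
        return 0;
    }
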
diff --git a/contrib/ffmpeg/libswscale/swscale-example.c b/contrib/ffmpeg/libswscale/swscale-example.c
index 40f04c7d0..e53c0b3eb 100644
--- a/contrib/ffmpeg/libswscale/swscale-example.c
+++ b/contrib/ffmpeg/libswscale/swscale-example.c
@@ -15,7 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
@@ -31,129 +31,129 @@
#include "rgb2rgb.h"
static uint64_t getSSD(uint8_t *src1, uint8_t *src2, int stride1, int stride2, int w, int h){
- int x,y;
- uint64_t ssd=0;
+ int x,y;
+ uint64_t ssd=0;
//printf("%d %d\n", w, h);
-
- for(y=0; y<h; y++){
- for(x=0; x<w; x++){
- int d= src1[x + y*stride1] - src2[x + y*stride2];
- ssd+= d*d;
+
+ for (y=0; y<h; y++){
+ for (x=0; x<w; x++){
+ int d= src1[x + y*stride1] - src2[x + y*stride2];
+ ssd+= d*d;
//printf("%d", abs(src1[x + y*stride1] - src2[x + y*stride2])/26 );
- }
+ }
//printf("\n");
- }
- return ssd;
+ }
+ return ssd;
}
// test by ref -> src -> dst -> out & compare out against ref
// ref & out are YV12
-static int doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcFormat, int dstFormat,
- int srcW, int srcH, int dstW, int dstH, int flags){
- uint8_t *src[3];
- uint8_t *dst[3];
- uint8_t *out[3];
- int srcStride[3], dstStride[3];
- int i;
- uint64_t ssdY, ssdU, ssdV;
- struct SwsContext *srcContext, *dstContext, *outContext;
- int res;
-
- res = 0;
- for(i=0; i<3; i++){
- // avoid stride % bpp != 0
- if(srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24)
- srcStride[i]= srcW*3;
- else
- srcStride[i]= srcW*4;
-
- if(dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24)
- dstStride[i]= dstW*3;
- else
- dstStride[i]= dstW*4;
-
- src[i]= (uint8_t*) malloc(srcStride[i]*srcH);
- dst[i]= (uint8_t*) malloc(dstStride[i]*dstH);
- out[i]= (uint8_t*) malloc(refStride[i]*h);
- if ((src[i] == NULL) || (dst[i] == NULL) || (out[i] == NULL)) {
- perror("Malloc");
- res = -1;
-
- goto end;
- }
- }
-
- dstContext = outContext = NULL;
- srcContext= sws_getContext(w, h, PIX_FMT_YUV420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL);
- if (srcContext == NULL) {
- fprintf(stderr, "Failed to get %s ---> %s\n",
- sws_format_name(PIX_FMT_YUV420P),
- sws_format_name(srcFormat));
- res = -1;
-
- goto end;
- }
- dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL);
- if (dstContext == NULL) {
- fprintf(stderr, "Failed to get %s ---> %s\n",
- sws_format_name(srcFormat),
- sws_format_name(dstFormat));
- res = -1;
-
- goto end;
- }
- outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUV420P, flags, NULL, NULL, NULL);
- if (outContext == NULL) {
- fprintf(stderr, "Failed to get %s ---> %s\n",
- sws_format_name(dstFormat),
- sws_format_name(PIX_FMT_YUV420P));
- res = -1;
-
- goto end;
- }
-// printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
-// (int)src[0], (int)src[1], (int)src[2]);
-
- sws_scale(srcContext, ref, refStride, 0, h , src, srcStride);
- sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
- sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
+static int doTest(uint8_t *ref[3], int refStride[3], int w, int h, int srcFormat, int dstFormat,
+ int srcW, int srcH, int dstW, int dstH, int flags){
+ uint8_t *src[3];
+ uint8_t *dst[3];
+ uint8_t *out[3];
+ int srcStride[3], dstStride[3];
+ int i;
+ uint64_t ssdY, ssdU, ssdV;
+ struct SwsContext *srcContext, *dstContext, *outContext;
+ int res;
+
+ res = 0;
+ for (i=0; i<3; i++){
+ // avoid stride % bpp != 0
+ if (srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24)
+ srcStride[i]= srcW*3;
+ else
+ srcStride[i]= srcW*4;
+
+ if (dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24)
+ dstStride[i]= dstW*3;
+ else
+ dstStride[i]= dstW*4;
+
+ src[i]= (uint8_t*) malloc(srcStride[i]*srcH);
+ dst[i]= (uint8_t*) malloc(dstStride[i]*dstH);
+ out[i]= (uint8_t*) malloc(refStride[i]*h);
+ if (!src[i] || !dst[i] || !out[i]) {
+ perror("Malloc");
+ res = -1;
+
+ goto end;
+ }
+ }
+
+ dstContext = outContext = NULL;
+ srcContext= sws_getContext(w, h, PIX_FMT_YUV420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL);
+ if (!srcContext) {
+ fprintf(stderr, "Failed to get %s ---> %s\n",
+ sws_format_name(PIX_FMT_YUV420P),
+ sws_format_name(srcFormat));
+ res = -1;
+
+ goto end;
+ }
+ dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL);
+ if (!dstContext) {
+ fprintf(stderr, "Failed to get %s ---> %s\n",
+ sws_format_name(srcFormat),
+ sws_format_name(dstFormat));
+ res = -1;
+
+ goto end;
+ }
+ outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUV420P, flags, NULL, NULL, NULL);
+ if (!outContext) {
+ fprintf(stderr, "Failed to get %s ---> %s\n",
+ sws_format_name(dstFormat),
+ sws_format_name(PIX_FMT_YUV420P));
+ res = -1;
+
+ goto end;
+ }
+// printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
+// (int)src[0], (int)src[1], (int)src[2]);
+
+ sws_scale(srcContext, ref, refStride, 0, h , src, srcStride);
+ sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
+ sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
#if defined(ARCH_X86)
- asm volatile ("emms\n\t");
+ asm volatile ("emms\n\t");
#endif
-
- ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
- ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
- ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
-
- if(srcFormat == PIX_FMT_GRAY8 || dstFormat==PIX_FMT_GRAY8) ssdU=ssdV=0; //FIXME check that output is really gray
-
- ssdY/= w*h;
- ssdU/= w*h/4;
- ssdV/= w*h/4;
-
- if(ssdY>100 || ssdU>100 || ssdV>100){
- printf(" %s %dx%d -> %s %4dx%4d flags=%2d SSD=%5lld,%5lld,%5lld\n",
- sws_format_name(srcFormat), srcW, srcH,
- sws_format_name(dstFormat), dstW, dstH,
- flags,
- ssdY, ssdU, ssdV);
- }
-
- end:
-
- sws_freeContext(srcContext);
- sws_freeContext(dstContext);
- sws_freeContext(outContext);
-
- for(i=0; i<3; i++){
- free(src[i]);
- free(dst[i]);
- free(out[i]);
- }
-
- return res;
+
+ ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
+ ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
+ ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
+
+ if (srcFormat == PIX_FMT_GRAY8 || dstFormat==PIX_FMT_GRAY8) ssdU=ssdV=0; //FIXME check that output is really gray
+
+ ssdY/= w*h;
+ ssdU/= w*h/4;
+ ssdV/= w*h/4;
+
+ if (ssdY>100 || ssdU>100 || ssdV>100){
+ printf(" %s %dx%d -> %s %4dx%4d flags=%2d SSD=%5lld,%5lld,%5lld\n",
+ sws_format_name(srcFormat), srcW, srcH,
+ sws_format_name(dstFormat), dstW, dstH,
+ flags,
+ ssdY, ssdU, ssdV);
+ }
+
+ end:
+
+ sws_freeContext(srcContext);
+ sws_freeContext(dstContext);
+ sws_freeContext(outContext);
+
+ for (i=0; i<3; i++){
+ free(src[i]);
+ free(dst[i]);
+ free(out[i]);
+ }
+
+ return res;
}
void fast_memcpy(void *a, void *b, int s){ //FIXME
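
getSSD() above accumulates a raw sum of squared differences per plane, and doTest() then divides by the pixel count, so the threshold test is really a per-plane mean squared error. For the luma plane this is

    \mathrm{mse}_Y \;=\; \frac{1}{wh}\sum_{y=0}^{h-1}\sum_{x=0}^{w-1}\bigl(\mathrm{ref}_Y[x,y]-\mathrm{out}_Y[x,y]\bigr)^2

while the 4:2:0 chroma sums run over ceil(w/2) x ceil(h/2) samples but are divided by wh/4; a conversion chain is only reported when any of the three values exceeds 100.
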
@@ -161,69 +161,69 @@ void fast_memcpy(void *a, void *b, int s){ //FIXME
}
static void selfTest(uint8_t *src[3], int stride[3], int w, int h){
- enum PixelFormat srcFormat, dstFormat;
- int srcW, srcH, dstW, dstH;
- int flags;
-
- for(srcFormat = 0; srcFormat < PIX_FMT_NB; srcFormat++) {
- for(dstFormat = 0; dstFormat < PIX_FMT_NB; dstFormat++) {
- printf("%s -> %s\n",
- sws_format_name(srcFormat),
- sws_format_name(dstFormat));
-
- srcW= w;
- srcH= h;
- for(dstW=w - w/3; dstW<= 4*w/3; dstW+= w/3){
- for(dstH=h - h/3; dstH<= 4*h/3; dstH+= h/3){
- for(flags=1; flags<33; flags*=2) {
- int res;
-
- res = doTest(src, stride, w, h, srcFormat, dstFormat,
- srcW, srcH, dstW, dstH, flags);
- if (res < 0) {
- dstW = 4 * w / 3;
- dstH = 4 * h / 3;
- flags = 33;
- }
- }
- }
- }
- }
- }
+ enum PixelFormat srcFormat, dstFormat;
+ int srcW, srcH, dstW, dstH;
+ int flags;
+
+ for (srcFormat = 0; srcFormat < PIX_FMT_NB; srcFormat++) {
+ for (dstFormat = 0; dstFormat < PIX_FMT_NB; dstFormat++) {
+ printf("%s -> %s\n",
+ sws_format_name(srcFormat),
+ sws_format_name(dstFormat));
+
+ srcW= w;
+ srcH= h;
+ for (dstW=w - w/3; dstW<= 4*w/3; dstW+= w/3){
+ for (dstH=h - h/3; dstH<= 4*h/3; dstH+= h/3){
+ for (flags=1; flags<33; flags*=2) {
+ int res;
+
+ res = doTest(src, stride, w, h, srcFormat, dstFormat,
+ srcW, srcH, dstW, dstH, flags);
+ if (res < 0) {
+ dstW = 4 * w / 3;
+ dstH = 4 * h / 3;
+ flags = 33;
+ }
+ }
+ }
+ }
+ }
+ }
}
#define W 96
#define H 96
int main(int argc, char **argv){
- uint8_t rgb_data[W*H*4];
- uint8_t *rgb_src[3]= {rgb_data, NULL, NULL};
- int rgb_stride[3]={4*W, 0, 0};
- uint8_t data[3][W*H];
- uint8_t *src[3]= {data[0], data[1], data[2]};
- int stride[3]={W, W, W};
- int x, y;
- struct SwsContext *sws;
-
- sws= sws_getContext(W/12, H/12, PIX_FMT_RGB32, W, H, PIX_FMT_YUV420P, 2, NULL, NULL, NULL);
-
- for(y=0; y<H; y++){
- for(x=0; x<W*4; x++){
- rgb_data[ x + y*4*W]= random();
- }
- }
+ uint8_t *rgb_data = malloc (W*H*4);
+ uint8_t *rgb_src[3]= {rgb_data, NULL, NULL};
+ int rgb_stride[3]={4*W, 0, 0};
+ uint8_t *data = malloc (3*W*H);
+ uint8_t *src[3]= {data, data+W*H, data+W*H*2};
+ int stride[3]={W, W, W};
+ int x, y;
+ struct SwsContext *sws;
+
+ sws= sws_getContext(W/12, H/12, PIX_FMT_RGB32, W, H, PIX_FMT_YUV420P, 2, NULL, NULL, NULL);
+
+ for (y=0; y<H; y++){
+ for (x=0; x<W*4; x++){
+ rgb_data[ x + y*4*W]= random();
+ }
+ }
#if defined(ARCH_X86)
- sws_rgb2rgb_init(SWS_CPU_CAPS_MMX*0);
+ sws_rgb2rgb_init(SWS_CPU_CAPS_MMX*0);
#else
- sws_rgb2rgb_init(0);
+ sws_rgb2rgb_init(0);
#endif
- sws_scale(sws, rgb_src, rgb_stride, 0, H , src, stride);
+ sws_scale(sws, rgb_src, rgb_stride, 0, H, src, stride);
#if defined(ARCH_X86)
- asm volatile ("emms\n\t");
+ asm volatile ("emms\n\t");
#endif
- selfTest(src, stride, W, H);
+ selfTest(src, stride, W, H);
- return 123;
+ return 123;
}
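
The self-test above drives the whole public API (sws_getContext, sws_scale, sws_freeContext). For reference, the same call sequence reduced to a single RGB32 -> YUV420P conversion looks like the sketch below; the fixed strides and the SWS_BILINEAR choice are only examples, and error handling is reduced to a NULL check.

    #include <stdint.h>
    #include "swscale.h"

    /* Convert one w x h RGB32 frame into pre-allocated YUV420P planes. */
    static int convert_frame(uint8_t *rgb, int w, int h,
                             uint8_t *y, uint8_t *u, uint8_t *v)
    {
        uint8_t *srcPlanes[3] = { rgb, NULL, NULL };
        int      srcStride[3] = { 4*w, 0, 0 };
        uint8_t *dstPlanes[3] = { y, u, v };
        int      dstStride[3] = { w, w/2, w/2 };

        struct SwsContext *ctx = sws_getContext(w, h, PIX_FMT_RGB32,
                                                w, h, PIX_FMT_YUV420P,
                                                SWS_BILINEAR, NULL, NULL, NULL);
        if (!ctx)
            return -1;
        sws_scale(ctx, srcPlanes, srcStride, 0, h, dstPlanes, dstStride);
        sws_freeContext(ctx);
        return 0;
    }
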
diff --git a/contrib/ffmpeg/libswscale/swscale.c b/contrib/ffmpeg/libswscale/swscale.c
index f6a23425b..4dc2c322e 100644
--- a/contrib/ffmpeg/libswscale/swscale.c
+++ b/contrib/ffmpeg/libswscale/swscale.c
@@ -15,7 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* the C code (not assembly, mmx, ...) of this file can be used
* under the LGPL license too
@@ -25,7 +25,7 @@
supported Input formats: YV12, I420/IYUV, YUY2, UYVY, BGR32, BGR24, BGR16, BGR15, RGB32, RGB24, Y8/Y800, YVU9/IF09, PAL8
supported output formats: YV12, I420/IYUV, YUY2, UYVY, {BGR,RGB}{1,4,8,15,16,24,32}, Y8/Y800, YVU9/IF09
{BGR,RGB}{1,4,8,15,16} support dithering
-
+
unscaled special converters (YV12=I420=IYUV, Y800=Y8)
YV12 -> {BGR,RGB}{1,4,8,15,16,24,32}
x -> x
@@ -37,7 +37,7 @@
BGR15 -> BGR16
*/
-/*
+/*
tested special converters (most are tested actually but i didnt write it down ...)
YV12 -> BGR16
YV12 -> YV12
@@ -46,7 +46,7 @@ tested special converters (most are tested actually but i didnt write it down ..
YVU9 -> YV12
untested special converters
- YV12/I420 -> BGR15/BGR24/BGR32 (its the yuv2rgb stuff, so it should be ok)
+ YV12/I420 -> BGR15/BGR24/BGR32 (it is the yuv2rgb stuff, so it should be ok)
YV12/I420 -> YV12/I420
YUY2/BGR15/BGR24/BGR32/RGB24/RGB32 -> same format
BGR24 -> BGR32 & RGB24 -> RGB32
@@ -72,9 +72,7 @@ untested special converters
#include "x86_cpu.h"
#include "bswap.h"
#include "rgb2rgb.h"
-#ifdef USE_FASTMEMCPY
-#include "libvo/fastmemcpy.h"
-#endif
+#include "libavcodec/opt.h"
#undef MOVNTQ
#undef PAVGB
@@ -102,22 +100,56 @@ untested special converters
#define PI 3.14159265358979323846
#endif
-#define isSupportedIn(x) ((x)==PIX_FMT_YUV420P || (x)==PIX_FMT_YUYV422 || (x)==PIX_FMT_UYVY422\
- || (x)==PIX_FMT_RGB32|| (x)==PIX_FMT_BGR24|| (x)==PIX_FMT_BGR565|| (x)==PIX_FMT_BGR555\
- || (x)==PIX_FMT_BGR32|| (x)==PIX_FMT_RGB24|| (x)==PIX_FMT_RGB565|| (x)==PIX_FMT_RGB555\
- || (x)==PIX_FMT_GRAY8 || (x)==PIX_FMT_YUV410P\
- || (x)==PIX_FMT_GRAY16BE || (x)==PIX_FMT_GRAY16LE\
- || (x)==PIX_FMT_YUV444P || (x)==PIX_FMT_YUV422P || (x)==PIX_FMT_YUV411P\
- || (x)==PIX_FMT_PAL8 || (x)==PIX_FMT_BGR8 || (x)==PIX_FMT_RGB8\
- || (x)==PIX_FMT_BGR4_BYTE || (x)==PIX_FMT_RGB4_BYTE)
-#define isSupportedOut(x) ((x)==PIX_FMT_YUV420P || (x)==PIX_FMT_YUYV422 || (x)==PIX_FMT_UYVY422\
- || (x)==PIX_FMT_YUV444P || (x)==PIX_FMT_YUV422P || (x)==PIX_FMT_YUV411P\
- || isRGB(x) || isBGR(x)\
- || (x)==PIX_FMT_NV12 || (x)==PIX_FMT_NV21\
- || (x)==PIX_FMT_GRAY16BE || (x)==PIX_FMT_GRAY16LE\
- || (x)==PIX_FMT_GRAY8 || (x)==PIX_FMT_YUV410P)
-#define isPacked(x) ((x)==PIX_FMT_PAL8 || (x)==PIX_FMT_YUYV422 ||\
- (x)==PIX_FMT_UYVY422 || isRGB(x) || isBGR(x))
+#define isSupportedIn(x) ( \
+ (x)==PIX_FMT_YUV420P \
+ || (x)==PIX_FMT_YUVA420P \
+ || (x)==PIX_FMT_YUYV422 \
+ || (x)==PIX_FMT_UYVY422 \
+ || (x)==PIX_FMT_RGB32 \
+ || (x)==PIX_FMT_BGR24 \
+ || (x)==PIX_FMT_BGR565 \
+ || (x)==PIX_FMT_BGR555 \
+ || (x)==PIX_FMT_BGR32 \
+ || (x)==PIX_FMT_RGB24 \
+ || (x)==PIX_FMT_RGB565 \
+ || (x)==PIX_FMT_RGB555 \
+ || (x)==PIX_FMT_GRAY8 \
+ || (x)==PIX_FMT_YUV410P \
+ || (x)==PIX_FMT_GRAY16BE \
+ || (x)==PIX_FMT_GRAY16LE \
+ || (x)==PIX_FMT_YUV444P \
+ || (x)==PIX_FMT_YUV422P \
+ || (x)==PIX_FMT_YUV411P \
+ || (x)==PIX_FMT_PAL8 \
+ || (x)==PIX_FMT_BGR8 \
+ || (x)==PIX_FMT_RGB8 \
+ || (x)==PIX_FMT_BGR4_BYTE \
+ || (x)==PIX_FMT_RGB4_BYTE \
+ || (x)==PIX_FMT_YUV440P \
+ )
+#define isSupportedOut(x) ( \
+ (x)==PIX_FMT_YUV420P \
+ || (x)==PIX_FMT_YUYV422 \
+ || (x)==PIX_FMT_UYVY422 \
+ || (x)==PIX_FMT_YUV444P \
+ || (x)==PIX_FMT_YUV422P \
+ || (x)==PIX_FMT_YUV411P \
+ || isRGB(x) \
+ || isBGR(x) \
+ || (x)==PIX_FMT_NV12 \
+ || (x)==PIX_FMT_NV21 \
+ || (x)==PIX_FMT_GRAY16BE \
+ || (x)==PIX_FMT_GRAY16LE \
+ || (x)==PIX_FMT_GRAY8 \
+ || (x)==PIX_FMT_YUV410P \
+ )
+#define isPacked(x) ( \
+ (x)==PIX_FMT_PAL8 \
+ || (x)==PIX_FMT_YUYV422 \
+ || (x)==PIX_FMT_UYVY422 \
+ || isRGB(x) \
+ || isBGR(x) \
+ )
#define RGB2YUV_SHIFT 16
#define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
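
RGB2YUV_SHIFT sets the fixed-point precision of the integer RGB->YUV coefficients: each fractional weight is scaled by 2^16 and rounded, and the per-pixel code in rgb2rgb_template.c then evaluates Y = ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16. A small worked sketch follows; only BY is visible in this hunk, so the RY and GY values below are the usual studio-swing BT.601 weights and should be read as assumptions.

    #include <stdio.h>

    #define RGB2YUV_SHIFT 16
    #define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
    #define GY ((int)( 0.504*(1<<RGB2YUV_SHIFT)+0.5))   /* assumed BT.601 weight */
    #define RY ((int)( 0.257*(1<<RGB2YUV_SHIFT)+0.5))   /* assumed BT.601 weight */

    int main(void)
    {
        unsigned r = 200, g = 100, b = 50;                /* sample pixel */
        unsigned Y = ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16;
        printf("Y = %u\n", Y);                            /* 16 + 0.257*200 + 0.504*100 + 0.098*50 ~= 122 */
        return 0;
    }
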
@@ -137,7 +169,7 @@ NOTES
Special versions: fast Y 1:1 scaling (no interpolation in y direction)
TODO
-more intelligent missalignment avoidance for the horizontal scaler
+more intelligent misalignment avoidance for the horizontal scaler
write special vertical cubic upscale version
Optimize C code (yv12 / minmax)
add support for packed pixel yuv input & output
@@ -148,58 +180,58 @@ write special BGR->BGR scaler
*/
#if defined(ARCH_X86) && defined (CONFIG_GPL)
-static uint64_t attribute_used __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
-static uint64_t attribute_used __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
-static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
-static uint64_t attribute_used __attribute__((aligned(8))) w02= 0x0002000200020002LL;
-static uint64_t attribute_used __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
-static uint64_t attribute_used __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
-static uint64_t attribute_used __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;
-static uint64_t attribute_used __attribute__((aligned(8))) bm01010101=0x00FF00FF00FF00FFLL;
+DECLARE_ASM_CONST(8, uint64_t, bF8)= 0xF8F8F8F8F8F8F8F8LL;
+DECLARE_ASM_CONST(8, uint64_t, bFC)= 0xFCFCFCFCFCFCFCFCLL;
+DECLARE_ASM_CONST(8, uint64_t, w10)= 0x0010001000100010LL;
+DECLARE_ASM_CONST(8, uint64_t, w02)= 0x0002000200020002LL;
+DECLARE_ASM_CONST(8, uint64_t, bm00001111)=0x00000000FFFFFFFFLL;
+DECLARE_ASM_CONST(8, uint64_t, bm00000111)=0x0000000000FFFFFFLL;
+DECLARE_ASM_CONST(8, uint64_t, bm11111000)=0xFFFFFFFFFF000000LL;
+DECLARE_ASM_CONST(8, uint64_t, bm01010101)=0x00FF00FF00FF00FFLL;
static volatile uint64_t attribute_used __attribute__((aligned(8))) b5Dither;
static volatile uint64_t attribute_used __attribute__((aligned(8))) g5Dither;
static volatile uint64_t attribute_used __attribute__((aligned(8))) g6Dither;
static volatile uint64_t attribute_used __attribute__((aligned(8))) r5Dither;
-static uint64_t __attribute__((aligned(8))) dither4[2]={
- 0x0103010301030103LL,
- 0x0200020002000200LL,};
+const DECLARE_ALIGNED(8, uint64_t, ff_dither4[2]) = {
+ 0x0103010301030103LL,
+ 0x0200020002000200LL,};
-static uint64_t __attribute__((aligned(8))) dither8[2]={
- 0x0602060206020602LL,
- 0x0004000400040004LL,};
+const DECLARE_ALIGNED(8, uint64_t, ff_dither8[2]) = {
+ 0x0602060206020602LL,
+ 0x0004000400040004LL,};
-static uint64_t __attribute__((aligned(8))) b16Mask= 0x001F001F001F001FLL;
-static uint64_t attribute_used __attribute__((aligned(8))) g16Mask= 0x07E007E007E007E0LL;
-static uint64_t attribute_used __attribute__((aligned(8))) r16Mask= 0xF800F800F800F800LL;
-static uint64_t __attribute__((aligned(8))) b15Mask= 0x001F001F001F001FLL;
-static uint64_t attribute_used __attribute__((aligned(8))) g15Mask= 0x03E003E003E003E0LL;
-static uint64_t attribute_used __attribute__((aligned(8))) r15Mask= 0x7C007C007C007C00LL;
+DECLARE_ASM_CONST(8, uint64_t, b16Mask)= 0x001F001F001F001FLL;
+DECLARE_ASM_CONST(8, uint64_t, g16Mask)= 0x07E007E007E007E0LL;
+DECLARE_ASM_CONST(8, uint64_t, r16Mask)= 0xF800F800F800F800LL;
+DECLARE_ASM_CONST(8, uint64_t, b15Mask)= 0x001F001F001F001FLL;
+DECLARE_ASM_CONST(8, uint64_t, g15Mask)= 0x03E003E003E003E0LL;
+DECLARE_ASM_CONST(8, uint64_t, r15Mask)= 0x7C007C007C007C00LL;
-static uint64_t attribute_used __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFLL;
-static uint64_t attribute_used __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00LL;
-static uint64_t attribute_used __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000LL;
+DECLARE_ALIGNED(8, const uint64_t, ff_M24A) = 0x00FF0000FF0000FFLL;
+DECLARE_ALIGNED(8, const uint64_t, ff_M24B) = 0xFF0000FF0000FF00LL;
+DECLARE_ALIGNED(8, const uint64_t, ff_M24C) = 0x0000FF0000FF0000LL;
#ifdef FAST_BGR2YV12
-static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000000210041000DULL;
-static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
-static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff) = 0x000000210041000DULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff) = 0x0000FFEEFFDC0038ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff) = 0x00000038FFD2FFF8ULL;
#else
-static const uint64_t bgr2YCoeff attribute_used __attribute__((aligned(8))) = 0x000020E540830C8BULL;
-static const uint64_t bgr2UCoeff attribute_used __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
-static const uint64_t bgr2VCoeff attribute_used __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff) = 0x000020E540830C8BULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff) = 0x0000ED0FDAC23831ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff) = 0x00003831D0E6F6EAULL;
#endif /* FAST_BGR2YV12 */
-static const uint64_t bgr2YOffset attribute_used __attribute__((aligned(8))) = 0x1010101010101010ULL;
-static const uint64_t bgr2UVOffset attribute_used __attribute__((aligned(8)))= 0x8080808080808080ULL;
-static const uint64_t w1111 attribute_used __attribute__((aligned(8))) = 0x0001000100010001ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset) = 0x1010101010101010ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL;
+DECLARE_ALIGNED(8, const uint64_t, ff_w1111) = 0x0001000100010001ULL;
#endif /* defined(ARCH_X86) */
// clipping helper table for C implementations:
static unsigned char clip_table[768];
static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b);
-
+
extern const uint8_t dither_2x2_4[2][8];
extern const uint8_t dither_2x2_8[2][8];
extern const uint8_t dither_8x8_32[8][8];
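
The block above replaces the hand-written "static uint64_t ... __attribute__((aligned(8)))" constants with the DECLARE_ASM_CONST/DECLARE_ALIGNED wrappers, and the constants shared with the inline-assembly templates lose their static linkage and gain an ff_ prefix. The sketch below only illustrates the kind of declaration such wrappers boil down to; the real macros live in the avutil headers and may differ in detail.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins, not the verbatim FFmpeg macros. */
    #define MY_DECLARE_ALIGNED(n, t, v)   t __attribute__((aligned(n))) v
    #define MY_DECLARE_ASM_CONST(n, t, v) static const t __attribute__((aligned(n))) v

    /* 8-byte alignment keeps the 64-bit MMX loads on their natural boundary. */
    MY_DECLARE_ASM_CONST(8, uint64_t, local_mask)            = 0x00FF00FF00FF00FFULL;
    MY_DECLARE_ALIGNED(8, const uint64_t, ff_example_coeff)  = 0x000020E540830C8BULL;

    int main(void)
    {
        printf("%llx %llx\n",
               (unsigned long long)local_mask,
               (unsigned long long)ff_example_coeff);
        return 0;
    }
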
@@ -210,13 +242,47 @@ static const char * sws_context_to_name(void * ptr) {
return "swscaler";
}
-static AVClass sws_context_class = { "SWScaler", sws_context_to_name, NULL };
+#define OFFSET(x) offsetof(SwsContext, x)
+#define DEFAULT 0
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+
+static const AVOption options[] = {
+ { "sws_flags", "scaler/cpu flags", OFFSET(flags), FF_OPT_TYPE_FLAGS, DEFAULT, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "fast_bilinear", "fast bilinear", 0, FF_OPT_TYPE_CONST, SWS_FAST_BILINEAR, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bilinear", "bilinear", 0, FF_OPT_TYPE_CONST, SWS_BILINEAR, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bicubic", "bicubic", 0, FF_OPT_TYPE_CONST, SWS_BICUBIC, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "experimental", "experimental", 0, FF_OPT_TYPE_CONST, SWS_X, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "neighbor", "nearest neighbor", 0, FF_OPT_TYPE_CONST, SWS_POINT, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "area", "averaging area", 0, FF_OPT_TYPE_CONST, SWS_AREA, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bicublin", "luma bicubic, chroma bilinear", 0, FF_OPT_TYPE_CONST, SWS_BICUBLIN, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "gauss", "gaussian", 0, FF_OPT_TYPE_CONST, SWS_GAUSS, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "sinc", "sinc", 0, FF_OPT_TYPE_CONST, SWS_SINC, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "lanczos", "lanczos", 0, FF_OPT_TYPE_CONST, SWS_LANCZOS, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "spline", "natural bicubic spline", 0, FF_OPT_TYPE_CONST, SWS_SPLINE, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "print_info", "print info", 0, FF_OPT_TYPE_CONST, SWS_PRINT_INFO, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "accurate_rnd", "accurate rounding", 0, FF_OPT_TYPE_CONST, SWS_ACCURATE_RND, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "mmx", "MMX SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_MMX, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "mmx2", "MMX2 SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_MMX2, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "3dnow", "3DNOW SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_3DNOW, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "altivec", "AltiVec SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_ALTIVEC, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "bfin", "Blackfin SIMD acceleration", 0, FF_OPT_TYPE_CONST, SWS_CPU_CAPS_BFIN, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "full_chroma_int", "full chroma interpolation", 0 , FF_OPT_TYPE_CONST, SWS_FULL_CHR_H_INT, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { "full_chroma_inp", "full chroma input", 0 , FF_OPT_TYPE_CONST, SWS_FULL_CHR_H_INP, INT_MIN, INT_MAX, VE, "sws_flags" },
+ { NULL }
+};
+
+#undef VE
+#undef DEFAULT
+
+static AVClass sws_context_class = { "SWScaler", sws_context_to_name, options };
char *sws_format_name(enum PixelFormat format)
{
switch (format) {
case PIX_FMT_YUV420P:
return "yuv420p";
+ case PIX_FMT_YUVA420P:
+ return "yuva420p";
case PIX_FMT_YUYV422:
return "yuyv422";
case PIX_FMT_RGB24:
@@ -289,6 +355,8 @@ char *sws_format_name(enum PixelFormat format)
return "nv12";
case PIX_FMT_NV21:
return "nv21";
+ case PIX_FMT_YUV440P:
+ return "yuv440p";
default:
return "Unknown format";
}
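
The options[] table registered above exposes every SWS_* scaler and CPU flag by name through the "sws_flags" AVOption, in addition to the numeric flags argument that sws_getContext() already takes. The snippet below shows the direct, flag-constant form of roughly the same request; the chosen formats, sizes and flag combination are only an example.

    #include "swscale.h"

    /* Roughly equivalent to selecting bicubic plus print_info via sws_flags. */
    struct SwsContext *make_bicubic_ctx(int srcW, int srcH, int dstW, int dstH)
    {
        return sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
                              dstW, dstH, PIX_FMT_RGB32,
                              SWS_BICUBIC | SWS_PRINT_INFO,
                              NULL, NULL, NULL);
    }
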
@@ -297,511 +365,511 @@ char *sws_format_name(enum PixelFormat format)
#if defined(ARCH_X86) && defined (CONFIG_GPL)
void in_asm_used_var_warning_killer()
{
- volatile int i= bF8+bFC+w10+
- bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+
- M24A+M24B+M24C+w02 + b5Dither+g5Dither+r5Dither+g6Dither+dither4[0]+dither8[0]+bm01010101;
- if(i) i=0;
+ volatile int i= bF8+bFC+w10+
+ bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+
+ ff_M24A+ff_M24B+ff_M24C+w02 + b5Dither+g5Dither+r5Dither+g6Dither+ff_dither4[0]+ff_dither8[0]+bm01010101;
+ if (i) i=0;
}
#endif
static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
{
- //FIXME Optimize (just quickly writen not opti..)
- int i;
- for(i=0; i<dstW; i++)
- {
- int val=1<<18;
- int j;
- for(j=0; j<lumFilterSize; j++)
- val += lumSrc[j][i] * lumFilter[j];
-
- dest[i]= av_clip_uint8(val>>19);
- }
-
- if(uDest != NULL)
- for(i=0; i<chrDstW; i++)
- {
- int u=1<<18;
- int v=1<<18;
- int j;
- for(j=0; j<chrFilterSize; j++)
- {
- u += chrSrc[j][i] * chrFilter[j];
- v += chrSrc[j][i + 2048] * chrFilter[j];
- }
-
- uDest[i]= av_clip_uint8(u>>19);
- vDest[i]= av_clip_uint8(v>>19);
- }
+ //FIXME Optimize (just quickly writen not opti..)
+ int i;
+ for (i=0; i<dstW; i++)
+ {
+ int val=1<<18;
+ int j;
+ for (j=0; j<lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
+
+ dest[i]= av_clip_uint8(val>>19);
+ }
+
+ if (uDest)
+ for (i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for (j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[i]= av_clip_uint8(u>>19);
+ vDest[i]= av_clip_uint8(v>>19);
+ }
}
static inline void yuv2nv12XinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
- //FIXME Optimize (just quickly writen not opti..)
- int i;
- for(i=0; i<dstW; i++)
- {
- int val=1<<18;
- int j;
- for(j=0; j<lumFilterSize; j++)
- val += lumSrc[j][i] * lumFilter[j];
-
- dest[i]= av_clip_uint8(val>>19);
- }
-
- if(uDest == NULL)
- return;
-
- if(dstFormat == PIX_FMT_NV12)
- for(i=0; i<chrDstW; i++)
- {
- int u=1<<18;
- int v=1<<18;
- int j;
- for(j=0; j<chrFilterSize; j++)
- {
- u += chrSrc[j][i] * chrFilter[j];
- v += chrSrc[j][i + 2048] * chrFilter[j];
- }
-
- uDest[2*i]= av_clip_uint8(u>>19);
- uDest[2*i+1]= av_clip_uint8(v>>19);
- }
- else
- for(i=0; i<chrDstW; i++)
- {
- int u=1<<18;
- int v=1<<18;
- int j;
- for(j=0; j<chrFilterSize; j++)
- {
- u += chrSrc[j][i] * chrFilter[j];
- v += chrSrc[j][i + 2048] * chrFilter[j];
- }
-
- uDest[2*i]= av_clip_uint8(v>>19);
- uDest[2*i+1]= av_clip_uint8(u>>19);
- }
+ //FIXME Optimize (just quickly writen not opti..)
+ int i;
+ for (i=0; i<dstW; i++)
+ {
+ int val=1<<18;
+ int j;
+ for (j=0; j<lumFilterSize; j++)
+ val += lumSrc[j][i] * lumFilter[j];
+
+ dest[i]= av_clip_uint8(val>>19);
+ }
+
+ if (!uDest)
+ return;
+
+ if (dstFormat == PIX_FMT_NV12)
+ for (i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for (j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[2*i]= av_clip_uint8(u>>19);
+ uDest[2*i+1]= av_clip_uint8(v>>19);
+ }
+ else
+ for (i=0; i<chrDstW; i++)
+ {
+ int u=1<<18;
+ int v=1<<18;
+ int j;
+ for (j=0; j<chrFilterSize; j++)
+ {
+ u += chrSrc[j][i] * chrFilter[j];
+ v += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+
+ uDest[2*i]= av_clip_uint8(v>>19);
+ uDest[2*i+1]= av_clip_uint8(u>>19);
+ }
}
#define YSCALE_YUV_2_PACKEDX_C(type) \
- for(i=0; i<(dstW>>1); i++){\
- int j;\
- int Y1=1<<18;\
- int Y2=1<<18;\
- int U=1<<18;\
- int V=1<<18;\
- type attribute_unused *r, *b, *g;\
- const int i2= 2*i;\
- \
- for(j=0; j<lumFilterSize; j++)\
- {\
- Y1 += lumSrc[j][i2] * lumFilter[j];\
- Y2 += lumSrc[j][i2+1] * lumFilter[j];\
- }\
- for(j=0; j<chrFilterSize; j++)\
- {\
- U += chrSrc[j][i] * chrFilter[j];\
- V += chrSrc[j][i+2048] * chrFilter[j];\
- }\
- Y1>>=19;\
- Y2>>=19;\
- U >>=19;\
- V >>=19;\
- if((Y1|Y2|U|V)&256)\
- {\
- if(Y1>255) Y1=255;\
- else if(Y1<0)Y1=0;\
- if(Y2>255) Y2=255;\
- else if(Y2<0)Y2=0;\
- if(U>255) U=255;\
- else if(U<0) U=0;\
- if(V>255) V=255;\
- else if(V<0) V=0;\
- }
-
+ for (i=0; i<(dstW>>1); i++){\
+ int j;\
+ int Y1 = 1<<18;\
+ int Y2 = 1<<18;\
+ int U = 1<<18;\
+ int V = 1<<18;\
+ type av_unused *r, *b, *g;\
+ const int i2= 2*i;\
+ \
+ for (j=0; j<lumFilterSize; j++)\
+ {\
+ Y1 += lumSrc[j][i2] * lumFilter[j];\
+ Y2 += lumSrc[j][i2+1] * lumFilter[j];\
+ }\
+ for (j=0; j<chrFilterSize; j++)\
+ {\
+ U += chrSrc[j][i] * chrFilter[j];\
+ V += chrSrc[j][i+2048] * chrFilter[j];\
+ }\
+ Y1>>=19;\
+ Y2>>=19;\
+ U >>=19;\
+ V >>=19;\
+ if ((Y1|Y2|U|V)&256)\
+ {\
+ if (Y1>255) Y1=255; \
+ else if (Y1<0)Y1=0; \
+ if (Y2>255) Y2=255; \
+ else if (Y2<0)Y2=0; \
+ if (U>255) U=255; \
+ else if (U<0) U=0; \
+ if (V>255) V=255; \
+ else if (V<0) V=0; \
+ }
+
#define YSCALE_YUV_2_RGBX_C(type) \
- YSCALE_YUV_2_PACKEDX_C(type)\
- r = (type *)c->table_rV[V];\
- g = (type *)(c->table_gU[U] + c->table_gV[V]);\
- b = (type *)c->table_bU[U];\
-
-#define YSCALE_YUV_2_PACKED2_C \
- for(i=0; i<(dstW>>1); i++){\
- const int i2= 2*i;\
- int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>19;\
- int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19;\
- int U= (uvbuf0[i ]*uvalpha1+uvbuf1[i ]*uvalpha)>>19;\
- int V= (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19;\
+ YSCALE_YUV_2_PACKEDX_C(type) \
+ r = (type *)c->table_rV[V]; \
+ g = (type *)(c->table_gU[U] + c->table_gV[V]); \
+ b = (type *)c->table_bU[U]; \
+
+#define YSCALE_YUV_2_PACKED2_C \
+ for (i=0; i<(dstW>>1); i++){ \
+ const int i2= 2*i; \
+ int Y1= (buf0[i2 ]*yalpha1+buf1[i2 ]*yalpha)>>19; \
+ int Y2= (buf0[i2+1]*yalpha1+buf1[i2+1]*yalpha)>>19; \
+ int U= (uvbuf0[i ]*uvalpha1+uvbuf1[i ]*uvalpha)>>19; \
+ int V= (uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19; \
#define YSCALE_YUV_2_RGB2_C(type) \
- YSCALE_YUV_2_PACKED2_C\
- type *r, *b, *g;\
- r = (type *)c->table_rV[V];\
- g = (type *)(c->table_gU[U] + c->table_gV[V]);\
- b = (type *)c->table_bU[U];\
+ YSCALE_YUV_2_PACKED2_C\
+ type *r, *b, *g;\
+ r = (type *)c->table_rV[V];\
+ g = (type *)(c->table_gU[U] + c->table_gV[V]);\
+ b = (type *)c->table_bU[U];\
#define YSCALE_YUV_2_PACKED1_C \
- for(i=0; i<(dstW>>1); i++){\
- const int i2= 2*i;\
- int Y1= buf0[i2 ]>>7;\
- int Y2= buf0[i2+1]>>7;\
- int U= (uvbuf1[i ])>>7;\
- int V= (uvbuf1[i+2048])>>7;\
+ for (i=0; i<(dstW>>1); i++){\
+ const int i2= 2*i;\
+ int Y1= buf0[i2 ]>>7;\
+ int Y2= buf0[i2+1]>>7;\
+ int U= (uvbuf1[i ])>>7;\
+ int V= (uvbuf1[i+2048])>>7;\
#define YSCALE_YUV_2_RGB1_C(type) \
- YSCALE_YUV_2_PACKED1_C\
- type *r, *b, *g;\
- r = (type *)c->table_rV[V];\
- g = (type *)(c->table_gU[U] + c->table_gV[V]);\
- b = (type *)c->table_bU[U];\
+ YSCALE_YUV_2_PACKED1_C\
+ type *r, *b, *g;\
+ r = (type *)c->table_rV[V];\
+ g = (type *)(c->table_gU[U] + c->table_gV[V]);\
+ b = (type *)c->table_bU[U];\
#define YSCALE_YUV_2_PACKED1B_C \
- for(i=0; i<(dstW>>1); i++){\
- const int i2= 2*i;\
- int Y1= buf0[i2 ]>>7;\
- int Y2= buf0[i2+1]>>7;\
- int U= (uvbuf0[i ] + uvbuf1[i ])>>8;\
- int V= (uvbuf0[i+2048] + uvbuf1[i+2048])>>8;\
+ for (i=0; i<(dstW>>1); i++){\
+ const int i2= 2*i;\
+ int Y1= buf0[i2 ]>>7;\
+ int Y2= buf0[i2+1]>>7;\
+ int U= (uvbuf0[i ] + uvbuf1[i ])>>8;\
+ int V= (uvbuf0[i+2048] + uvbuf1[i+2048])>>8;\
#define YSCALE_YUV_2_RGB1B_C(type) \
- YSCALE_YUV_2_PACKED1B_C\
- type *r, *b, *g;\
- r = (type *)c->table_rV[V];\
- g = (type *)(c->table_gU[U] + c->table_gV[V]);\
- b = (type *)c->table_bU[U];\
+ YSCALE_YUV_2_PACKED1B_C\
+ type *r, *b, *g;\
+ r = (type *)c->table_rV[V];\
+ g = (type *)(c->table_gU[U] + c->table_gV[V]);\
+ b = (type *)c->table_bU[U];\
#define YSCALE_YUV_2_ANYRGB_C(func, func2)\
- switch(c->dstFormat)\
- {\
- case PIX_FMT_RGB32:\
- case PIX_FMT_BGR32:\
- func(uint32_t)\
- ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
- ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
- } \
- break;\
- case PIX_FMT_RGB24:\
- func(uint8_t)\
- ((uint8_t*)dest)[0]= r[Y1];\
- ((uint8_t*)dest)[1]= g[Y1];\
- ((uint8_t*)dest)[2]= b[Y1];\
- ((uint8_t*)dest)[3]= r[Y2];\
- ((uint8_t*)dest)[4]= g[Y2];\
- ((uint8_t*)dest)[5]= b[Y2];\
- dest+=6;\
- }\
- break;\
- case PIX_FMT_BGR24:\
- func(uint8_t)\
- ((uint8_t*)dest)[0]= b[Y1];\
- ((uint8_t*)dest)[1]= g[Y1];\
- ((uint8_t*)dest)[2]= r[Y1];\
- ((uint8_t*)dest)[3]= b[Y2];\
- ((uint8_t*)dest)[4]= g[Y2];\
- ((uint8_t*)dest)[5]= r[Y2];\
- dest+=6;\
- }\
- break;\
- case PIX_FMT_RGB565:\
- case PIX_FMT_BGR565:\
- {\
- const int dr1= dither_2x2_8[y&1 ][0];\
- const int dg1= dither_2x2_4[y&1 ][0];\
- const int db1= dither_2x2_8[(y&1)^1][0];\
- const int dr2= dither_2x2_8[y&1 ][1];\
- const int dg2= dither_2x2_4[y&1 ][1];\
- const int db2= dither_2x2_8[(y&1)^1][1];\
- func(uint16_t)\
- ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
- ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
- }\
- }\
- break;\
- case PIX_FMT_RGB555:\
- case PIX_FMT_BGR555:\
- {\
- const int dr1= dither_2x2_8[y&1 ][0];\
- const int dg1= dither_2x2_8[y&1 ][1];\
- const int db1= dither_2x2_8[(y&1)^1][0];\
- const int dr2= dither_2x2_8[y&1 ][1];\
- const int dg2= dither_2x2_8[y&1 ][0];\
- const int db2= dither_2x2_8[(y&1)^1][1];\
- func(uint16_t)\
- ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
- ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
- }\
- }\
- break;\
- case PIX_FMT_RGB8:\
- case PIX_FMT_BGR8:\
- {\
- const uint8_t * const d64= dither_8x8_73[y&7];\
- const uint8_t * const d32= dither_8x8_32[y&7];\
- func(uint8_t)\
- ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\
- ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\
- }\
- }\
- break;\
- case PIX_FMT_RGB4:\
- case PIX_FMT_BGR4:\
- {\
- const uint8_t * const d64= dither_8x8_73 [y&7];\
- const uint8_t * const d128=dither_8x8_220[y&7];\
- func(uint8_t)\
- ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\
- + ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\
- }\
- }\
- break;\
- case PIX_FMT_RGB4_BYTE:\
- case PIX_FMT_BGR4_BYTE:\
- {\
- const uint8_t * const d64= dither_8x8_73 [y&7];\
- const uint8_t * const d128=dither_8x8_220[y&7];\
- func(uint8_t)\
- ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\
- ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\
- }\
- }\
- break;\
- case PIX_FMT_MONOBLACK:\
- {\
- const uint8_t * const d128=dither_8x8_220[y&7];\
- uint8_t *g= c->table_gU[128] + c->table_gV[128];\
- for(i=0; i<dstW-7; i+=8){\
- int acc;\
- acc = g[((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19) + d128[0]];\
- acc+= acc + g[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19) + d128[1]];\
- acc+= acc + g[((buf0[i+2]*yalpha1+buf1[i+2]*yalpha)>>19) + d128[2]];\
- acc+= acc + g[((buf0[i+3]*yalpha1+buf1[i+3]*yalpha)>>19) + d128[3]];\
- acc+= acc + g[((buf0[i+4]*yalpha1+buf1[i+4]*yalpha)>>19) + d128[4]];\
- acc+= acc + g[((buf0[i+5]*yalpha1+buf1[i+5]*yalpha)>>19) + d128[5]];\
- acc+= acc + g[((buf0[i+6]*yalpha1+buf1[i+6]*yalpha)>>19) + d128[6]];\
- acc+= acc + g[((buf0[i+7]*yalpha1+buf1[i+7]*yalpha)>>19) + d128[7]];\
- ((uint8_t*)dest)[0]= acc;\
- dest++;\
- }\
+ switch(c->dstFormat)\
+ {\
+ case PIX_FMT_RGB32:\
+ case PIX_FMT_BGR32:\
+ func(uint32_t)\
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];\
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];\
+ } \
+ break;\
+ case PIX_FMT_RGB24:\
+ func(uint8_t)\
+ ((uint8_t*)dest)[0]= r[Y1];\
+ ((uint8_t*)dest)[1]= g[Y1];\
+ ((uint8_t*)dest)[2]= b[Y1];\
+ ((uint8_t*)dest)[3]= r[Y2];\
+ ((uint8_t*)dest)[4]= g[Y2];\
+ ((uint8_t*)dest)[5]= b[Y2];\
+ dest+=6;\
+ }\
+ break;\
+ case PIX_FMT_BGR24:\
+ func(uint8_t)\
+ ((uint8_t*)dest)[0]= b[Y1];\
+ ((uint8_t*)dest)[1]= g[Y1];\
+ ((uint8_t*)dest)[2]= r[Y1];\
+ ((uint8_t*)dest)[3]= b[Y2];\
+ ((uint8_t*)dest)[4]= g[Y2];\
+ ((uint8_t*)dest)[5]= r[Y2];\
+ dest+=6;\
+ }\
+ break;\
+ case PIX_FMT_RGB565:\
+ case PIX_FMT_BGR565:\
+ {\
+ const int dr1= dither_2x2_8[y&1 ][0];\
+ const int dg1= dither_2x2_4[y&1 ][0];\
+ const int db1= dither_2x2_8[(y&1)^1][0];\
+ const int dr2= dither_2x2_8[y&1 ][1];\
+ const int dg2= dither_2x2_4[y&1 ][1];\
+ const int db2= dither_2x2_8[(y&1)^1][1];\
+ func(uint16_t)\
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+ }\
+ }\
+ break;\
+ case PIX_FMT_RGB555:\
+ case PIX_FMT_BGR555:\
+ {\
+ const int dr1= dither_2x2_8[y&1 ][0];\
+ const int dg1= dither_2x2_8[y&1 ][1];\
+ const int db1= dither_2x2_8[(y&1)^1][0];\
+ const int dr2= dither_2x2_8[y&1 ][1];\
+ const int dg2= dither_2x2_8[y&1 ][0];\
+ const int db2= dither_2x2_8[(y&1)^1][1];\
+ func(uint16_t)\
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];\
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];\
+ }\
+ }\
+ break;\
+ case PIX_FMT_RGB8:\
+ case PIX_FMT_BGR8:\
+ {\
+ const uint8_t * const d64= dither_8x8_73[y&7];\
+ const uint8_t * const d32= dither_8x8_32[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];\
+ ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];\
+ }\
+ }\
+ break;\
+ case PIX_FMT_RGB4:\
+ case PIX_FMT_BGR4:\
+ {\
+ const uint8_t * const d64= dither_8x8_73 [y&7];\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]\
+ + ((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);\
+ }\
+ }\
+ break;\
+ case PIX_FMT_RGB4_BYTE:\
+ case PIX_FMT_BGR4_BYTE:\
+ {\
+ const uint8_t * const d64= dither_8x8_73 [y&7];\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ func(uint8_t)\
+ ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];\
+ ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];\
+ }\
+ }\
+ break;\
+ case PIX_FMT_MONOBLACK:\
+ {\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ uint8_t *g= c->table_gU[128] + c->table_gV[128];\
+ for (i=0; i<dstW-7; i+=8){\
+ int acc;\
+ acc = g[((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19) + d128[0]];\
+ acc+= acc + g[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19) + d128[1]];\
+ acc+= acc + g[((buf0[i+2]*yalpha1+buf1[i+2]*yalpha)>>19) + d128[2]];\
+ acc+= acc + g[((buf0[i+3]*yalpha1+buf1[i+3]*yalpha)>>19) + d128[3]];\
+ acc+= acc + g[((buf0[i+4]*yalpha1+buf1[i+4]*yalpha)>>19) + d128[4]];\
+ acc+= acc + g[((buf0[i+5]*yalpha1+buf1[i+5]*yalpha)>>19) + d128[5]];\
+ acc+= acc + g[((buf0[i+6]*yalpha1+buf1[i+6]*yalpha)>>19) + d128[6]];\
+ acc+= acc + g[((buf0[i+7]*yalpha1+buf1[i+7]*yalpha)>>19) + d128[7]];\
+ ((uint8_t*)dest)[0]= acc;\
+ dest++;\
+ }\
\
/*\
((uint8_t*)dest)-= dstW>>4;\
{\
- int acc=0;\
- int left=0;\
- static int top[1024];\
- static int last_new[1024][1024];\
- static int last_in3[1024][1024];\
- static int drift[1024][1024];\
- int topLeft=0;\
- int shift=0;\
- int count=0;\
- const uint8_t * const d128=dither_8x8_220[y&7];\
- int error_new=0;\
- int error_in3=0;\
- int f=0;\
- \
- for(i=dstW>>1; i<dstW; i++){\
- int in= ((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19);\
- int in2 = (76309 * (in - 16) + 32768) >> 16;\
- int in3 = (in2 < 0) ? 0 : ((in2 > 255) ? 255 : in2);\
- int old= (left*7 + topLeft + top[i]*5 + top[i+1]*3)/20 + in3\
- + (last_new[y][i] - in3)*f/256;\
- int new= old> 128 ? 255 : 0;\
+ int acc=0;\
+ int left=0;\
+ static int top[1024];\
+ static int last_new[1024][1024];\
+ static int last_in3[1024][1024];\
+ static int drift[1024][1024];\
+ int topLeft=0;\
+ int shift=0;\
+ int count=0;\
+ const uint8_t * const d128=dither_8x8_220[y&7];\
+ int error_new=0;\
+ int error_in3=0;\
+ int f=0;\
+ \
+ for (i=dstW>>1; i<dstW; i++){\
+ int in= ((buf0[i ]*yalpha1+buf1[i ]*yalpha)>>19);\
+ int in2 = (76309 * (in - 16) + 32768) >> 16;\
+ int in3 = (in2 < 0) ? 0 : ((in2 > 255) ? 255 : in2);\
+ int old= (left*7 + topLeft + top[i]*5 + top[i+1]*3)/20 + in3\
+ + (last_new[y][i] - in3)*f/256;\
+ int new= old> 128 ? 255 : 0;\
\
- error_new+= FFABS(last_new[y][i] - new);\
- error_in3+= FFABS(last_in3[y][i] - in3);\
- f= error_new - error_in3*4;\
- if(f<0) f=0;\
- if(f>256) f=256;\
+ error_new+= FFABS(last_new[y][i] - new);\
+ error_in3+= FFABS(last_in3[y][i] - in3);\
+ f= error_new - error_in3*4;\
+ if (f<0) f=0;\
+ if (f>256) f=256;\
\
- topLeft= top[i];\
- left= top[i]= old - new;\
- last_new[y][i]= new;\
- last_in3[y][i]= in3;\
+ topLeft= top[i];\
+ left= top[i]= old - new;\
+ last_new[y][i]= new;\
+ last_in3[y][i]= in3;\
\
- acc+= acc + (new&1);\
- if((i&7)==6){\
- ((uint8_t*)dest)[0]= acc;\
- ((uint8_t*)dest)++;\
- }\
- }\
+ acc+= acc + (new&1);\
+ if ((i&7)==6){\
+ ((uint8_t*)dest)[0]= acc;\
+ ((uint8_t*)dest)++;\
+ }\
+ }\
}\
*/\
- }\
- break;\
- case PIX_FMT_YUYV422:\
- func2\
- ((uint8_t*)dest)[2*i2+0]= Y1;\
- ((uint8_t*)dest)[2*i2+1]= U;\
- ((uint8_t*)dest)[2*i2+2]= Y2;\
- ((uint8_t*)dest)[2*i2+3]= V;\
- } \
- break;\
- case PIX_FMT_UYVY422:\
- func2\
- ((uint8_t*)dest)[2*i2+0]= U;\
- ((uint8_t*)dest)[2*i2+1]= Y1;\
- ((uint8_t*)dest)[2*i2+2]= V;\
- ((uint8_t*)dest)[2*i2+3]= Y2;\
- } \
- break;\
- }\
+ }\
+ break;\
+ case PIX_FMT_YUYV422:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= Y1;\
+ ((uint8_t*)dest)[2*i2+1]= U;\
+ ((uint8_t*)dest)[2*i2+2]= Y2;\
+ ((uint8_t*)dest)[2*i2+3]= V;\
+ } \
+ break;\
+ case PIX_FMT_UYVY422:\
+ func2\
+ ((uint8_t*)dest)[2*i2+0]= U;\
+ ((uint8_t*)dest)[2*i2+1]= Y1;\
+ ((uint8_t*)dest)[2*i2+2]= V;\
+ ((uint8_t*)dest)[2*i2+3]= Y2;\
+ } \
+ break;\
+ }\
static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, int dstW, int y)
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, int dstW, int y)
{
- int i;
- switch(c->dstFormat)
- {
- case PIX_FMT_BGR32:
- case PIX_FMT_RGB32:
- YSCALE_YUV_2_RGBX_C(uint32_t)
- ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];
- ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];
- }
- break;
- case PIX_FMT_RGB24:
- YSCALE_YUV_2_RGBX_C(uint8_t)
- ((uint8_t*)dest)[0]= r[Y1];
- ((uint8_t*)dest)[1]= g[Y1];
- ((uint8_t*)dest)[2]= b[Y1];
- ((uint8_t*)dest)[3]= r[Y2];
- ((uint8_t*)dest)[4]= g[Y2];
- ((uint8_t*)dest)[5]= b[Y2];
- dest+=6;
- }
- break;
- case PIX_FMT_BGR24:
- YSCALE_YUV_2_RGBX_C(uint8_t)
- ((uint8_t*)dest)[0]= b[Y1];
- ((uint8_t*)dest)[1]= g[Y1];
- ((uint8_t*)dest)[2]= r[Y1];
- ((uint8_t*)dest)[3]= b[Y2];
- ((uint8_t*)dest)[4]= g[Y2];
- ((uint8_t*)dest)[5]= r[Y2];
- dest+=6;
- }
- break;
- case PIX_FMT_RGB565:
- case PIX_FMT_BGR565:
- {
- const int dr1= dither_2x2_8[y&1 ][0];
- const int dg1= dither_2x2_4[y&1 ][0];
- const int db1= dither_2x2_8[(y&1)^1][0];
- const int dr2= dither_2x2_8[y&1 ][1];
- const int dg2= dither_2x2_4[y&1 ][1];
- const int db2= dither_2x2_8[(y&1)^1][1];
- YSCALE_YUV_2_RGBX_C(uint16_t)
- ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];
- ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];
- }
- }
- break;
- case PIX_FMT_RGB555:
- case PIX_FMT_BGR555:
- {
- const int dr1= dither_2x2_8[y&1 ][0];
- const int dg1= dither_2x2_8[y&1 ][1];
- const int db1= dither_2x2_8[(y&1)^1][0];
- const int dr2= dither_2x2_8[y&1 ][1];
- const int dg2= dither_2x2_8[y&1 ][0];
- const int db2= dither_2x2_8[(y&1)^1][1];
- YSCALE_YUV_2_RGBX_C(uint16_t)
- ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];
- ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];
- }
- }
- break;
- case PIX_FMT_RGB8:
- case PIX_FMT_BGR8:
- {
- const uint8_t * const d64= dither_8x8_73[y&7];
- const uint8_t * const d32= dither_8x8_32[y&7];
- YSCALE_YUV_2_RGBX_C(uint8_t)
- ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];
- ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];
- }
- }
- break;
- case PIX_FMT_RGB4:
- case PIX_FMT_BGR4:
- {
- const uint8_t * const d64= dither_8x8_73 [y&7];
- const uint8_t * const d128=dither_8x8_220[y&7];
- YSCALE_YUV_2_RGBX_C(uint8_t)
- ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]
- +((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);
- }
- }
- break;
- case PIX_FMT_RGB4_BYTE:
- case PIX_FMT_BGR4_BYTE:
- {
- const uint8_t * const d64= dither_8x8_73 [y&7];
- const uint8_t * const d128=dither_8x8_220[y&7];
- YSCALE_YUV_2_RGBX_C(uint8_t)
- ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];
- ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];
- }
- }
- break;
- case PIX_FMT_MONOBLACK:
- {
- const uint8_t * const d128=dither_8x8_220[y&7];
- uint8_t *g= c->table_gU[128] + c->table_gV[128];
- int acc=0;
- for(i=0; i<dstW-1; i+=2){
- int j;
- int Y1=1<<18;
- int Y2=1<<18;
-
- for(j=0; j<lumFilterSize; j++)
- {
- Y1 += lumSrc[j][i] * lumFilter[j];
- Y2 += lumSrc[j][i+1] * lumFilter[j];
- }
- Y1>>=19;
- Y2>>=19;
- if((Y1|Y2)&256)
- {
- if(Y1>255) Y1=255;
- else if(Y1<0)Y1=0;
- if(Y2>255) Y2=255;
- else if(Y2<0)Y2=0;
- }
- acc+= acc + g[Y1+d128[(i+0)&7]];
- acc+= acc + g[Y2+d128[(i+1)&7]];
- if((i&7)==6){
- ((uint8_t*)dest)[0]= acc;
- dest++;
- }
- }
- }
- break;
- case PIX_FMT_YUYV422:
- YSCALE_YUV_2_PACKEDX_C(void)
- ((uint8_t*)dest)[2*i2+0]= Y1;
- ((uint8_t*)dest)[2*i2+1]= U;
- ((uint8_t*)dest)[2*i2+2]= Y2;
- ((uint8_t*)dest)[2*i2+3]= V;
- }
- break;
- case PIX_FMT_UYVY422:
- YSCALE_YUV_2_PACKEDX_C(void)
- ((uint8_t*)dest)[2*i2+0]= U;
- ((uint8_t*)dest)[2*i2+1]= Y1;
- ((uint8_t*)dest)[2*i2+2]= V;
- ((uint8_t*)dest)[2*i2+3]= Y2;
- }
- break;
- }
+ int i;
+ switch(c->dstFormat)
+ {
+ case PIX_FMT_BGR32:
+ case PIX_FMT_RGB32:
+ YSCALE_YUV_2_RGBX_C(uint32_t)
+ ((uint32_t*)dest)[i2+0]= r[Y1] + g[Y1] + b[Y1];
+ ((uint32_t*)dest)[i2+1]= r[Y2] + g[Y2] + b[Y2];
+ }
+ break;
+ case PIX_FMT_RGB24:
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[0]= r[Y1];
+ ((uint8_t*)dest)[1]= g[Y1];
+ ((uint8_t*)dest)[2]= b[Y1];
+ ((uint8_t*)dest)[3]= r[Y2];
+ ((uint8_t*)dest)[4]= g[Y2];
+ ((uint8_t*)dest)[5]= b[Y2];
+ dest+=6;
+ }
+ break;
+ case PIX_FMT_BGR24:
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[0]= b[Y1];
+ ((uint8_t*)dest)[1]= g[Y1];
+ ((uint8_t*)dest)[2]= r[Y1];
+ ((uint8_t*)dest)[3]= b[Y2];
+ ((uint8_t*)dest)[4]= g[Y2];
+ ((uint8_t*)dest)[5]= r[Y2];
+ dest+=6;
+ }
+ break;
+ case PIX_FMT_RGB565:
+ case PIX_FMT_BGR565:
+ {
+ const int dr1= dither_2x2_8[y&1 ][0];
+ const int dg1= dither_2x2_4[y&1 ][0];
+ const int db1= dither_2x2_8[(y&1)^1][0];
+ const int dr2= dither_2x2_8[y&1 ][1];
+ const int dg2= dither_2x2_4[y&1 ][1];
+ const int db2= dither_2x2_8[(y&1)^1][1];
+ YSCALE_YUV_2_RGBX_C(uint16_t)
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];
+ }
+ }
+ break;
+ case PIX_FMT_RGB555:
+ case PIX_FMT_BGR555:
+ {
+ const int dr1= dither_2x2_8[y&1 ][0];
+ const int dg1= dither_2x2_8[y&1 ][1];
+ const int db1= dither_2x2_8[(y&1)^1][0];
+ const int dr2= dither_2x2_8[y&1 ][1];
+ const int dg2= dither_2x2_8[y&1 ][0];
+ const int db2= dither_2x2_8[(y&1)^1][1];
+ YSCALE_YUV_2_RGBX_C(uint16_t)
+ ((uint16_t*)dest)[i2+0]= r[Y1+dr1] + g[Y1+dg1] + b[Y1+db1];
+ ((uint16_t*)dest)[i2+1]= r[Y2+dr2] + g[Y2+dg2] + b[Y2+db2];
+ }
+ }
+ break;
+ case PIX_FMT_RGB8:
+ case PIX_FMT_BGR8:
+ {
+ const uint8_t * const d64= dither_8x8_73[y&7];
+ const uint8_t * const d32= dither_8x8_32[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i2+0]= r[Y1+d32[(i2+0)&7]] + g[Y1+d32[(i2+0)&7]] + b[Y1+d64[(i2+0)&7]];
+ ((uint8_t*)dest)[i2+1]= r[Y2+d32[(i2+1)&7]] + g[Y2+d32[(i2+1)&7]] + b[Y2+d64[(i2+1)&7]];
+ }
+ }
+ break;
+ case PIX_FMT_RGB4:
+ case PIX_FMT_BGR4:
+ {
+ const uint8_t * const d64= dither_8x8_73 [y&7];
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]]
+ +((r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]])<<4);
+ }
+ }
+ break;
+ case PIX_FMT_RGB4_BYTE:
+ case PIX_FMT_BGR4_BYTE:
+ {
+ const uint8_t * const d64= dither_8x8_73 [y&7];
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ YSCALE_YUV_2_RGBX_C(uint8_t)
+ ((uint8_t*)dest)[i2+0]= r[Y1+d128[(i2+0)&7]] + g[Y1+d64[(i2+0)&7]] + b[Y1+d128[(i2+0)&7]];
+ ((uint8_t*)dest)[i2+1]= r[Y2+d128[(i2+1)&7]] + g[Y2+d64[(i2+1)&7]] + b[Y2+d128[(i2+1)&7]];
+ }
+ }
+ break;
+ case PIX_FMT_MONOBLACK:
+ {
+ const uint8_t * const d128=dither_8x8_220[y&7];
+ uint8_t *g= c->table_gU[128] + c->table_gV[128];
+ int acc=0;
+ for (i=0; i<dstW-1; i+=2){
+ int j;
+ int Y1=1<<18;
+ int Y2=1<<18;
+
+ for (j=0; j<lumFilterSize; j++)
+ {
+ Y1 += lumSrc[j][i] * lumFilter[j];
+ Y2 += lumSrc[j][i+1] * lumFilter[j];
+ }
+ Y1>>=19;
+ Y2>>=19;
+ if ((Y1|Y2)&256)
+ {
+ if (Y1>255) Y1=255;
+ else if (Y1<0)Y1=0;
+ if (Y2>255) Y2=255;
+ else if (Y2<0)Y2=0;
+ }
+ acc+= acc + g[Y1+d128[(i+0)&7]];
+ acc+= acc + g[Y2+d128[(i+1)&7]];
+ if ((i&7)==6){
+ ((uint8_t*)dest)[0]= acc;
+ dest++;
+ }
+ }
+ }
+ break;
+ case PIX_FMT_YUYV422:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= Y1;
+ ((uint8_t*)dest)[2*i2+1]= U;
+ ((uint8_t*)dest)[2*i2+2]= Y2;
+ ((uint8_t*)dest)[2*i2+3]= V;
+ }
+ break;
+ case PIX_FMT_UYVY422:
+ YSCALE_YUV_2_PACKEDX_C(void)
+ ((uint8_t*)dest)[2*i2+0]= U;
+ ((uint8_t*)dest)[2*i2+1]= Y1;
+ ((uint8_t*)dest)[2*i2+2]= V;
+ ((uint8_t*)dest)[2*i2+3]= Y2;
+ }
+ break;
+ }
}
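Both packed-output paths above compute every sample the same way: the vertical filter taps are accumulated into a 32-bit sum biased by 1<<18, shifted down by 19, and clipped to 8 bits (visible explicitly in the PIX_FMT_MONOBLACK case). A minimal standalone sketch of that per-sample step, outside the macro machinery; the function name is illustrative:

#include <stdint.h>

/* One vertically filtered luma sample: accumulate the taps into a
 * rounding-biased 32-bit sum, shift by 19, clip to 0..255. */
static int filter_luma_sample(int16_t **lumSrc, int16_t *lumFilter,
                              int lumFilterSize, int i)
{
    int j, Y = 1 << 18;                   /* rounding bias */
    for (j = 0; j < lumFilterSize; j++)
        Y += lumSrc[j][i] * lumFilter[j]; /* 16-bit sample * 16-bit coeff */
    Y >>= 19;
    if (Y > 255)    Y = 255;
    else if (Y < 0) Y = 0;
    return Y;
}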
@@ -902,1630 +970,1674 @@ static inline void yuv2packedXinC(SwsContext *c, int16_t *lumFilter, int16_t **l
static double getSplineCoeff(double a, double b, double c, double d, double dist)
{
-// printf("%f %f %f %f %f\n", a,b,c,d,dist);
- if(dist<=1.0) return ((d*dist + c)*dist + b)*dist +a;
- else return getSplineCoeff( 0.0,
- b+ 2.0*c + 3.0*d,
- c + 3.0*d,
- -b- 3.0*c - 6.0*d,
- dist-1.0);
+// printf("%f %f %f %f %f\n", a,b,c,d,dist);
+ if (dist<=1.0) return ((d*dist + c)*dist + b)*dist +a;
+ else return getSplineCoeff( 0.0,
+ b+ 2.0*c + 3.0*d,
+ c + 3.0*d,
+ -b- 3.0*c - 6.0*d,
+ dist-1.0);
}
static inline int initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc,
- int srcW, int dstW, int filterAlign, int one, int flags,
- SwsVector *srcFilter, SwsVector *dstFilter, double param[2])
+ int srcW, int dstW, int filterAlign, int one, int flags,
+ SwsVector *srcFilter, SwsVector *dstFilter, double param[2])
{
- int i;
- int filterSize;
- int filter2Size;
- int minFilterSize;
- double *filter=NULL;
- double *filter2=NULL;
+ int i;
+ int filterSize;
+ int filter2Size;
+ int minFilterSize;
+ double *filter=NULL;
+ double *filter2=NULL;
#if defined(ARCH_X86)
- if(flags & SWS_CPU_CAPS_MMX)
- asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
+ if (flags & SWS_CPU_CAPS_MMX)
+ asm volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
#endif
- // Note the +1 is for the MMXscaler which reads over the end
- *filterPos = av_malloc((dstW+1)*sizeof(int16_t));
-
- if(FFABS(xInc - 0x10000) <10) // unscaled
- {
- int i;
- filterSize= 1;
- filter= av_malloc(dstW*sizeof(double)*filterSize);
- for(i=0; i<dstW*filterSize; i++) filter[i]=0;
-
- for(i=0; i<dstW; i++)
- {
- filter[i*filterSize]=1;
- (*filterPos)[i]=i;
- }
-
- }
- else if(flags&SWS_POINT) // lame looking point sampling mode
- {
- int i;
- int xDstInSrc;
- filterSize= 1;
- filter= av_malloc(dstW*sizeof(double)*filterSize);
-
- xDstInSrc= xInc/2 - 0x8000;
- for(i=0; i<dstW; i++)
- {
- int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
-
- (*filterPos)[i]= xx;
- filter[i]= 1.0;
- xDstInSrc+= xInc;
- }
- }
- else if((xInc <= (1<<16) && (flags&SWS_AREA)) || (flags&SWS_FAST_BILINEAR)) // bilinear upscale
- {
- int i;
- int xDstInSrc;
- if (flags&SWS_BICUBIC) filterSize= 4;
- else if(flags&SWS_X ) filterSize= 4;
- else filterSize= 2; // SWS_BILINEAR / SWS_AREA
- filter= av_malloc(dstW*sizeof(double)*filterSize);
-
- xDstInSrc= xInc/2 - 0x8000;
- for(i=0; i<dstW; i++)
- {
- int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
- int j;
-
- (*filterPos)[i]= xx;
- //Bilinear upscale / linear interpolate / Area averaging
- for(j=0; j<filterSize; j++)
- {
- double d= FFABS((xx<<16) - xDstInSrc)/(double)(1<<16);
- double coeff= 1.0 - d;
- if(coeff<0) coeff=0;
- filter[i*filterSize + j]= coeff;
- xx++;
- }
- xDstInSrc+= xInc;
- }
- }
- else
- {
- double xDstInSrc;
- double sizeFactor, filterSizeInSrc;
- const double xInc1= (double)xInc / (double)(1<<16);
-
- if (flags&SWS_BICUBIC) sizeFactor= 4.0;
- else if(flags&SWS_X) sizeFactor= 8.0;
- else if(flags&SWS_AREA) sizeFactor= 1.0; //downscale only, for upscale it is bilinear
- else if(flags&SWS_GAUSS) sizeFactor= 8.0; // infinite ;)
- else if(flags&SWS_LANCZOS) sizeFactor= param[0] != SWS_PARAM_DEFAULT ? 2.0*param[0] : 6.0;
- else if(flags&SWS_SINC) sizeFactor= 20.0; // infinite ;)
- else if(flags&SWS_SPLINE) sizeFactor= 20.0; // infinite ;)
- else if(flags&SWS_BILINEAR) sizeFactor= 2.0;
- else {
- sizeFactor= 0.0; //GCC warning killer
- ASSERT(0)
- }
-
- if(xInc1 <= 1.0) filterSizeInSrc= sizeFactor; // upscale
- else filterSizeInSrc= sizeFactor*srcW / (double)dstW;
-
- filterSize= (int)ceil(1 + filterSizeInSrc); // will be reduced later if possible
- if(filterSize > srcW-2) filterSize=srcW-2;
-
- filter= av_malloc(dstW*sizeof(double)*filterSize);
-
- xDstInSrc= xInc1 / 2.0 - 0.5;
- for(i=0; i<dstW; i++)
- {
- int xx= (int)(xDstInSrc - (filterSize-1)*0.5 + 0.5);
- int j;
- (*filterPos)[i]= xx;
- for(j=0; j<filterSize; j++)
- {
- double d= FFABS(xx - xDstInSrc)/filterSizeInSrc*sizeFactor;
- double coeff;
- if(flags & SWS_BICUBIC)
- {
- double B= param[0] != SWS_PARAM_DEFAULT ? param[0] : 0.0;
- double C= param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6;
-
- if(d<1.0)
- coeff = (12-9*B-6*C)*d*d*d + (-18+12*B+6*C)*d*d + 6-2*B;
- else if(d<2.0)
- coeff = (-B-6*C)*d*d*d + (6*B+30*C)*d*d + (-12*B-48*C)*d +8*B+24*C;
- else
- coeff=0.0;
- }
-/* else if(flags & SWS_X)
- {
- double p= param ? param*0.01 : 0.3;
- coeff = d ? sin(d*PI)/(d*PI) : 1.0;
- coeff*= pow(2.0, - p*d*d);
- }*/
- else if(flags & SWS_X)
- {
- double A= param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
-
- if(d<1.0)
- coeff = cos(d*PI);
- else
- coeff=-1.0;
- if(coeff<0.0) coeff= -pow(-coeff, A);
- else coeff= pow( coeff, A);
- coeff= coeff*0.5 + 0.5;
- }
- else if(flags & SWS_AREA)
- {
- double srcPixelSize= 1.0/xInc1;
- if(d + srcPixelSize/2 < 0.5) coeff= 1.0;
- else if(d - srcPixelSize/2 < 0.5) coeff= (0.5-d)/srcPixelSize + 0.5;
- else coeff=0.0;
- }
- else if(flags & SWS_GAUSS)
- {
- double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
- coeff = pow(2.0, - p*d*d);
- }
- else if(flags & SWS_SINC)
- {
- coeff = d ? sin(d*PI)/(d*PI) : 1.0;
- }
- else if(flags & SWS_LANCZOS)
- {
- double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
- coeff = d ? sin(d*PI)*sin(d*PI/p)/(d*d*PI*PI/p) : 1.0;
- if(d>p) coeff=0;
- }
- else if(flags & SWS_BILINEAR)
- {
- coeff= 1.0 - d;
- if(coeff<0) coeff=0;
- }
- else if(flags & SWS_SPLINE)
- {
- double p=-2.196152422706632;
- coeff = getSplineCoeff(1.0, 0.0, p, -p-1.0, d);
- }
- else {
- coeff= 0.0; //GCC warning killer
- ASSERT(0)
- }
-
- filter[i*filterSize + j]= coeff;
- xx++;
- }
- xDstInSrc+= xInc1;
- }
- }
-
- /* apply src & dst Filter to filter -> filter2
- av_free(filter);
- */
- ASSERT(filterSize>0)
- filter2Size= filterSize;
- if(srcFilter) filter2Size+= srcFilter->length - 1;
- if(dstFilter) filter2Size+= dstFilter->length - 1;
- ASSERT(filter2Size>0)
- filter2= av_malloc(filter2Size*dstW*sizeof(double));
-
- for(i=0; i<dstW; i++)
- {
- int j;
- SwsVector scaleFilter;
- SwsVector *outVec;
-
- scaleFilter.coeff= filter + i*filterSize;
- scaleFilter.length= filterSize;
-
- if(srcFilter) outVec= sws_getConvVec(srcFilter, &scaleFilter);
- else outVec= &scaleFilter;
-
- ASSERT(outVec->length == filter2Size)
- //FIXME dstFilter
-
- for(j=0; j<outVec->length; j++)
- {
- filter2[i*filter2Size + j]= outVec->coeff[j];
- }
-
- (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2;
-
- if(outVec != &scaleFilter) sws_freeVec(outVec);
- }
- av_free(filter); filter=NULL;
-
- /* try to reduce the filter-size (step1 find size and shift left) */
- // Assume its near normalized (*0.5 or *2.0 is ok but * 0.001 is not)
- minFilterSize= 0;
- for(i=dstW-1; i>=0; i--)
- {
- int min= filter2Size;
- int j;
- double cutOff=0.0;
-
- /* get rid off near zero elements on the left by shifting left */
- for(j=0; j<filter2Size; j++)
- {
- int k;
- cutOff += FFABS(filter2[i*filter2Size]);
-
- if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
-
- /* preserve Monotonicity because the core can't handle the filter otherwise */
- if(i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;
-
- // Move filter coeffs left
- for(k=1; k<filter2Size; k++)
- filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k];
- filter2[i*filter2Size + k - 1]= 0.0;
- (*filterPos)[i]++;
- }
-
- cutOff=0.0;
- /* count near zeros on the right */
- for(j=filter2Size-1; j>0; j--)
- {
- cutOff += FFABS(filter2[i*filter2Size + j]);
-
- if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
- min--;
- }
-
- if(min>minFilterSize) minFilterSize= min;
- }
-
- if (flags & SWS_CPU_CAPS_ALTIVEC) {
- // we can handle the special case 4,
- // so we don't want to go to the full 8
- if (minFilterSize < 5)
+    // Note the +1 is for the MMX scaler which reads over the end
+ *filterPos = av_malloc((dstW+1)*sizeof(int16_t));
+
+ if (FFABS(xInc - 0x10000) <10) // unscaled
+ {
+ int i;
+ filterSize= 1;
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+ for (i=0; i<dstW*filterSize; i++) filter[i]=0;
+
+ for (i=0; i<dstW; i++)
+ {
+ filter[i*filterSize]=1;
+ (*filterPos)[i]=i;
+ }
+
+ }
+ else if (flags&SWS_POINT) // lame looking point sampling mode
+ {
+ int i;
+ int xDstInSrc;
+ filterSize= 1;
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc/2 - 0x8000;
+ for (i=0; i<dstW; i++)
+ {
+ int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
+
+ (*filterPos)[i]= xx;
+ filter[i]= 1.0;
+ xDstInSrc+= xInc;
+ }
+ }
+ else if ((xInc <= (1<<16) && (flags&SWS_AREA)) || (flags&SWS_FAST_BILINEAR)) // bilinear upscale
+ {
+ int i;
+ int xDstInSrc;
+ if (flags&SWS_BICUBIC) filterSize= 4;
+ else if (flags&SWS_X ) filterSize= 4;
+ else filterSize= 2; // SWS_BILINEAR / SWS_AREA
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc/2 - 0x8000;
+ for (i=0; i<dstW; i++)
+ {
+ int xx= (xDstInSrc - ((filterSize-1)<<15) + (1<<15))>>16;
+ int j;
+
+ (*filterPos)[i]= xx;
+ //Bilinear upscale / linear interpolate / Area averaging
+ for (j=0; j<filterSize; j++)
+ {
+ double d= FFABS((xx<<16) - xDstInSrc)/(double)(1<<16);
+ double coeff= 1.0 - d;
+ if (coeff<0) coeff=0;
+ filter[i*filterSize + j]= coeff;
+ xx++;
+ }
+ xDstInSrc+= xInc;
+ }
+ }
+ else
+ {
+ double xDstInSrc;
+ double sizeFactor, filterSizeInSrc;
+ const double xInc1= (double)xInc / (double)(1<<16);
+
+ if (flags&SWS_BICUBIC) sizeFactor= 4.0;
+ else if (flags&SWS_X) sizeFactor= 8.0;
+ else if (flags&SWS_AREA) sizeFactor= 1.0; //downscale only, for upscale it is bilinear
+ else if (flags&SWS_GAUSS) sizeFactor= 8.0; // infinite ;)
+ else if (flags&SWS_LANCZOS) sizeFactor= param[0] != SWS_PARAM_DEFAULT ? 2.0*param[0] : 6.0;
+ else if (flags&SWS_SINC) sizeFactor= 20.0; // infinite ;)
+ else if (flags&SWS_SPLINE) sizeFactor= 20.0; // infinite ;)
+ else if (flags&SWS_BILINEAR) sizeFactor= 2.0;
+ else {
+ sizeFactor= 0.0; //GCC warning killer
+ ASSERT(0)
+ }
+
+ if (xInc1 <= 1.0) filterSizeInSrc= sizeFactor; // upscale
+ else filterSizeInSrc= sizeFactor*srcW / (double)dstW;
+
+ filterSize= (int)ceil(1 + filterSizeInSrc); // will be reduced later if possible
+ if (filterSize > srcW-2) filterSize=srcW-2;
+
+ filter= av_malloc(dstW*sizeof(double)*filterSize);
+
+ xDstInSrc= xInc1 / 2.0 - 0.5;
+ for (i=0; i<dstW; i++)
+ {
+ int xx= (int)(xDstInSrc - (filterSize-1)*0.5 + 0.5);
+ int j;
+ (*filterPos)[i]= xx;
+ for (j=0; j<filterSize; j++)
+ {
+ double d= FFABS(xx - xDstInSrc)/filterSizeInSrc*sizeFactor;
+ double coeff;
+ if (flags & SWS_BICUBIC)
+ {
+ double B= param[0] != SWS_PARAM_DEFAULT ? param[0] : 0.0;
+ double C= param[1] != SWS_PARAM_DEFAULT ? param[1] : 0.6;
+
+ if (d<1.0)
+ coeff = (12-9*B-6*C)*d*d*d + (-18+12*B+6*C)*d*d + 6-2*B;
+ else if (d<2.0)
+ coeff = (-B-6*C)*d*d*d + (6*B+30*C)*d*d + (-12*B-48*C)*d +8*B+24*C;
+ else
+ coeff=0.0;
+ }
+/* else if (flags & SWS_X)
+ {
+ double p= param ? param*0.01 : 0.3;
+ coeff = d ? sin(d*PI)/(d*PI) : 1.0;
+ coeff*= pow(2.0, - p*d*d);
+ }*/
+ else if (flags & SWS_X)
+ {
+ double A= param[0] != SWS_PARAM_DEFAULT ? param[0] : 1.0;
+
+ if (d<1.0)
+ coeff = cos(d*PI);
+ else
+ coeff=-1.0;
+ if (coeff<0.0) coeff= -pow(-coeff, A);
+ else coeff= pow( coeff, A);
+ coeff= coeff*0.5 + 0.5;
+ }
+ else if (flags & SWS_AREA)
+ {
+ double srcPixelSize= 1.0/xInc1;
+ if (d + srcPixelSize/2 < 0.5) coeff= 1.0;
+ else if (d - srcPixelSize/2 < 0.5) coeff= (0.5-d)/srcPixelSize + 0.5;
+ else coeff=0.0;
+ }
+ else if (flags & SWS_GAUSS)
+ {
+ double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
+ coeff = pow(2.0, - p*d*d);
+ }
+ else if (flags & SWS_SINC)
+ {
+ coeff = d ? sin(d*PI)/(d*PI) : 1.0;
+ }
+ else if (flags & SWS_LANCZOS)
+ {
+ double p= param[0] != SWS_PARAM_DEFAULT ? param[0] : 3.0;
+ coeff = d ? sin(d*PI)*sin(d*PI/p)/(d*d*PI*PI/p) : 1.0;
+ if (d>p) coeff=0;
+ }
+ else if (flags & SWS_BILINEAR)
+ {
+ coeff= 1.0 - d;
+ if (coeff<0) coeff=0;
+ }
+ else if (flags & SWS_SPLINE)
+ {
+ double p=-2.196152422706632;
+ coeff = getSplineCoeff(1.0, 0.0, p, -p-1.0, d);
+ }
+ else {
+ coeff= 0.0; //GCC warning killer
+ ASSERT(0)
+ }
+
+ filter[i*filterSize + j]= coeff;
+ xx++;
+ }
+ xDstInSrc+= xInc1;
+ }
+ }
+
+ /* apply src & dst Filter to filter -> filter2
+ av_free(filter);
+ */
+ ASSERT(filterSize>0)
+ filter2Size= filterSize;
+ if (srcFilter) filter2Size+= srcFilter->length - 1;
+ if (dstFilter) filter2Size+= dstFilter->length - 1;
+ ASSERT(filter2Size>0)
+ filter2= av_malloc(filter2Size*dstW*sizeof(double));
+
+ for (i=0; i<dstW; i++)
+ {
+ int j;
+ SwsVector scaleFilter;
+ SwsVector *outVec;
+
+ scaleFilter.coeff= filter + i*filterSize;
+ scaleFilter.length= filterSize;
+
+ if (srcFilter) outVec= sws_getConvVec(srcFilter, &scaleFilter);
+ else outVec= &scaleFilter;
+
+ ASSERT(outVec->length == filter2Size)
+ //FIXME dstFilter
+
+ for (j=0; j<outVec->length; j++)
+ {
+ filter2[i*filter2Size + j]= outVec->coeff[j];
+ }
+
+ (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2;
+
+ if (outVec != &scaleFilter) sws_freeVec(outVec);
+ }
+ av_free(filter); filter=NULL;
+
+    /* try to reduce the filter size (step 1: find size and shift left) */
+ // Assume it is near normalized (*0.5 or *2.0 is OK but * 0.001 is not).
+ minFilterSize= 0;
+ for (i=dstW-1; i>=0; i--)
+ {
+ int min= filter2Size;
+ int j;
+ double cutOff=0.0;
+
+        /* get rid of near-zero elements on the left by shifting left */
+ for (j=0; j<filter2Size; j++)
+ {
+ int k;
+ cutOff += FFABS(filter2[i*filter2Size]);
+
+ if (cutOff > SWS_MAX_REDUCE_CUTOFF) break;
+
+ /* preserve monotonicity because the core can't handle the filter otherwise */
+ if (i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;
+
+ // Move filter coeffs left
+ for (k=1; k<filter2Size; k++)
+ filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k];
+ filter2[i*filter2Size + k - 1]= 0.0;
+ (*filterPos)[i]++;
+ }
+
+ cutOff=0.0;
+ /* count near zeros on the right */
+ for (j=filter2Size-1; j>0; j--)
+ {
+ cutOff += FFABS(filter2[i*filter2Size + j]);
+
+ if (cutOff > SWS_MAX_REDUCE_CUTOFF) break;
+ min--;
+ }
+
+ if (min>minFilterSize) minFilterSize= min;
+ }
+
+ if (flags & SWS_CPU_CAPS_ALTIVEC) {
+ // we can handle the special case 4,
+ // so we don't want to go to the full 8
+ if (minFilterSize < 5)
filterAlign = 4;
- // we really don't want to waste our time
- // doing useless computation, so fall-back on
- // the scalar C code for very small filter.
- // vectorizing is worth it only if you have
- // decent-sized vector.
- if (minFilterSize < 3)
+    // we really don't want to waste our time
+    // doing useless computation, so fall back on
+    // the scalar C code for very small filters.
+    // Vectorizing is worth it only if you have a
+    // decent-sized vector.
+ if (minFilterSize < 3)
filterAlign = 1;
+ }
+
+ if (flags & SWS_CPU_CAPS_MMX) {
+ // special case for unscaled vertical filtering
+ if (minFilterSize == 1 && filterAlign == 2)
+ filterAlign= 1;
+ }
+
+ ASSERT(minFilterSize > 0)
+ filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
+ ASSERT(filterSize > 0)
+ filter= av_malloc(filterSize*dstW*sizeof(double));
+ if (filterSize >= MAX_FILTER_SIZE)
+ return -1;
+ *outFilterSize= filterSize;
+
+ if (flags&SWS_PRINT_INFO)
+ av_log(NULL, AV_LOG_VERBOSE, "SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize);
+    /* try to reduce the filter size (step 2: reduce it) */
+ for (i=0; i<dstW; i++)
+ {
+ int j;
+
+ for (j=0; j<filterSize; j++)
+ {
+ if (j>=filter2Size) filter[i*filterSize + j]= 0.0;
+ else filter[i*filterSize + j]= filter2[i*filter2Size + j];
+ }
+ }
+ av_free(filter2); filter2=NULL;
+
+
+ //FIXME try to align filterpos if possible
+
+ //fix borders
+ for (i=0; i<dstW; i++)
+ {
+ int j;
+ if ((*filterPos)[i] < 0)
+ {
+ // Move filter coeffs left to compensate for filterPos
+ for (j=1; j<filterSize; j++)
+ {
+ int left= FFMAX(j + (*filterPos)[i], 0);
+ filter[i*filterSize + left] += filter[i*filterSize + j];
+ filter[i*filterSize + j]=0;
+ }
+ (*filterPos)[i]= 0;
}
- if (flags & SWS_CPU_CAPS_MMX) {
- // special case for unscaled vertical filtering
- if(minFilterSize == 1 && filterAlign == 2)
- filterAlign= 1;
+ if ((*filterPos)[i] + filterSize > srcW)
+ {
+ int shift= (*filterPos)[i] + filterSize - srcW;
+ // Move filter coeffs right to compensate for filterPos
+ for (j=filterSize-2; j>=0; j--)
+ {
+ int right= FFMIN(j + shift, filterSize-1);
+ filter[i*filterSize +right] += filter[i*filterSize +j];
+ filter[i*filterSize +j]=0;
+ }
+ (*filterPos)[i]= srcW - filterSize;
}
+ }
- ASSERT(minFilterSize > 0)
- filterSize= (minFilterSize +(filterAlign-1)) & (~(filterAlign-1));
- ASSERT(filterSize > 0)
- filter= av_malloc(filterSize*dstW*sizeof(double));
- if(filterSize >= MAX_FILTER_SIZE)
- return -1;
- *outFilterSize= filterSize;
-
- if(flags&SWS_PRINT_INFO)
- av_log(NULL, AV_LOG_VERBOSE, "SwScaler: reducing / aligning filtersize %d -> %d\n", filter2Size, filterSize);
- /* try to reduce the filter-size (step2 reduce it) */
- for(i=0; i<dstW; i++)
- {
- int j;
-
- for(j=0; j<filterSize; j++)
- {
- if(j>=filter2Size) filter[i*filterSize + j]= 0.0;
- else filter[i*filterSize + j]= filter2[i*filter2Size + j];
- }
- }
- av_free(filter2); filter2=NULL;
-
-
- //FIXME try to align filterpos if possible
-
- //fix borders
- for(i=0; i<dstW; i++)
- {
- int j;
- if((*filterPos)[i] < 0)
- {
- // Move filter coeffs left to compensate for filterPos
- for(j=1; j<filterSize; j++)
- {
- int left= FFMAX(j + (*filterPos)[i], 0);
- filter[i*filterSize + left] += filter[i*filterSize + j];
- filter[i*filterSize + j]=0;
- }
- (*filterPos)[i]= 0;
- }
-
- if((*filterPos)[i] + filterSize > srcW)
- {
- int shift= (*filterPos)[i] + filterSize - srcW;
- // Move filter coeffs right to compensate for filterPos
- for(j=filterSize-2; j>=0; j--)
- {
- int right= FFMIN(j + shift, filterSize-1);
- filter[i*filterSize +right] += filter[i*filterSize +j];
- filter[i*filterSize +j]=0;
- }
- (*filterPos)[i]= srcW - filterSize;
- }
- }
-
- // Note the +1 is for the MMXscaler which reads over the end
- /* align at 16 for AltiVec (needed by hScale_altivec_real) */
- *outFilter= av_mallocz(*outFilterSize*(dstW+1)*sizeof(int16_t));
-
- /* Normalize & Store in outFilter */
- for(i=0; i<dstW; i++)
- {
- int j;
- double error=0;
- double sum=0;
- double scale= one;
-
- for(j=0; j<filterSize; j++)
- {
- sum+= filter[i*filterSize + j];
- }
- scale/= sum;
- for(j=0; j<*outFilterSize; j++)
- {
- double v= filter[i*filterSize + j]*scale + error;
- int intV= floor(v + 0.5);
- (*outFilter)[i*(*outFilterSize) + j]= intV;
- error = v - intV;
- }
- }
-
- (*filterPos)[dstW]= (*filterPos)[dstW-1]; // the MMX scaler will read over the end
- for(i=0; i<*outFilterSize; i++)
- {
- int j= dstW*(*outFilterSize);
- (*outFilter)[j + i]= (*outFilter)[j + i - (*outFilterSize)];
- }
-
- av_free(filter);
- return 0;
+    // Note the +1 is for the MMX scaler which reads over the end
+ /* align at 16 for AltiVec (needed by hScale_altivec_real) */
+ *outFilter= av_mallocz(*outFilterSize*(dstW+1)*sizeof(int16_t));
+
+ /* Normalize & Store in outFilter */
+ for (i=0; i<dstW; i++)
+ {
+ int j;
+ double error=0;
+ double sum=0;
+ double scale= one;
+
+ for (j=0; j<filterSize; j++)
+ {
+ sum+= filter[i*filterSize + j];
+ }
+ scale/= sum;
+ for (j=0; j<*outFilterSize; j++)
+ {
+ double v= filter[i*filterSize + j]*scale + error;
+ int intV= floor(v + 0.5);
+ (*outFilter)[i*(*outFilterSize) + j]= intV;
+ error = v - intV;
+ }
+ }
+
+ (*filterPos)[dstW]= (*filterPos)[dstW-1]; // the MMX scaler will read over the end
+ for (i=0; i<*outFilterSize; i++)
+ {
+ int j= dstW*(*outFilterSize);
+ (*outFilter)[j + i]= (*outFilter)[j + i - (*outFilterSize)];
+ }
+
+ av_free(filter);
+ return 0;
}
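The final loop of initFilter() above quantizes each row of double coefficients to int16 fixed point so the taps sum to `one`, carrying the rounding error from one tap into the next instead of dropping it. A self-contained sketch of that scheme; the helper name and standalone signature are illustrative:

#include <math.h>
#include <stdint.h>

/* Quantize one filter row so the int16 coefficients sum to 'one',
 * propagating the rounding error forward as initFilter() does above. */
static void quantize_filter_row(const double *row, int16_t *out, int n, int one)
{
    double sum = 0.0, error = 0.0, scale;
    int j;
    for (j = 0; j < n; j++)
        sum += row[j];
    scale = (double)one / sum;            /* normalize: taps will sum to 'one' */
    for (j = 0; j < n; j++) {
        double v = row[j] * scale + error;
        int intV = (int)floor(v + 0.5);
        out[j] = (int16_t)intV;
        error  = v - intV;                /* carry the rounding error forward */
    }
}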
#ifdef COMPILE_MMX2
static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
{
- uint8_t *fragmentA;
- long imm8OfPShufW1A;
- long imm8OfPShufW2A;
- long fragmentLengthA;
- uint8_t *fragmentB;
- long imm8OfPShufW1B;
- long imm8OfPShufW2B;
- long fragmentLengthB;
- int fragmentPos;
-
- int xpos, i;
-
- // create an optimized horizontal scaling routine
-
- //code fragment
-
- asm volatile(
- "jmp 9f \n\t"
- // Begin
- "0: \n\t"
- "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
- "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
- "movd 1(%%"REG_c", %%"REG_S"), %%mm1\n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "pshufw $0xFF, %%mm1, %%mm1 \n\t"
- "1: \n\t"
- "pshufw $0xFF, %%mm0, %%mm0 \n\t"
- "2: \n\t"
- "psubw %%mm1, %%mm0 \n\t"
- "movl 8(%%"REG_b", %%"REG_a"), %%esi\n\t"
- "pmullw %%mm3, %%mm0 \n\t"
- "psllw $7, %%mm1 \n\t"
- "paddw %%mm1, %%mm0 \n\t"
-
- "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
-
- "add $8, %%"REG_a" \n\t"
- // End
- "9: \n\t"
-// "int $3\n\t"
- "lea 0b, %0 \n\t"
- "lea 1b, %1 \n\t"
- "lea 2b, %2 \n\t"
- "dec %1 \n\t"
- "dec %2 \n\t"
- "sub %0, %1 \n\t"
- "sub %0, %2 \n\t"
- "lea 9b, %3 \n\t"
- "sub %0, %3 \n\t"
-
-
- :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
- "=r" (fragmentLengthA)
- );
-
- asm volatile(
- "jmp 9f \n\t"
- // Begin
- "0: \n\t"
- "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
- "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "pshufw $0xFF, %%mm0, %%mm1 \n\t"
- "1: \n\t"
- "pshufw $0xFF, %%mm0, %%mm0 \n\t"
- "2: \n\t"
- "psubw %%mm1, %%mm0 \n\t"
- "movl 8(%%"REG_b", %%"REG_a"), %%esi\n\t"
- "pmullw %%mm3, %%mm0 \n\t"
- "psllw $7, %%mm1 \n\t"
- "paddw %%mm1, %%mm0 \n\t"
-
- "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
-
- "add $8, %%"REG_a" \n\t"
- // End
- "9: \n\t"
-// "int $3\n\t"
- "lea 0b, %0 \n\t"
- "lea 1b, %1 \n\t"
- "lea 2b, %2 \n\t"
- "dec %1 \n\t"
- "dec %2 \n\t"
- "sub %0, %1 \n\t"
- "sub %0, %2 \n\t"
- "lea 9b, %3 \n\t"
- "sub %0, %3 \n\t"
-
-
- :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
- "=r" (fragmentLengthB)
- );
-
- xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers
- fragmentPos=0;
-
- for(i=0; i<dstW/numSplits; i++)
- {
- int xx=xpos>>16;
-
- if((i&3) == 0)
- {
- int a=0;
- int b=((xpos+xInc)>>16) - xx;
- int c=((xpos+xInc*2)>>16) - xx;
- int d=((xpos+xInc*3)>>16) - xx;
-
- filter[i ] = (( xpos & 0xFFFF) ^ 0xFFFF)>>9;
- filter[i+1] = (((xpos+xInc ) & 0xFFFF) ^ 0xFFFF)>>9;
- filter[i+2] = (((xpos+xInc*2) & 0xFFFF) ^ 0xFFFF)>>9;
- filter[i+3] = (((xpos+xInc*3) & 0xFFFF) ^ 0xFFFF)>>9;
- filterPos[i/2]= xx;
-
- if(d+1<4)
- {
- int maxShift= 3-(d+1);
- int shift=0;
-
- memcpy(funnyCode + fragmentPos, fragmentB, fragmentLengthB);
-
- funnyCode[fragmentPos + imm8OfPShufW1B]=
- (a+1) | ((b+1)<<2) | ((c+1)<<4) | ((d+1)<<6);
- funnyCode[fragmentPos + imm8OfPShufW2B]=
- a | (b<<2) | (c<<4) | (d<<6);
-
- if(i+3>=dstW) shift=maxShift; //avoid overread
- else if((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //Align
-
- if(shift && i>=shift)
- {
- funnyCode[fragmentPos + imm8OfPShufW1B]+= 0x55*shift;
- funnyCode[fragmentPos + imm8OfPShufW2B]+= 0x55*shift;
- filterPos[i/2]-=shift;
- }
-
- fragmentPos+= fragmentLengthB;
- }
- else
- {
- int maxShift= 3-d;
- int shift=0;
-
- memcpy(funnyCode + fragmentPos, fragmentA, fragmentLengthA);
-
- funnyCode[fragmentPos + imm8OfPShufW1A]=
- funnyCode[fragmentPos + imm8OfPShufW2A]=
- a | (b<<2) | (c<<4) | (d<<6);
-
- if(i+4>=dstW) shift=maxShift; //avoid overread
- else if((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //partial align
-
- if(shift && i>=shift)
- {
- funnyCode[fragmentPos + imm8OfPShufW1A]+= 0x55*shift;
- funnyCode[fragmentPos + imm8OfPShufW2A]+= 0x55*shift;
- filterPos[i/2]-=shift;
- }
-
- fragmentPos+= fragmentLengthA;
- }
-
- funnyCode[fragmentPos]= RET;
- }
- xpos+=xInc;
- }
- filterPos[i/2]= xpos>>16; // needed to jump to the next part
+ uint8_t *fragmentA;
+ long imm8OfPShufW1A;
+ long imm8OfPShufW2A;
+ long fragmentLengthA;
+ uint8_t *fragmentB;
+ long imm8OfPShufW1B;
+ long imm8OfPShufW2B;
+ long fragmentLengthB;
+ int fragmentPos;
+
+ int xpos, i;
+
+ // create an optimized horizontal scaling routine
+
+ //code fragment
+
+ asm volatile(
+ "jmp 9f \n\t"
+ // Begin
+ "0: \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t"
+ "movd 1(%%"REG_c", %%"REG_S"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "pshufw $0xFF, %%mm1, %%mm1 \n\t"
+ "1: \n\t"
+ "pshufw $0xFF, %%mm0, %%mm0 \n\t"
+ "2: \n\t"
+ "psubw %%mm1, %%mm0 \n\t"
+ "movl 8(%%"REG_b", %%"REG_a"), %%esi \n\t"
+ "pmullw %%mm3, %%mm0 \n\t"
+ "psllw $7, %%mm1 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+
+ "movq %%mm0, (%%"REG_D", %%"REG_a") \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ // End
+ "9: \n\t"
+// "int $3 \n\t"
+ "lea 0b, %0 \n\t"
+ "lea 1b, %1 \n\t"
+ "lea 2b, %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea 9b, %3 \n\t"
+ "sub %0, %3 \n\t"
+
+
+ :"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
+ "=r" (fragmentLengthA)
+ );
+
+ asm volatile(
+ "jmp 9f \n\t"
+ // Begin
+ "0: \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3 \n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "pshufw $0xFF, %%mm0, %%mm1 \n\t"
+ "1: \n\t"
+ "pshufw $0xFF, %%mm0, %%mm0 \n\t"
+ "2: \n\t"
+ "psubw %%mm1, %%mm0 \n\t"
+ "movl 8(%%"REG_b", %%"REG_a"), %%esi \n\t"
+ "pmullw %%mm3, %%mm0 \n\t"
+ "psllw $7, %%mm1 \n\t"
+ "paddw %%mm1, %%mm0 \n\t"
+
+ "movq %%mm0, (%%"REG_D", %%"REG_a") \n\t"
+
+ "add $8, %%"REG_a" \n\t"
+ // End
+ "9: \n\t"
+// "int $3 \n\t"
+ "lea 0b, %0 \n\t"
+ "lea 1b, %1 \n\t"
+ "lea 2b, %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea 9b, %3 \n\t"
+ "sub %0, %3 \n\t"
+
+
+ :"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
+ "=r" (fragmentLengthB)
+ );
+
+ xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers
+ fragmentPos=0;
+
+ for (i=0; i<dstW/numSplits; i++)
+ {
+ int xx=xpos>>16;
+
+ if ((i&3) == 0)
+ {
+ int a=0;
+ int b=((xpos+xInc)>>16) - xx;
+ int c=((xpos+xInc*2)>>16) - xx;
+ int d=((xpos+xInc*3)>>16) - xx;
+
+ filter[i ] = (( xpos & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+1] = (((xpos+xInc ) & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+2] = (((xpos+xInc*2) & 0xFFFF) ^ 0xFFFF)>>9;
+ filter[i+3] = (((xpos+xInc*3) & 0xFFFF) ^ 0xFFFF)>>9;
+ filterPos[i/2]= xx;
+
+ if (d+1<4)
+ {
+ int maxShift= 3-(d+1);
+ int shift=0;
+
+ memcpy(funnyCode + fragmentPos, fragmentB, fragmentLengthB);
+
+ funnyCode[fragmentPos + imm8OfPShufW1B]=
+ (a+1) | ((b+1)<<2) | ((c+1)<<4) | ((d+1)<<6);
+ funnyCode[fragmentPos + imm8OfPShufW2B]=
+ a | (b<<2) | (c<<4) | (d<<6);
+
+ if (i+3>=dstW) shift=maxShift; //avoid overread
+ else if ((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //Align
+
+ if (shift && i>=shift)
+ {
+ funnyCode[fragmentPos + imm8OfPShufW1B]+= 0x55*shift;
+ funnyCode[fragmentPos + imm8OfPShufW2B]+= 0x55*shift;
+ filterPos[i/2]-=shift;
+ }
+
+ fragmentPos+= fragmentLengthB;
+ }
+ else
+ {
+ int maxShift= 3-d;
+ int shift=0;
+
+ memcpy(funnyCode + fragmentPos, fragmentA, fragmentLengthA);
+
+ funnyCode[fragmentPos + imm8OfPShufW1A]=
+ funnyCode[fragmentPos + imm8OfPShufW2A]=
+ a | (b<<2) | (c<<4) | (d<<6);
+
+ if (i+4>=dstW) shift=maxShift; //avoid overread
+ else if ((filterPos[i/2]&3) <= maxShift) shift=filterPos[i/2]&3; //partial align
+
+ if (shift && i>=shift)
+ {
+ funnyCode[fragmentPos + imm8OfPShufW1A]+= 0x55*shift;
+ funnyCode[fragmentPos + imm8OfPShufW2A]+= 0x55*shift;
+ filterPos[i/2]-=shift;
+ }
+
+ fragmentPos+= fragmentLengthA;
+ }
+
+ funnyCode[fragmentPos]= RET;
+ }
+ xpos+=xInc;
+ }
+ filterPos[i/2]= xpos>>16; // needed to jump to the next part
}
#endif /* COMPILE_MMX2 */
static void globalInit(void){
// generating tables:
int i;
- for(i=0; i<768; i++){
- int c= av_clip_uint8(i-256);
- clip_table[i]=c;
+ for (i=0; i<768; i++){
+ int c= av_clip_uint8(i-256);
+ clip_table[i]=c;
}
}
static SwsFunc getSwsFunc(int flags){
-
+
#if defined(RUNTIME_CPUDETECT) && defined (CONFIG_GPL)
#if defined(ARCH_X86)
- // ordered per speed fasterst first
- if(flags & SWS_CPU_CAPS_MMX2)
- return swScale_MMX2;
- else if(flags & SWS_CPU_CAPS_3DNOW)
- return swScale_3DNow;
- else if(flags & SWS_CPU_CAPS_MMX)
- return swScale_MMX;
- else
- return swScale_C;
+ // ordered per speed fastest first
+ if (flags & SWS_CPU_CAPS_MMX2)
+ return swScale_MMX2;
+ else if (flags & SWS_CPU_CAPS_3DNOW)
+ return swScale_3DNow;
+ else if (flags & SWS_CPU_CAPS_MMX)
+ return swScale_MMX;
+ else
+ return swScale_C;
#else
#ifdef ARCH_POWERPC
- if(flags & SWS_CPU_CAPS_ALTIVEC)
- return swScale_altivec;
- else
- return swScale_C;
+ if (flags & SWS_CPU_CAPS_ALTIVEC)
+ return swScale_altivec;
+ else
+ return swScale_C;
#endif
- return swScale_C;
+ return swScale_C;
#endif /* defined(ARCH_X86) */
#else //RUNTIME_CPUDETECT
#ifdef HAVE_MMX2
- return swScale_MMX2;
+ return swScale_MMX2;
#elif defined (HAVE_3DNOW)
- return swScale_3DNow;
+ return swScale_3DNow;
#elif defined (HAVE_MMX)
- return swScale_MMX;
+ return swScale_MMX;
#elif defined (HAVE_ALTIVEC)
- return swScale_altivec;
+ return swScale_altivec;
#else
- return swScale_C;
+ return swScale_C;
#endif
#endif //!RUNTIME_CPUDETECT
}
static int PlanarToNV12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride[]){
- uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
- /* Copy Y plane */
- if(dstStride[0]==srcStride[0] && srcStride[0] > 0)
- memcpy(dst, src[0], srcSliceH*dstStride[0]);
- else
- {
- int i;
- uint8_t *srcPtr= src[0];
- uint8_t *dstPtr= dst;
- for(i=0; i<srcSliceH; i++)
- {
- memcpy(dstPtr, srcPtr, c->srcW);
- srcPtr+= srcStride[0];
- dstPtr+= dstStride[0];
- }
- }
- dst = dstParam[1] + dstStride[1]*srcSliceY/2;
- if (c->dstFormat == PIX_FMT_NV12)
- interleaveBytes( src[1],src[2],dst,c->srcW/2,srcSliceH/2,srcStride[1],srcStride[2],dstStride[0] );
- else
- interleaveBytes( src[2],src[1],dst,c->srcW/2,srcSliceH/2,srcStride[2],srcStride[1],dstStride[0] );
-
- return srcSliceH;
+ int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+ /* Copy Y plane */
+ if (dstStride[0]==srcStride[0] && srcStride[0] > 0)
+ memcpy(dst, src[0], srcSliceH*dstStride[0]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst;
+ for (i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ dst = dstParam[1] + dstStride[1]*srcSliceY/2;
+ if (c->dstFormat == PIX_FMT_NV12)
+ interleaveBytes(src[1], src[2], dst, c->srcW/2, srcSliceH/2, srcStride[1], srcStride[2], dstStride[0]);
+ else
+ interleaveBytes(src[2], src[1], dst, c->srcW/2, srcSliceH/2, srcStride[2], srcStride[1], dstStride[0]);
+
+ return srcSliceH;
}
static int PlanarToYuy2Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride[]){
- uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+ int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
- yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+ yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
- return srcSliceH;
+ return srcSliceH;
}
static int PlanarToUyvyWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride[]){
- uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
+ int srcSliceH, uint8_t* dstParam[], int dstStride[]){
+ uint8_t *dst=dstParam[0] + dstStride[0]*srcSliceY;
- yv12touyvy( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
+ yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
- return srcSliceH;
+ return srcSliceH;
}
/* {RGB,BGR}{15,16,24,32} -> {RGB,BGR}{15,16,24,32} */
static int rgb2rgbWrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
- const int srcFormat= c->srcFormat;
- const int dstFormat= c->dstFormat;
- const int srcBpp= (fmt_depth(srcFormat) + 7) >> 3;
- const int dstBpp= (fmt_depth(dstFormat) + 7) >> 3;
- const int srcId= fmt_depth(srcFormat) >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */
- const int dstId= fmt_depth(dstFormat) >> 2;
- void (*conv)(const uint8_t *src, uint8_t *dst, long src_size)=NULL;
-
- /* BGR -> BGR */
- if( (isBGR(srcFormat) && isBGR(dstFormat))
- || (isRGB(srcFormat) && isRGB(dstFormat))){
- switch(srcId | (dstId<<4)){
- case 0x34: conv= rgb16to15; break;
- case 0x36: conv= rgb24to15; break;
- case 0x38: conv= rgb32to15; break;
- case 0x43: conv= rgb15to16; break;
- case 0x46: conv= rgb24to16; break;
- case 0x48: conv= rgb32to16; break;
- case 0x63: conv= rgb15to24; break;
- case 0x64: conv= rgb16to24; break;
- case 0x68: conv= rgb32to24; break;
- case 0x83: conv= rgb15to32; break;
- case 0x84: conv= rgb16to32; break;
- case 0x86: conv= rgb24to32; break;
- default: av_log(c, AV_LOG_ERROR, "swScaler: internal error %s -> %s converter\n",
- sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
- }
- }else if( (isBGR(srcFormat) && isRGB(dstFormat))
- || (isRGB(srcFormat) && isBGR(dstFormat))){
- switch(srcId | (dstId<<4)){
- case 0x33: conv= rgb15tobgr15; break;
- case 0x34: conv= rgb16tobgr15; break;
- case 0x36: conv= rgb24tobgr15; break;
- case 0x38: conv= rgb32tobgr15; break;
- case 0x43: conv= rgb15tobgr16; break;
- case 0x44: conv= rgb16tobgr16; break;
- case 0x46: conv= rgb24tobgr16; break;
- case 0x48: conv= rgb32tobgr16; break;
- case 0x63: conv= rgb15tobgr24; break;
- case 0x64: conv= rgb16tobgr24; break;
- case 0x66: conv= rgb24tobgr24; break;
- case 0x68: conv= rgb32tobgr24; break;
- case 0x83: conv= rgb15tobgr32; break;
- case 0x84: conv= rgb16tobgr32; break;
- case 0x86: conv= rgb24tobgr32; break;
- case 0x88: conv= rgb32tobgr32; break;
- default: av_log(c, AV_LOG_ERROR, "swScaler: internal error %s -> %s converter\n",
- sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
- }
- }else{
- av_log(c, AV_LOG_ERROR, "swScaler: internal error %s -> %s converter\n",
- sws_format_name(srcFormat), sws_format_name(dstFormat));
- }
-
- if(dstStride[0]*srcBpp == srcStride[0]*dstBpp)
- conv(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
- else
- {
- int i;
- uint8_t *srcPtr= src[0];
- uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
-
- for(i=0; i<srcSliceH; i++)
- {
- conv(srcPtr, dstPtr, c->srcW*srcBpp);
- srcPtr+= srcStride[0];
- dstPtr+= dstStride[0];
- }
- }
- return srcSliceH;
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ const int srcFormat= c->srcFormat;
+ const int dstFormat= c->dstFormat;
+ const int srcBpp= (fmt_depth(srcFormat) + 7) >> 3;
+ const int dstBpp= (fmt_depth(dstFormat) + 7) >> 3;
+ const int srcId= fmt_depth(srcFormat) >> 2; /* 1:0, 4:1, 8:2, 15:3, 16:4, 24:6, 32:8 */
+ const int dstId= fmt_depth(dstFormat) >> 2;
+ void (*conv)(const uint8_t *src, uint8_t *dst, long src_size)=NULL;
+
+ /* BGR -> BGR */
+ if ( (isBGR(srcFormat) && isBGR(dstFormat))
+ || (isRGB(srcFormat) && isRGB(dstFormat))){
+ switch(srcId | (dstId<<4)){
+ case 0x34: conv= rgb16to15; break;
+ case 0x36: conv= rgb24to15; break;
+ case 0x38: conv= rgb32to15; break;
+ case 0x43: conv= rgb15to16; break;
+ case 0x46: conv= rgb24to16; break;
+ case 0x48: conv= rgb32to16; break;
+ case 0x63: conv= rgb15to24; break;
+ case 0x64: conv= rgb16to24; break;
+ case 0x68: conv= rgb32to24; break;
+ case 0x83: conv= rgb15to32; break;
+ case 0x84: conv= rgb16to32; break;
+ case 0x86: conv= rgb24to32; break;
+ default: av_log(c, AV_LOG_ERROR, "swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
+ }
+ }else if ( (isBGR(srcFormat) && isRGB(dstFormat))
+ || (isRGB(srcFormat) && isBGR(dstFormat))){
+ switch(srcId | (dstId<<4)){
+ case 0x33: conv= rgb15tobgr15; break;
+ case 0x34: conv= rgb16tobgr15; break;
+ case 0x36: conv= rgb24tobgr15; break;
+ case 0x38: conv= rgb32tobgr15; break;
+ case 0x43: conv= rgb15tobgr16; break;
+ case 0x44: conv= rgb16tobgr16; break;
+ case 0x46: conv= rgb24tobgr16; break;
+ case 0x48: conv= rgb32tobgr16; break;
+ case 0x63: conv= rgb15tobgr24; break;
+ case 0x64: conv= rgb16tobgr24; break;
+ case 0x66: conv= rgb24tobgr24; break;
+ case 0x68: conv= rgb32tobgr24; break;
+ case 0x83: conv= rgb15tobgr32; break;
+ case 0x84: conv= rgb16tobgr32; break;
+ case 0x86: conv= rgb24tobgr32; break;
+ case 0x88: conv= rgb32tobgr32; break;
+ default: av_log(c, AV_LOG_ERROR, "swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat)); break;
+ }
+ }else{
+ av_log(c, AV_LOG_ERROR, "swScaler: internal error %s -> %s converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+ }
+
+ if(conv)
+ {
+ if (dstStride[0]*srcBpp == srcStride[0]*dstBpp && srcStride[0] > 0)
+ conv(src[0], dst[0] + dstStride[0]*srcSliceY, srcSliceH*srcStride[0]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+
+ for (i=0; i<srcSliceH; i++)
+ {
+ conv(srcPtr, dstPtr, c->srcW*srcBpp);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ }
+ return srcSliceH;
}
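The switch in rgb2rgbWrapper() above selects a converter from a key built out of the two bit depths: fmt_depth()>>2 maps 15, 16, 24 and 32 bpp to the ids 3, 4, 6 and 8, so a 24 bpp source and a 16 bpp destination give 0x6 | (0x4<<4) == 0x46 and pick rgb24to16. A small sketch of the key construction, with the depths passed in directly rather than derived from a pixel format:

/* Low nibble = source depth id, high nibble = destination depth id,
 * where id = bits_per_pixel >> 2 (15->3, 16->4, 24->6, 32->8). */
static int rgb_conv_key(int src_depth, int dst_depth)
{
    return (src_depth >> 2) | ((dst_depth >> 2) << 4);
}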
static int bgr24toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
-
- rgb24toyv12(
- src[0],
- dst[0]+ srcSliceY *dstStride[0],
- dst[1]+(srcSliceY>>1)*dstStride[1],
- dst[2]+(srcSliceY>>1)*dstStride[2],
- c->srcW, srcSliceH,
- dstStride[0], dstStride[1], srcStride[0]);
- return srcSliceH;
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ rgb24toyv12(
+ src[0],
+ dst[0]+ srcSliceY *dstStride[0],
+ dst[1]+(srcSliceY>>1)*dstStride[1],
+ dst[2]+(srcSliceY>>1)*dstStride[2],
+ c->srcW, srcSliceH,
+ dstStride[0], dstStride[1], srcStride[0]);
+ return srcSliceH;
}
static int yvu9toyv12Wrapper(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
- int i;
-
- /* copy Y */
- if(srcStride[0]==dstStride[0] && srcStride[0] > 0)
- memcpy(dst[0]+ srcSliceY*dstStride[0], src[0], srcStride[0]*srcSliceH);
- else{
- uint8_t *srcPtr= src[0];
- uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
-
- for(i=0; i<srcSliceH; i++)
- {
- memcpy(dstPtr, srcPtr, c->srcW);
- srcPtr+= srcStride[0];
- dstPtr+= dstStride[0];
- }
- }
-
- if(c->dstFormat==PIX_FMT_YUV420P){
- planar2x(src[1], dst[1], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[1]);
- planar2x(src[2], dst[2], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[2]);
- }else{
- planar2x(src[1], dst[2], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[2]);
- planar2x(src[2], dst[1], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[1]);
- }
- return srcSliceH;
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int i;
+
+ /* copy Y */
+ if (srcStride[0]==dstStride[0] && srcStride[0] > 0)
+ memcpy(dst[0]+ srcSliceY*dstStride[0], src[0], srcStride[0]*srcSliceH);
+ else{
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+
+ for (i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, c->srcW);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+
+ if (c->dstFormat==PIX_FMT_YUV420P){
+ planar2x(src[1], dst[1], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[1]);
+ planar2x(src[2], dst[2], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[2]);
+ }else{
+ planar2x(src[1], dst[2], c->chrSrcW, c->chrSrcH, srcStride[1], dstStride[2]);
+ planar2x(src[2], dst[1], c->chrSrcW, c->chrSrcH, srcStride[2], dstStride[1]);
+ }
+ return srcSliceH;
}
/* unscaled copy like stuff (assumes nearly identical formats) */
static int simpleCopy(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
-
- if(isPacked(c->srcFormat))
- {
- if(dstStride[0]==srcStride[0] && srcStride[0] > 0)
- memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], srcSliceH*dstStride[0]);
- else
- {
- int i;
- uint8_t *srcPtr= src[0];
- uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
- int length=0;
-
- /* universal length finder */
- while(length+c->srcW <= FFABS(dstStride[0])
- && length+c->srcW <= FFABS(srcStride[0])) length+= c->srcW;
- ASSERT(length!=0);
-
- for(i=0; i<srcSliceH; i++)
- {
- memcpy(dstPtr, srcPtr, length);
- srcPtr+= srcStride[0];
- dstPtr+= dstStride[0];
- }
- }
- }
- else
- { /* Planar YUV or gray */
- int plane;
- for(plane=0; plane<3; plane++)
- {
- int length= plane==0 ? c->srcW : -((-c->srcW )>>c->chrDstHSubSample);
- int y= plane==0 ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample);
- int height= plane==0 ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample);
-
- if((isGray(c->srcFormat) || isGray(c->dstFormat)) && plane>0)
- {
- if(!isGray(c->dstFormat))
- memset(dst[plane], 128, dstStride[plane]*height);
- }
- else
- {
- if(dstStride[plane]==srcStride[plane] && srcStride[plane] > 0)
- memcpy(dst[plane] + dstStride[plane]*y, src[plane], height*dstStride[plane]);
- else
- {
- int i;
- uint8_t *srcPtr= src[plane];
- uint8_t *dstPtr= dst[plane] + dstStride[plane]*y;
- for(i=0; i<height; i++)
- {
- memcpy(dstPtr, srcPtr, length);
- srcPtr+= srcStride[plane];
- dstPtr+= dstStride[plane];
- }
- }
- }
- }
- }
- return srcSliceH;
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ if (isPacked(c->srcFormat))
+ {
+ if (dstStride[0]==srcStride[0] && srcStride[0] > 0)
+ memcpy(dst[0] + dstStride[0]*srcSliceY, src[0], srcSliceH*dstStride[0]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*srcSliceY;
+ int length=0;
+
+ /* universal length finder */
+ while(length+c->srcW <= FFABS(dstStride[0])
+ && length+c->srcW <= FFABS(srcStride[0])) length+= c->srcW;
+ ASSERT(length!=0);
+
+ for (i=0; i<srcSliceH; i++)
+ {
+ memcpy(dstPtr, srcPtr, length);
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ }
+ }
+ else
+ { /* Planar YUV or gray */
+ int plane;
+ for (plane=0; plane<3; plane++)
+ {
+ int length= plane==0 ? c->srcW : -((-c->srcW )>>c->chrDstHSubSample);
+ int y= plane==0 ? srcSliceY: -((-srcSliceY)>>c->chrDstVSubSample);
+ int height= plane==0 ? srcSliceH: -((-srcSliceH)>>c->chrDstVSubSample);
+
+ if ((isGray(c->srcFormat) || isGray(c->dstFormat)) && plane>0)
+ {
+ if (!isGray(c->dstFormat))
+ memset(dst[plane], 128, dstStride[plane]*height);
+ }
+ else
+ {
+ if (dstStride[plane]==srcStride[plane] && srcStride[plane] > 0)
+ memcpy(dst[plane] + dstStride[plane]*y, src[plane], height*dstStride[plane]);
+ else
+ {
+ int i;
+ uint8_t *srcPtr= src[plane];
+ uint8_t *dstPtr= dst[plane] + dstStride[plane]*y;
+ for (i=0; i<height; i++)
+ {
+ memcpy(dstPtr, srcPtr, length);
+ srcPtr+= srcStride[plane];
+ dstPtr+= dstStride[plane];
+ }
+ }
+ }
+ }
+ }
+ return srcSliceH;
}
static int gray16togray(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
-
- int length= c->srcW;
- int y= srcSliceY;
- int height= srcSliceH;
- int i, j;
- uint8_t *srcPtr= src[0];
- uint8_t *dstPtr= dst[0] + dstStride[0]*y;
-
- if(!isGray(c->dstFormat)){
- int height= -((-srcSliceH)>>c->chrDstVSubSample);
- memset(dst[1], 128, dstStride[1]*height);
- memset(dst[2], 128, dstStride[2]*height);
- }
- if(c->srcFormat == PIX_FMT_GRAY16LE) srcPtr++;
- for(i=0; i<height; i++)
- {
- for(j=0; j<length; j++) dstPtr[j] = srcPtr[j<<1];
- srcPtr+= srcStride[0];
- dstPtr+= dstStride[0];
- }
- return srcSliceH;
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ int length= c->srcW;
+ int y= srcSliceY;
+ int height= srcSliceH;
+ int i, j;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*y;
+
+ if (!isGray(c->dstFormat)){
+ int height= -((-srcSliceH)>>c->chrDstVSubSample);
+ memset(dst[1], 128, dstStride[1]*height);
+ memset(dst[2], 128, dstStride[2]*height);
+ }
+ if (c->srcFormat == PIX_FMT_GRAY16LE) srcPtr++;
+ for (i=0; i<height; i++)
+ {
+ for (j=0; j<length; j++) dstPtr[j] = srcPtr[j<<1];
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ return srcSliceH;
}
static int graytogray16(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
-
- int length= c->srcW;
- int y= srcSliceY;
- int height= srcSliceH;
- int i, j;
- uint8_t *srcPtr= src[0];
- uint8_t *dstPtr= dst[0] + dstStride[0]*y;
- for(i=0; i<height; i++)
- {
- for(j=0; j<length; j++)
- {
- dstPtr[j<<1] = srcPtr[j];
- dstPtr[(j<<1)+1] = srcPtr[j];
- }
- srcPtr+= srcStride[0];
- dstPtr+= dstStride[0];
- }
- return srcSliceH;
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ int length= c->srcW;
+ int y= srcSliceY;
+ int height= srcSliceH;
+ int i, j;
+ uint8_t *srcPtr= src[0];
+ uint8_t *dstPtr= dst[0] + dstStride[0]*y;
+ for (i=0; i<height; i++)
+ {
+ for (j=0; j<length; j++)
+ {
+ dstPtr[j<<1] = srcPtr[j];
+ dstPtr[(j<<1)+1] = srcPtr[j];
+ }
+ srcPtr+= srcStride[0];
+ dstPtr+= dstStride[0];
+ }
+ return srcSliceH;
}
static int gray16swap(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
-
- int length= c->srcW;
- int y= srcSliceY;
- int height= srcSliceH;
- int i, j;
- uint16_t *srcPtr= src[0];
- uint16_t *dstPtr= dst[0] + dstStride[0]*y/2;
- for(i=0; i<height; i++)
- {
- for(j=0; j<length; j++) dstPtr[j] = bswap_16(srcPtr[j]);
- srcPtr+= srcStride[0]/2;
- dstPtr+= dstStride[0]/2;
- }
- return srcSliceH;
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+ int length= c->srcW;
+ int y= srcSliceY;
+ int height= srcSliceH;
+ int i, j;
+ uint16_t *srcPtr= src[0];
+ uint16_t *dstPtr= dst[0] + dstStride[0]*y/2;
+ for (i=0; i<height; i++)
+ {
+ for (j=0; j<length; j++) dstPtr[j] = bswap_16(srcPtr[j]);
+ srcPtr+= srcStride[0]/2;
+ dstPtr+= dstStride[0]/2;
+ }
+ return srcSliceH;
}
static void getSubSampleFactors(int *h, int *v, int format){
- switch(format){
- case PIX_FMT_UYVY422:
- case PIX_FMT_YUYV422:
- *h=1;
- *v=0;
- break;
- case PIX_FMT_YUV420P:
- case PIX_FMT_GRAY16BE:
- case PIX_FMT_GRAY16LE:
- case PIX_FMT_GRAY8: //FIXME remove after different subsamplings are fully implemented
- case PIX_FMT_NV12:
- case PIX_FMT_NV21:
- *h=1;
- *v=1;
- break;
- case PIX_FMT_YUV410P:
- *h=2;
- *v=2;
- break;
- case PIX_FMT_YUV444P:
- *h=0;
- *v=0;
- break;
- case PIX_FMT_YUV422P:
- *h=1;
- *v=0;
- break;
- case PIX_FMT_YUV411P:
- *h=2;
- *v=0;
- break;
- default:
- *h=0;
- *v=0;
- break;
- }
+ switch(format){
+ case PIX_FMT_UYVY422:
+ case PIX_FMT_YUYV422:
+ *h=1;
+ *v=0;
+ break;
+ case PIX_FMT_YUV420P:
+ case PIX_FMT_YUVA420P:
+ case PIX_FMT_GRAY16BE:
+ case PIX_FMT_GRAY16LE:
+ case PIX_FMT_GRAY8: //FIXME remove after different subsamplings are fully implemented
+ case PIX_FMT_NV12:
+ case PIX_FMT_NV21:
+ *h=1;
+ *v=1;
+ break;
+ case PIX_FMT_YUV440P:
+ *h=0;
+ *v=1;
+ break;
+ case PIX_FMT_YUV410P:
+ *h=2;
+ *v=2;
+ break;
+ case PIX_FMT_YUV444P:
+ *h=0;
+ *v=0;
+ break;
+ case PIX_FMT_YUV422P:
+ *h=1;
+ *v=0;
+ break;
+ case PIX_FMT_YUV411P:
+ *h=2;
+ *v=0;
+ break;
+ default:
+ *h=0;
+ *v=0;
+ break;
+ }
}
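
For reference, a small worked example (an editor's sketch, not part of this patch) of what these subsample factors mean in practice: the scaler derives the chroma plane size from the luma size with the -((-x)>>y) idiom used throughout this file, which divides by 2^factor while rounding toward +infinity, so odd luma dimensions still keep a final chroma sample.

    /* Hypothetical 175x99 PIX_FMT_YUV420P picture, i.e. h = v = 1: */
    int chrW = -((-175) >> 1);   /* == 88, ceil(175/2), not 87 */
    int chrH = -((-99)  >> 1);   /* == 50, ceil(99/2),  not 49 */
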
static uint16_t roundToInt16(int64_t f){
- int r= (f + (1<<15))>>16;
- if(r<-0x7FFF) return 0x8000;
- else if(r> 0x7FFF) return 0x7FFF;
- else return r;
+ int r= (f + (1<<15))>>16;
+ if (r<-0x7FFF) return 0x8000;
+ else if (r> 0x7FFF) return 0x7FFF;
+ else return r;
}
/**
* @param inv_table the yuv2rgb coeffs, normally Inverse_Table_6_9[x]
- * @param fullRange if 1 then the luma range is 0..255 if 0 its 16..235
+ * @param fullRange if 1 then the luma range is 0..255 if 0 it is 16..235
* @return -1 if not supported
*/
int sws_setColorspaceDetails(SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation){
- int64_t crv = inv_table[0];
- int64_t cbu = inv_table[1];
- int64_t cgu = -inv_table[2];
- int64_t cgv = -inv_table[3];
- int64_t cy = 1<<16;
- int64_t oy = 0;
-
- if(isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
- memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4);
- memcpy(c->dstColorspaceTable, table, sizeof(int)*4);
-
- c->brightness= brightness;
- c->contrast = contrast;
- c->saturation= saturation;
- c->srcRange = srcRange;
- c->dstRange = dstRange;
-
- c->uOffset= 0x0400040004000400LL;
- c->vOffset= 0x0400040004000400LL;
-
- if(!srcRange){
- cy= (cy*255) / 219;
- oy= 16<<16;
- }else{
- crv= (crv*224) / 255;
- cbu= (cbu*224) / 255;
- cgu= (cgu*224) / 255;
- cgv= (cgv*224) / 255;
- }
+ int64_t crv = inv_table[0];
+ int64_t cbu = inv_table[1];
+ int64_t cgu = -inv_table[2];
+ int64_t cgv = -inv_table[3];
+ int64_t cy = 1<<16;
+ int64_t oy = 0;
+
+ if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
+ memcpy(c->srcColorspaceTable, inv_table, sizeof(int)*4);
+ memcpy(c->dstColorspaceTable, table, sizeof(int)*4);
+
+ c->brightness= brightness;
+ c->contrast = contrast;
+ c->saturation= saturation;
+ c->srcRange = srcRange;
+ c->dstRange = dstRange;
+
+ c->uOffset= 0x0400040004000400LL;
+ c->vOffset= 0x0400040004000400LL;
+
+ if (!srcRange){
+ cy= (cy*255) / 219;
+ oy= 16<<16;
+ }else{
+ crv= (crv*224) / 255;
+ cbu= (cbu*224) / 255;
+ cgu= (cgu*224) / 255;
+ cgv= (cgv*224) / 255;
+ }
- cy = (cy *contrast )>>16;
- crv= (crv*contrast * saturation)>>32;
- cbu= (cbu*contrast * saturation)>>32;
- cgu= (cgu*contrast * saturation)>>32;
- cgv= (cgv*contrast * saturation)>>32;
+ cy = (cy *contrast )>>16;
+ crv= (crv*contrast * saturation)>>32;
+ cbu= (cbu*contrast * saturation)>>32;
+ cgu= (cgu*contrast * saturation)>>32;
+ cgv= (cgv*contrast * saturation)>>32;
- oy -= 256*brightness;
+ oy -= 256*brightness;
- c->yCoeff= roundToInt16(cy *8192) * 0x0001000100010001ULL;
- c->vrCoeff= roundToInt16(crv*8192) * 0x0001000100010001ULL;
- c->ubCoeff= roundToInt16(cbu*8192) * 0x0001000100010001ULL;
- c->vgCoeff= roundToInt16(cgv*8192) * 0x0001000100010001ULL;
- c->ugCoeff= roundToInt16(cgu*8192) * 0x0001000100010001ULL;
- c->yOffset= roundToInt16(oy * 8) * 0x0001000100010001ULL;
+ c->yCoeff= roundToInt16(cy *8192) * 0x0001000100010001ULL;
+ c->vrCoeff= roundToInt16(crv*8192) * 0x0001000100010001ULL;
+ c->ubCoeff= roundToInt16(cbu*8192) * 0x0001000100010001ULL;
+ c->vgCoeff= roundToInt16(cgv*8192) * 0x0001000100010001ULL;
+ c->ugCoeff= roundToInt16(cgu*8192) * 0x0001000100010001ULL;
+ c->yOffset= roundToInt16(oy * 8) * 0x0001000100010001ULL;
- yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation);
- //FIXME factorize
+ yuv2rgb_c_init_tables(c, inv_table, srcRange, brightness, contrast, saturation);
+ //FIXME factorize
#ifdef COMPILE_ALTIVEC
- if (c->flags & SWS_CPU_CAPS_ALTIVEC)
- yuv2rgb_altivec_init_tables (c, inv_table, brightness, contrast, saturation);
-#endif
- return 0;
+ if (c->flags & SWS_CPU_CAPS_ALTIVEC)
+ yuv2rgb_altivec_init_tables (c, inv_table, brightness, contrast, saturation);
+#endif
+ return 0;
}
/**
* @return -1 if not supported
*/
int sws_getColorspaceDetails(SwsContext *c, int **inv_table, int *srcRange, int **table, int *dstRange, int *brightness, int *contrast, int *saturation){
- if(isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
-
- *inv_table = c->srcColorspaceTable;
- *table = c->dstColorspaceTable;
- *srcRange = c->srcRange;
- *dstRange = c->dstRange;
- *brightness= c->brightness;
- *contrast = c->contrast;
- *saturation= c->saturation;
-
- return 0;
+ if (isYUV(c->dstFormat) || isGray(c->dstFormat)) return -1;
+
+ *inv_table = c->srcColorspaceTable;
+ *table = c->dstColorspaceTable;
+ *srcRange = c->srcRange;
+ *dstRange = c->dstRange;
+ *brightness= c->brightness;
+ *contrast = c->contrast;
+ *saturation= c->saturation;
+
+ return 0;
}
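
A minimal caller-side sketch (not part of this patch) of how these two entry points pair up, assuming a valid SwsContext *c with an RGB destination: read the current settings back, tweak one of the fixed-point parameters, and re-apply them. 0 is the neutral brightness and 1<<16 the neutral contrast/saturation, matching the defaults sws_getContext() installs below; both calls return -1 for YUV or gray destinations, so the result is checked.

    int *inv_table, *table;
    int srcRange, dstRange, brightness, contrast, saturation;

    if (sws_getColorspaceDetails(c, &inv_table, &srcRange, &table, &dstRange,
                                 &brightness, &contrast, &saturation) != -1) {
        contrast += contrast/10;               /* +10%, still Q16 fixed point */
        sws_setColorspaceDetails(c, inv_table, srcRange, table, dstRange,
                                 brightness, contrast, saturation);
    }
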
static int handle_jpeg(int *format)
{
- switch (*format) {
- case PIX_FMT_YUVJ420P:
- *format = PIX_FMT_YUV420P;
- return 1;
- case PIX_FMT_YUVJ422P:
- *format = PIX_FMT_YUV422P;
- return 1;
- case PIX_FMT_YUVJ444P:
- *format = PIX_FMT_YUV444P;
- return 1;
- default:
- return 0;
- }
+ switch (*format) {
+ case PIX_FMT_YUVJ420P:
+ *format = PIX_FMT_YUV420P;
+ return 1;
+ case PIX_FMT_YUVJ422P:
+ *format = PIX_FMT_YUV422P;
+ return 1;
+ case PIX_FMT_YUVJ444P:
+ *format = PIX_FMT_YUV444P;
+ return 1;
+ case PIX_FMT_YUVJ440P:
+ *format = PIX_FMT_YUV440P;
+ return 1;
+ default:
+ return 0;
+ }
}
SwsContext *sws_getContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, int flags,
- SwsFilter *srcFilter, SwsFilter *dstFilter, double *param){
-
- SwsContext *c;
- int i;
- int usesVFilter, usesHFilter;
- int unscaled, needsDither;
- int srcRange, dstRange;
- SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param){
+
+ SwsContext *c;
+ int i;
+ int usesVFilter, usesHFilter;
+ int unscaled, needsDither;
+ int srcRange, dstRange;
+ SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
#if defined(ARCH_X86)
- if(flags & SWS_CPU_CAPS_MMX)
- asm volatile("emms\n\t"::: "memory");
+ if (flags & SWS_CPU_CAPS_MMX)
+ asm volatile("emms\n\t"::: "memory");
#endif
#if !defined(RUNTIME_CPUDETECT) || !defined (CONFIG_GPL) //ensure that the flags match the compiled variant if cpudetect is off
- flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC);
+ flags &= ~(SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2|SWS_CPU_CAPS_3DNOW|SWS_CPU_CAPS_ALTIVEC|SWS_CPU_CAPS_BFIN);
#ifdef HAVE_MMX2
- flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2;
+ flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_MMX2;
#elif defined (HAVE_3DNOW)
- flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_3DNOW;
+ flags |= SWS_CPU_CAPS_MMX|SWS_CPU_CAPS_3DNOW;
#elif defined (HAVE_MMX)
- flags |= SWS_CPU_CAPS_MMX;
+ flags |= SWS_CPU_CAPS_MMX;
#elif defined (HAVE_ALTIVEC)
- flags |= SWS_CPU_CAPS_ALTIVEC;
+ flags |= SWS_CPU_CAPS_ALTIVEC;
+#elif defined (ARCH_BFIN)
+ flags |= SWS_CPU_CAPS_BFIN;
#endif
#endif /* RUNTIME_CPUDETECT */
- if(clip_table[512] != 255) globalInit();
- if(rgb15to16 == NULL) sws_rgb2rgb_init(flags);
-
- unscaled = (srcW == dstW && srcH == dstH);
- needsDither= (isBGR(dstFormat) || isRGB(dstFormat))
- && (fmt_depth(dstFormat))<24
- && ((fmt_depth(dstFormat))<(fmt_depth(srcFormat)) || (!(isRGB(srcFormat) || isBGR(srcFormat))));
-
- srcRange = handle_jpeg(&srcFormat);
- dstRange = handle_jpeg(&dstFormat);
-
- if(!isSupportedIn(srcFormat))
- {
- av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as input format\n", sws_format_name(srcFormat));
- return NULL;
- }
- if(!isSupportedOut(dstFormat))
- {
- av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as output format\n", sws_format_name(dstFormat));
- return NULL;
- }
-
- /* sanity check */
- if(srcW<4 || srcH<1 || dstW<8 || dstH<1) //FIXME check if these are enough and try to lowwer them after fixing the relevant parts of the code
- {
- av_log(NULL, AV_LOG_ERROR, "swScaler: %dx%d -> %dx%d is invalid scaling dimension\n",
- srcW, srcH, dstW, dstH);
- return NULL;
- }
-
- if(!dstFilter) dstFilter= &dummyFilter;
- if(!srcFilter) srcFilter= &dummyFilter;
-
- c= av_mallocz(sizeof(SwsContext));
-
- c->av_class = &sws_context_class;
- c->srcW= srcW;
- c->srcH= srcH;
- c->dstW= dstW;
- c->dstH= dstH;
- c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW;
- c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH;
- c->flags= flags;
- c->dstFormat= dstFormat;
- c->srcFormat= srcFormat;
- c->vRounder= 4* 0x0001000100010001ULL;
-
- usesHFilter= usesVFilter= 0;
- if(dstFilter->lumV!=NULL && dstFilter->lumV->length>1) usesVFilter=1;
- if(dstFilter->lumH!=NULL && dstFilter->lumH->length>1) usesHFilter=1;
- if(dstFilter->chrV!=NULL && dstFilter->chrV->length>1) usesVFilter=1;
- if(dstFilter->chrH!=NULL && dstFilter->chrH->length>1) usesHFilter=1;
- if(srcFilter->lumV!=NULL && srcFilter->lumV->length>1) usesVFilter=1;
- if(srcFilter->lumH!=NULL && srcFilter->lumH->length>1) usesHFilter=1;
- if(srcFilter->chrV!=NULL && srcFilter->chrV->length>1) usesVFilter=1;
- if(srcFilter->chrH!=NULL && srcFilter->chrH->length>1) usesHFilter=1;
-
- getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
- getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
-
- // reuse chroma for 2 pixles rgb/bgr unless user wants full chroma interpolation
- if((isBGR(dstFormat) || isRGB(dstFormat)) && !(flags&SWS_FULL_CHR_H_INT)) c->chrDstHSubSample=1;
-
- // drop some chroma lines if the user wants it
- c->vChrDrop= (flags&SWS_SRC_V_CHR_DROP_MASK)>>SWS_SRC_V_CHR_DROP_SHIFT;
- c->chrSrcVSubSample+= c->vChrDrop;
-
- // drop every 2. pixel for chroma calculation unless user wants full chroma
- if((isBGR(srcFormat) || isRGB(srcFormat)) && !(flags&SWS_FULL_CHR_H_INP))
- c->chrSrcHSubSample=1;
-
- if(param){
- c->param[0] = param[0];
- c->param[1] = param[1];
- }else{
- c->param[0] =
- c->param[1] = SWS_PARAM_DEFAULT;
- }
-
- c->chrIntHSubSample= c->chrDstHSubSample;
- c->chrIntVSubSample= c->chrSrcVSubSample;
-
- // note the -((-x)>>y) is so that we allways round toward +inf
- c->chrSrcW= -((-srcW) >> c->chrSrcHSubSample);
- c->chrSrcH= -((-srcH) >> c->chrSrcVSubSample);
- c->chrDstW= -((-dstW) >> c->chrDstHSubSample);
- c->chrDstH= -((-dstH) >> c->chrDstVSubSample);
-
- sws_setColorspaceDetails(c, Inverse_Table_6_9[SWS_CS_DEFAULT], srcRange, Inverse_Table_6_9[SWS_CS_DEFAULT] /* FIXME*/, dstRange, 0, 1<<16, 1<<16);
-
- /* unscaled special Cases */
- if(unscaled && !usesHFilter && !usesVFilter)
- {
- /* yv12_to_nv12 */
- if(srcFormat == PIX_FMT_YUV420P && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21))
- {
- c->swScale= PlanarToNV12Wrapper;
- }
+ if (clip_table[512] != 255) globalInit();
+ if (!rgb15to16) sws_rgb2rgb_init(flags);
+
+ unscaled = (srcW == dstW && srcH == dstH);
+ needsDither= (isBGR(dstFormat) || isRGB(dstFormat))
+ && (fmt_depth(dstFormat))<24
+ && ((fmt_depth(dstFormat))<(fmt_depth(srcFormat)) || (!(isRGB(srcFormat) || isBGR(srcFormat))));
+
+ srcRange = handle_jpeg(&srcFormat);
+ dstRange = handle_jpeg(&dstFormat);
+
+ if (!isSupportedIn(srcFormat))
+ {
+ av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as input pixel format\n", sws_format_name(srcFormat));
+ return NULL;
+ }
+ if (!isSupportedOut(dstFormat))
+ {
+ av_log(NULL, AV_LOG_ERROR, "swScaler: %s is not supported as output pixel format\n", sws_format_name(dstFormat));
+ return NULL;
+ }
+
+ /* sanity check */
+    if (srcW<4 || srcH<1 || dstW<8 || dstH<1) //FIXME check if these are enough and try to lower them after fixing the relevant parts of the code
+ {
+ av_log(NULL, AV_LOG_ERROR, "swScaler: %dx%d -> %dx%d is invalid scaling dimension\n",
+ srcW, srcH, dstW, dstH);
+ return NULL;
+ }
+
+ if (!dstFilter) dstFilter= &dummyFilter;
+ if (!srcFilter) srcFilter= &dummyFilter;
+
+ c= av_mallocz(sizeof(SwsContext));
+
+ c->av_class = &sws_context_class;
+ c->srcW= srcW;
+ c->srcH= srcH;
+ c->dstW= dstW;
+ c->dstH= dstH;
+ c->lumXInc= ((srcW<<16) + (dstW>>1))/dstW;
+ c->lumYInc= ((srcH<<16) + (dstH>>1))/dstH;
+ c->flags= flags;
+ c->dstFormat= dstFormat;
+ c->srcFormat= srcFormat;
+ c->vRounder= 4* 0x0001000100010001ULL;
+
+ usesHFilter= usesVFilter= 0;
+ if (dstFilter->lumV && dstFilter->lumV->length>1) usesVFilter=1;
+ if (dstFilter->lumH && dstFilter->lumH->length>1) usesHFilter=1;
+ if (dstFilter->chrV && dstFilter->chrV->length>1) usesVFilter=1;
+ if (dstFilter->chrH && dstFilter->chrH->length>1) usesHFilter=1;
+ if (srcFilter->lumV && srcFilter->lumV->length>1) usesVFilter=1;
+ if (srcFilter->lumH && srcFilter->lumH->length>1) usesHFilter=1;
+ if (srcFilter->chrV && srcFilter->chrV->length>1) usesVFilter=1;
+ if (srcFilter->chrH && srcFilter->chrH->length>1) usesHFilter=1;
+
+ getSubSampleFactors(&c->chrSrcHSubSample, &c->chrSrcVSubSample, srcFormat);
+ getSubSampleFactors(&c->chrDstHSubSample, &c->chrDstVSubSample, dstFormat);
+
+    // reuse chroma for 2 pixels rgb/bgr unless user wants full chroma interpolation
+ if ((isBGR(dstFormat) || isRGB(dstFormat)) && !(flags&SWS_FULL_CHR_H_INT)) c->chrDstHSubSample=1;
+
+ // drop some chroma lines if the user wants it
+ c->vChrDrop= (flags&SWS_SRC_V_CHR_DROP_MASK)>>SWS_SRC_V_CHR_DROP_SHIFT;
+ c->chrSrcVSubSample+= c->vChrDrop;
+
+    // drop every 2nd pixel for chroma calculation unless user wants full chroma
+ if ((isBGR(srcFormat) || isRGB(srcFormat)) && !(flags&SWS_FULL_CHR_H_INP)
+ && srcFormat!=PIX_FMT_RGB8 && srcFormat!=PIX_FMT_BGR8
+ && srcFormat!=PIX_FMT_RGB4 && srcFormat!=PIX_FMT_BGR4
+ && srcFormat!=PIX_FMT_RGB4_BYTE && srcFormat!=PIX_FMT_BGR4_BYTE)
+ c->chrSrcHSubSample=1;
+
+ if (param){
+ c->param[0] = param[0];
+ c->param[1] = param[1];
+ }else{
+ c->param[0] =
+ c->param[1] = SWS_PARAM_DEFAULT;
+ }
+
+ c->chrIntHSubSample= c->chrDstHSubSample;
+ c->chrIntVSubSample= c->chrSrcVSubSample;
+
+ // Note the -((-x)>>y) is so that we always round toward +inf.
+ c->chrSrcW= -((-srcW) >> c->chrSrcHSubSample);
+ c->chrSrcH= -((-srcH) >> c->chrSrcVSubSample);
+ c->chrDstW= -((-dstW) >> c->chrDstHSubSample);
+ c->chrDstH= -((-dstH) >> c->chrDstVSubSample);
+
+ sws_setColorspaceDetails(c, Inverse_Table_6_9[SWS_CS_DEFAULT], srcRange, Inverse_Table_6_9[SWS_CS_DEFAULT] /* FIXME*/, dstRange, 0, 1<<16, 1<<16);
+
+ /* unscaled special Cases */
+ if (unscaled && !usesHFilter && !usesVFilter)
+ {
+ /* yv12_to_nv12 */
+ if (srcFormat == PIX_FMT_YUV420P && (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21))
+ {
+ c->swScale= PlanarToNV12Wrapper;
+ }
#ifdef CONFIG_GPL
- /* yuv2bgr */
- if((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P) && (isBGR(dstFormat) || isRGB(dstFormat)))
- {
- c->swScale= yuv2rgb_get_func_ptr(c);
- }
+ /* yuv2bgr */
+ if ((srcFormat==PIX_FMT_YUV420P || srcFormat==PIX_FMT_YUV422P) && (isBGR(dstFormat) || isRGB(dstFormat)))
+ {
+ c->swScale= yuv2rgb_get_func_ptr(c);
+ }
#endif
-
- if( srcFormat==PIX_FMT_YUV410P && dstFormat==PIX_FMT_YUV420P )
- {
- c->swScale= yvu9toyv12Wrapper;
- }
-
- /* bgr24toYV12 */
- if(srcFormat==PIX_FMT_BGR24 && dstFormat==PIX_FMT_YUV420P)
- c->swScale= bgr24toyv12Wrapper;
-
- /* rgb/bgr -> rgb/bgr (no dither needed forms) */
- if( (isBGR(srcFormat) || isRGB(srcFormat))
- && (isBGR(dstFormat) || isRGB(dstFormat))
- && !needsDither)
- c->swScale= rgb2rgbWrapper;
-
- /* LQ converters if -sws 0 or -sws 4*/
- if(c->flags&(SWS_FAST_BILINEAR|SWS_POINT)){
- /* rgb/bgr -> rgb/bgr (dither needed forms) */
- if( (isBGR(srcFormat) || isRGB(srcFormat))
- && (isBGR(dstFormat) || isRGB(dstFormat))
- && needsDither)
- c->swScale= rgb2rgbWrapper;
-
- /* yv12_to_yuy2 */
- if(srcFormat == PIX_FMT_YUV420P &&
- (dstFormat == PIX_FMT_YUYV422 || dstFormat == PIX_FMT_UYVY422))
- {
- if (dstFormat == PIX_FMT_YUYV422)
- c->swScale= PlanarToYuy2Wrapper;
- else
- c->swScale= PlanarToUyvyWrapper;
- }
- }
+
+ if (srcFormat==PIX_FMT_YUV410P && dstFormat==PIX_FMT_YUV420P)
+ {
+ c->swScale= yvu9toyv12Wrapper;
+ }
+
+ /* bgr24toYV12 */
+ if (srcFormat==PIX_FMT_BGR24 && dstFormat==PIX_FMT_YUV420P)
+ c->swScale= bgr24toyv12Wrapper;
+
+ /* rgb/bgr -> rgb/bgr (no dither needed forms) */
+ if ( (isBGR(srcFormat) || isRGB(srcFormat))
+ && (isBGR(dstFormat) || isRGB(dstFormat))
+ && srcFormat != PIX_FMT_BGR8 && dstFormat != PIX_FMT_BGR8
+ && srcFormat != PIX_FMT_RGB8 && dstFormat != PIX_FMT_RGB8
+ && srcFormat != PIX_FMT_BGR4 && dstFormat != PIX_FMT_BGR4
+ && srcFormat != PIX_FMT_RGB4 && dstFormat != PIX_FMT_RGB4
+ && srcFormat != PIX_FMT_BGR4_BYTE && dstFormat != PIX_FMT_BGR4_BYTE
+ && srcFormat != PIX_FMT_RGB4_BYTE && dstFormat != PIX_FMT_RGB4_BYTE
+ && srcFormat != PIX_FMT_MONOBLACK && dstFormat != PIX_FMT_MONOBLACK
+ && !needsDither)
+ c->swScale= rgb2rgbWrapper;
+
+ /* LQ converters if -sws 0 or -sws 4*/
+ if (c->flags&(SWS_FAST_BILINEAR|SWS_POINT)){
+ /* rgb/bgr -> rgb/bgr (dither needed forms) */
+ if ( (isBGR(srcFormat) || isRGB(srcFormat))
+ && (isBGR(dstFormat) || isRGB(dstFormat))
+ && needsDither)
+ c->swScale= rgb2rgbWrapper;
+
+ /* yv12_to_yuy2 */
+ if (srcFormat == PIX_FMT_YUV420P &&
+ (dstFormat == PIX_FMT_YUYV422 || dstFormat == PIX_FMT_UYVY422))
+ {
+ if (dstFormat == PIX_FMT_YUYV422)
+ c->swScale= PlanarToYuy2Wrapper;
+ else
+ c->swScale= PlanarToUyvyWrapper;
+ }
+ }
#ifdef COMPILE_ALTIVEC
- if ((c->flags & SWS_CPU_CAPS_ALTIVEC) &&
- ((srcFormat == PIX_FMT_YUV420P &&
- (dstFormat == PIX_FMT_YUYV422 || dstFormat == PIX_FMT_UYVY422)))) {
- // unscaled YV12 -> packed YUV, we want speed
- if (dstFormat == PIX_FMT_YUYV422)
- c->swScale= yv12toyuy2_unscaled_altivec;
- else
- c->swScale= yv12touyvy_unscaled_altivec;
- }
+ if ((c->flags & SWS_CPU_CAPS_ALTIVEC) &&
+ ((srcFormat == PIX_FMT_YUV420P &&
+ (dstFormat == PIX_FMT_YUYV422 || dstFormat == PIX_FMT_UYVY422)))) {
+ // unscaled YV12 -> packed YUV, we want speed
+ if (dstFormat == PIX_FMT_YUYV422)
+ c->swScale= yv12toyuy2_unscaled_altivec;
+ else
+ c->swScale= yv12touyvy_unscaled_altivec;
+ }
+#endif
+
+ /* simple copy */
+ if ( srcFormat == dstFormat
+ || (isPlanarYUV(srcFormat) && isGray(dstFormat))
+ || (isPlanarYUV(dstFormat) && isGray(srcFormat)))
+ {
+ c->swScale= simpleCopy;
+ }
+
+ /* gray16{le,be} conversions */
+ if (isGray16(srcFormat) && (isPlanarYUV(dstFormat) || (dstFormat == PIX_FMT_GRAY8)))
+ {
+ c->swScale= gray16togray;
+ }
+ if ((isPlanarYUV(srcFormat) || (srcFormat == PIX_FMT_GRAY8)) && isGray16(dstFormat))
+ {
+ c->swScale= graytogray16;
+ }
+ if (srcFormat != dstFormat && isGray16(srcFormat) && isGray16(dstFormat))
+ {
+ c->swScale= gray16swap;
+ }
+
+#ifdef ARCH_BFIN
+ if (flags & SWS_CPU_CAPS_BFIN)
+ ff_bfin_get_unscaled_swscale (c);
#endif
- /* simple copy */
- if( srcFormat == dstFormat
- || (isPlanarYUV(srcFormat) && isGray(dstFormat))
- || (isPlanarYUV(dstFormat) && isGray(srcFormat))
- )
- {
- c->swScale= simpleCopy;
- }
-
- /* gray16{le,be} conversions */
- if(isGray16(srcFormat) && (isPlanarYUV(dstFormat) || (dstFormat == PIX_FMT_GRAY8)))
- {
- c->swScale= gray16togray;
- }
- if((isPlanarYUV(srcFormat) || (srcFormat == PIX_FMT_GRAY8)) && isGray16(dstFormat))
- {
- c->swScale= graytogray16;
- }
- if(srcFormat != dstFormat && isGray16(srcFormat) && isGray16(dstFormat))
- {
- c->swScale= gray16swap;
- }
-
- if(c->swScale){
- if(flags&SWS_PRINT_INFO)
- av_log(c, AV_LOG_INFO, "SwScaler: using unscaled %s -> %s special converter\n",
- sws_format_name(srcFormat), sws_format_name(dstFormat));
- return c;
- }
- }
-
- if(flags & SWS_CPU_CAPS_MMX2)
- {
- c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
- if(!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR))
- {
- if(flags&SWS_PRINT_INFO)
- av_log(c, AV_LOG_INFO, "SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n");
- }
- if(usesHFilter) c->canMMX2BeUsed=0;
- }
- else
- c->canMMX2BeUsed=0;
-
- c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW;
- c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH;
-
- // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
- // but only for the FAST_BILINEAR mode otherwise do correct scaling
- // n-2 is the last chrominance sample available
- // this is not perfect, but noone shuld notice the difference, the more correct variant
- // would be like the vertical one, but that would require some special code for the
- // first and last pixel
- if(flags&SWS_FAST_BILINEAR)
- {
- if(c->canMMX2BeUsed)
- {
- c->lumXInc+= 20;
- c->chrXInc+= 20;
- }
- //we don't use the x86asm scaler if mmx is available
- else if(flags & SWS_CPU_CAPS_MMX)
- {
- c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
- c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
- }
- }
-
- /* precalculate horizontal scaler filter coefficients */
- {
- const int filterAlign=
- (flags & SWS_CPU_CAPS_MMX) ? 4 :
- (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
- 1;
-
- initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
- srcW , dstW, filterAlign, 1<<14,
- (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
- srcFilter->lumH, dstFilter->lumH, c->param);
- initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
- c->chrSrcW, c->chrDstW, filterAlign, 1<<14,
- (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
- srcFilter->chrH, dstFilter->chrH, c->param);
+ if (c->swScale){
+ if (flags&SWS_PRINT_INFO)
+ av_log(c, AV_LOG_INFO, "SwScaler: using unscaled %s -> %s special converter\n",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+ return c;
+ }
+ }
+
+ if (flags & SWS_CPU_CAPS_MMX2)
+ {
+ c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
+ if (!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR))
+ {
+ if (flags&SWS_PRINT_INFO)
+ av_log(c, AV_LOG_INFO, "SwScaler: output Width is not a multiple of 32 -> no MMX2 scaler\n");
+ }
+ if (usesHFilter) c->canMMX2BeUsed=0;
+ }
+ else
+ c->canMMX2BeUsed=0;
+
+ c->chrXInc= ((c->chrSrcW<<16) + (c->chrDstW>>1))/c->chrDstW;
+ c->chrYInc= ((c->chrSrcH<<16) + (c->chrDstH>>1))/c->chrDstH;
+
+ // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
+ // but only for the FAST_BILINEAR mode otherwise do correct scaling
+ // n-2 is the last chrominance sample available
+ // this is not perfect, but no one should notice the difference, the more correct variant
+ // would be like the vertical one, but that would require some special code for the
+ // first and last pixel
+ if (flags&SWS_FAST_BILINEAR)
+ {
+ if (c->canMMX2BeUsed)
+ {
+ c->lumXInc+= 20;
+ c->chrXInc+= 20;
+ }
+ //we don't use the x86asm scaler if mmx is available
+ else if (flags & SWS_CPU_CAPS_MMX)
+ {
+ c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
+ c->chrXInc = ((c->chrSrcW-2)<<16)/(c->chrDstW-2) - 20;
+ }
+ }
+
+ /* precalculate horizontal scaler filter coefficients */
+ {
+ const int filterAlign=
+ (flags & SWS_CPU_CAPS_MMX) ? 4 :
+ (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
+
+ initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
+ srcW , dstW, filterAlign, 1<<14,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
+ srcFilter->lumH, dstFilter->lumH, c->param);
+ initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
+ c->chrSrcW, c->chrDstW, filterAlign, 1<<14,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+ srcFilter->chrH, dstFilter->chrH, c->param);
#define MAX_FUNNY_CODE_SIZE 10000
#if defined(COMPILE_MMX2)
// can't downscale !!!
- if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
- {
+ if (c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
+ {
#ifdef MAP_ANONYMOUS
- c->funnyYCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
- c->funnyUVCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ c->funnyYCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
+ c->funnyUVCode = (uint8_t*)mmap(NULL, MAX_FUNNY_CODE_SIZE, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
#else
- c->funnyYCode = av_malloc(MAX_FUNNY_CODE_SIZE);
- c->funnyUVCode = av_malloc(MAX_FUNNY_CODE_SIZE);
+ c->funnyYCode = av_malloc(MAX_FUNNY_CODE_SIZE);
+ c->funnyUVCode = av_malloc(MAX_FUNNY_CODE_SIZE);
#endif
- c->lumMmx2Filter = av_malloc((dstW /8+8)*sizeof(int16_t));
- c->chrMmx2Filter = av_malloc((c->chrDstW /4+8)*sizeof(int16_t));
- c->lumMmx2FilterPos= av_malloc((dstW /2/8+8)*sizeof(int32_t));
- c->chrMmx2FilterPos= av_malloc((c->chrDstW/2/4+8)*sizeof(int32_t));
+ c->lumMmx2Filter = av_malloc((dstW /8+8)*sizeof(int16_t));
+ c->chrMmx2Filter = av_malloc((c->chrDstW /4+8)*sizeof(int16_t));
+ c->lumMmx2FilterPos= av_malloc((dstW /2/8+8)*sizeof(int32_t));
+ c->chrMmx2FilterPos= av_malloc((c->chrDstW/2/4+8)*sizeof(int32_t));
- initMMX2HScaler( dstW, c->lumXInc, c->funnyYCode , c->lumMmx2Filter, c->lumMmx2FilterPos, 8);
- initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode, c->chrMmx2Filter, c->chrMmx2FilterPos, 4);
- }
+ initMMX2HScaler( dstW, c->lumXInc, c->funnyYCode , c->lumMmx2Filter, c->lumMmx2FilterPos, 8);
+ initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode, c->chrMmx2Filter, c->chrMmx2FilterPos, 4);
+ }
#endif /* defined(COMPILE_MMX2) */
- } // Init Horizontal stuff
+ } // Init Horizontal stuff
- /* precalculate vertical scaler filter coefficients */
- {
- const int filterAlign=
- (flags & SWS_CPU_CAPS_MMX) && (flags & SWS_ACCURATE_RND) ? 2 :
- (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
- 1;
+ /* precalculate vertical scaler filter coefficients */
+ {
+ const int filterAlign=
+ (flags & SWS_CPU_CAPS_MMX) && (flags & SWS_ACCURATE_RND) ? 2 :
+ (flags & SWS_CPU_CAPS_ALTIVEC) ? 8 :
+ 1;
- initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
- srcH , dstH, filterAlign, (1<<12)-4,
- (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
- srcFilter->lumV, dstFilter->lumV, c->param);
- initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
- c->chrSrcH, c->chrDstH, filterAlign, (1<<12)-4,
- (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
- srcFilter->chrV, dstFilter->chrV, c->param);
+ initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
+ srcH , dstH, filterAlign, (1<<12)-4,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BICUBIC) : flags,
+ srcFilter->lumV, dstFilter->lumV, c->param);
+ initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
+ c->chrSrcH, c->chrDstH, filterAlign, (1<<12)-4,
+ (flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
+ srcFilter->chrV, dstFilter->chrV, c->param);
#ifdef HAVE_ALTIVEC
- c->vYCoeffsBank = av_malloc(sizeof (vector signed short)*c->vLumFilterSize*c->dstH);
- c->vCCoeffsBank = av_malloc(sizeof (vector signed short)*c->vChrFilterSize*c->chrDstH);
-
- for (i=0;i<c->vLumFilterSize*c->dstH;i++) {
- int j;
- short *p = (short *)&c->vYCoeffsBank[i];
- for (j=0;j<8;j++)
- p[j] = c->vLumFilter[i];
- }
-
- for (i=0;i<c->vChrFilterSize*c->chrDstH;i++) {
- int j;
- short *p = (short *)&c->vCCoeffsBank[i];
- for (j=0;j<8;j++)
- p[j] = c->vChrFilter[i];
- }
+ c->vYCoeffsBank = av_malloc(sizeof (vector signed short)*c->vLumFilterSize*c->dstH);
+ c->vCCoeffsBank = av_malloc(sizeof (vector signed short)*c->vChrFilterSize*c->chrDstH);
+
+ for (i=0;i<c->vLumFilterSize*c->dstH;i++) {
+ int j;
+ short *p = (short *)&c->vYCoeffsBank[i];
+ for (j=0;j<8;j++)
+ p[j] = c->vLumFilter[i];
+ }
+
+ for (i=0;i<c->vChrFilterSize*c->chrDstH;i++) {
+ int j;
+ short *p = (short *)&c->vCCoeffsBank[i];
+ for (j=0;j<8;j++)
+ p[j] = c->vChrFilter[i];
+ }
#endif
- }
-
- // Calculate Buffer Sizes so that they won't run out while handling these damn slices
- c->vLumBufSize= c->vLumFilterSize;
- c->vChrBufSize= c->vChrFilterSize;
- for(i=0; i<dstH; i++)
- {
- int chrI= i*c->chrDstH / dstH;
- int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1,
- ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
-
- nextSlice>>= c->chrSrcVSubSample;
- nextSlice<<= c->chrSrcVSubSample;
- if(c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice)
- c->vLumBufSize= nextSlice - c->vLumFilterPos[i ];
- if(c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample))
- c->vChrBufSize= (nextSlice>>c->chrSrcVSubSample) - c->vChrFilterPos[chrI];
- }
-
- // allocate pixbufs (we use dynamic allocation because otherwise we would need to
- c->lumPixBuf= av_malloc(c->vLumBufSize*2*sizeof(int16_t*));
- c->chrPixBuf= av_malloc(c->vChrBufSize*2*sizeof(int16_t*));
- //Note we need at least one pixel more at the end because of the mmx code (just in case someone wanna replace the 4000/8000)
- /* align at 16 bytes for AltiVec */
- for(i=0; i<c->vLumBufSize; i++)
- c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= av_mallocz(4000);
- for(i=0; i<c->vChrBufSize; i++)
- c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= av_malloc(8000);
-
- //try to avoid drawing green stuff between the right end and the stride end
- for(i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, 8000);
-
- ASSERT(c->chrDstH <= dstH)
-
- if(flags&SWS_PRINT_INFO)
- {
+ }
+
+ // Calculate Buffer Sizes so that they won't run out while handling these damn slices
+ c->vLumBufSize= c->vLumFilterSize;
+ c->vChrBufSize= c->vChrFilterSize;
+ for (i=0; i<dstH; i++)
+ {
+ int chrI= i*c->chrDstH / dstH;
+ int nextSlice= FFMAX(c->vLumFilterPos[i ] + c->vLumFilterSize - 1,
+ ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<c->chrSrcVSubSample));
+
+ nextSlice>>= c->chrSrcVSubSample;
+ nextSlice<<= c->chrSrcVSubSample;
+ if (c->vLumFilterPos[i ] + c->vLumBufSize < nextSlice)
+ c->vLumBufSize= nextSlice - c->vLumFilterPos[i];
+ if (c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>c->chrSrcVSubSample))
+ c->vChrBufSize= (nextSlice>>c->chrSrcVSubSample) - c->vChrFilterPos[chrI];
+ }
+
+ // allocate pixbufs (we use dynamic allocation because otherwise we would need to
+ c->lumPixBuf= av_malloc(c->vLumBufSize*2*sizeof(int16_t*));
+ c->chrPixBuf= av_malloc(c->vChrBufSize*2*sizeof(int16_t*));
+ //Note we need at least one pixel more at the end because of the mmx code (just in case someone wanna replace the 4000/8000)
+ /* align at 16 bytes for AltiVec */
+ for (i=0; i<c->vLumBufSize; i++)
+ c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= av_mallocz(4000);
+ for (i=0; i<c->vChrBufSize; i++)
+ c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= av_malloc(8000);
+
+ //try to avoid drawing green stuff between the right end and the stride end
+ for (i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, 8000);
+
+ ASSERT(c->chrDstH <= dstH)
+
+ if (flags&SWS_PRINT_INFO)
+ {
#ifdef DITHER1XBPP
- char *dither= " dithered";
+ char *dither= " dithered";
#else
- char *dither= "";
+ char *dither= "";
#endif
- if(flags&SWS_FAST_BILINEAR)
- av_log(c, AV_LOG_INFO, "SwScaler: FAST_BILINEAR scaler, ");
- else if(flags&SWS_BILINEAR)
- av_log(c, AV_LOG_INFO, "SwScaler: BILINEAR scaler, ");
- else if(flags&SWS_BICUBIC)
- av_log(c, AV_LOG_INFO, "SwScaler: BICUBIC scaler, ");
- else if(flags&SWS_X)
- av_log(c, AV_LOG_INFO, "SwScaler: Experimental scaler, ");
- else if(flags&SWS_POINT)
- av_log(c, AV_LOG_INFO, "SwScaler: Nearest Neighbor / POINT scaler, ");
- else if(flags&SWS_AREA)
- av_log(c, AV_LOG_INFO, "SwScaler: Area Averageing scaler, ");
- else if(flags&SWS_BICUBLIN)
- av_log(c, AV_LOG_INFO, "SwScaler: luma BICUBIC / chroma BILINEAR scaler, ");
- else if(flags&SWS_GAUSS)
- av_log(c, AV_LOG_INFO, "SwScaler: Gaussian scaler, ");
- else if(flags&SWS_SINC)
- av_log(c, AV_LOG_INFO, "SwScaler: Sinc scaler, ");
- else if(flags&SWS_LANCZOS)
- av_log(c, AV_LOG_INFO, "SwScaler: Lanczos scaler, ");
- else if(flags&SWS_SPLINE)
- av_log(c, AV_LOG_INFO, "SwScaler: Bicubic spline scaler, ");
- else
- av_log(c, AV_LOG_INFO, "SwScaler: ehh flags invalid?! ");
-
- if(dstFormat==PIX_FMT_BGR555 || dstFormat==PIX_FMT_BGR565)
- av_log(c, AV_LOG_INFO, "from %s to%s %s ",
- sws_format_name(srcFormat), dither, sws_format_name(dstFormat));
- else
- av_log(c, AV_LOG_INFO, "from %s to %s ",
- sws_format_name(srcFormat), sws_format_name(dstFormat));
-
- if(flags & SWS_CPU_CAPS_MMX2)
- av_log(c, AV_LOG_INFO, "using MMX2\n");
- else if(flags & SWS_CPU_CAPS_3DNOW)
- av_log(c, AV_LOG_INFO, "using 3DNOW\n");
- else if(flags & SWS_CPU_CAPS_MMX)
- av_log(c, AV_LOG_INFO, "using MMX\n");
- else if(flags & SWS_CPU_CAPS_ALTIVEC)
- av_log(c, AV_LOG_INFO, "using AltiVec\n");
- else
- av_log(c, AV_LOG_INFO, "using C\n");
- }
-
- if(flags & SWS_PRINT_INFO)
- {
- if(flags & SWS_CPU_CAPS_MMX)
- {
- if(c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR))
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
- else
- {
- if(c->hLumFilterSize==4)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n");
- else if(c->hLumFilterSize==8)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n");
- else
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n");
-
- if(c->hChrFilterSize==4)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n");
- else if(c->hChrFilterSize==8)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n");
- else
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n");
- }
- }
- else
- {
+ if (flags&SWS_FAST_BILINEAR)
+ av_log(c, AV_LOG_INFO, "SwScaler: FAST_BILINEAR scaler, ");
+ else if (flags&SWS_BILINEAR)
+ av_log(c, AV_LOG_INFO, "SwScaler: BILINEAR scaler, ");
+ else if (flags&SWS_BICUBIC)
+ av_log(c, AV_LOG_INFO, "SwScaler: BICUBIC scaler, ");
+ else if (flags&SWS_X)
+ av_log(c, AV_LOG_INFO, "SwScaler: Experimental scaler, ");
+ else if (flags&SWS_POINT)
+ av_log(c, AV_LOG_INFO, "SwScaler: Nearest Neighbor / POINT scaler, ");
+ else if (flags&SWS_AREA)
+ av_log(c, AV_LOG_INFO, "SwScaler: Area Averageing scaler, ");
+ else if (flags&SWS_BICUBLIN)
+ av_log(c, AV_LOG_INFO, "SwScaler: luma BICUBIC / chroma BILINEAR scaler, ");
+ else if (flags&SWS_GAUSS)
+ av_log(c, AV_LOG_INFO, "SwScaler: Gaussian scaler, ");
+ else if (flags&SWS_SINC)
+ av_log(c, AV_LOG_INFO, "SwScaler: Sinc scaler, ");
+ else if (flags&SWS_LANCZOS)
+ av_log(c, AV_LOG_INFO, "SwScaler: Lanczos scaler, ");
+ else if (flags&SWS_SPLINE)
+ av_log(c, AV_LOG_INFO, "SwScaler: Bicubic spline scaler, ");
+ else
+ av_log(c, AV_LOG_INFO, "SwScaler: ehh flags invalid?! ");
+
+ if (dstFormat==PIX_FMT_BGR555 || dstFormat==PIX_FMT_BGR565)
+ av_log(c, AV_LOG_INFO, "from %s to%s %s ",
+ sws_format_name(srcFormat), dither, sws_format_name(dstFormat));
+ else
+ av_log(c, AV_LOG_INFO, "from %s to %s ",
+ sws_format_name(srcFormat), sws_format_name(dstFormat));
+
+ if (flags & SWS_CPU_CAPS_MMX2)
+ av_log(c, AV_LOG_INFO, "using MMX2\n");
+ else if (flags & SWS_CPU_CAPS_3DNOW)
+ av_log(c, AV_LOG_INFO, "using 3DNOW\n");
+ else if (flags & SWS_CPU_CAPS_MMX)
+ av_log(c, AV_LOG_INFO, "using MMX\n");
+ else if (flags & SWS_CPU_CAPS_ALTIVEC)
+ av_log(c, AV_LOG_INFO, "using AltiVec\n");
+ else
+ av_log(c, AV_LOG_INFO, "using C\n");
+ }
+
+ if (flags & SWS_PRINT_INFO)
+ {
+ if (flags & SWS_CPU_CAPS_MMX)
+ {
+ if (c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR))
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
+ else
+ {
+ if (c->hLumFilterSize==4)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n");
+ else if (c->hLumFilterSize==8)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n");
+ else
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n");
+
+ if (c->hChrFilterSize==4)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n");
+ else if (c->hChrFilterSize==8)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n");
+ else
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n");
+ }
+ }
+ else
+ {
#if defined(ARCH_X86)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using X86-Asm scaler for horizontal scaling\n");
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using X86-Asm scaler for horizontal scaling\n");
#else
- if(flags & SWS_FAST_BILINEAR)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using FAST_BILINEAR C scaler for horizontal scaling\n");
- else
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using C scaler for horizontal scaling\n");
+ if (flags & SWS_FAST_BILINEAR)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using FAST_BILINEAR C scaler for horizontal scaling\n");
+ else
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using C scaler for horizontal scaling\n");
#endif
- }
- if(isPlanarYUV(dstFormat))
- {
- if(c->vLumFilterSize==1)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
- else
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
- }
- else
- {
- if(c->vLumFilterSize==1 && c->vChrFilterSize==2)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
- "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n",(flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
- else if(c->vLumFilterSize==2 && c->vChrFilterSize==2)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
- else
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
- }
-
- if(dstFormat==PIX_FMT_BGR24)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using %s YV12->BGR24 Converter\n",
- (flags & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"));
- else if(dstFormat==PIX_FMT_RGB32)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using %s YV12->BGR32 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
- else if(dstFormat==PIX_FMT_BGR565)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using %s YV12->BGR16 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
- else if(dstFormat==PIX_FMT_BGR555)
- av_log(c, AV_LOG_VERBOSE, "SwScaler: using %s YV12->BGR15 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
-
- av_log(c, AV_LOG_VERBOSE, "SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
- }
- if(flags & SWS_PRINT_INFO)
- {
- av_log(c, AV_LOG_DEBUG, "SwScaler:Lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
- c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
- av_log(c, AV_LOG_DEBUG, "SwScaler:Chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
- c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc);
- }
-
- c->swScale= getSwsFunc(flags);
- return c;
+ }
+ if (isPlanarYUV(dstFormat))
+ {
+ if (c->vLumFilterSize==1)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using n-tap %s scaler for vertical scaling (YV12 like)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ }
+ else
+ {
+ if (c->vLumFilterSize==1 && c->vChrFilterSize==2)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
+ "SwScaler: 2-tap scaler for vertical chrominance scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if (c->vLumFilterSize==2 && c->vChrFilterSize==2)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ }
+
+ if (dstFormat==PIX_FMT_BGR24)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using %s YV12->BGR24 Converter\n",
+ (flags & SWS_CPU_CAPS_MMX2) ? "MMX2" : ((flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C"));
+ else if (dstFormat==PIX_FMT_RGB32)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using %s YV12->BGR32 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if (dstFormat==PIX_FMT_BGR565)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using %s YV12->BGR16 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+ else if (dstFormat==PIX_FMT_BGR555)
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: using %s YV12->BGR15 Converter\n", (flags & SWS_CPU_CAPS_MMX) ? "MMX" : "C");
+
+ av_log(c, AV_LOG_VERBOSE, "SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
+ }
+ if (flags & SWS_PRINT_INFO)
+ {
+ av_log(c, AV_LOG_DEBUG, "SwScaler:Lum srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
+ c->srcW, c->srcH, c->dstW, c->dstH, c->lumXInc, c->lumYInc);
+ av_log(c, AV_LOG_DEBUG, "SwScaler:Chr srcW=%d srcH=%d dstW=%d dstH=%d xInc=%d yInc=%d\n",
+ c->chrSrcW, c->chrSrcH, c->chrDstW, c->chrDstH, c->chrXInc, c->chrYInc);
+ }
+
+ c->swScale= getSwsFunc(flags);
+ return c;
}
/**
- * swscale warper, so we don't need to export the SwsContext.
+ * swscale wrapper, so we don't need to export the SwsContext.
* assumes planar YUV to be in YUV order instead of YVU
*/
int sws_scale(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
- if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
- av_log(c, AV_LOG_ERROR, "swScaler: slices start in the middle!\n");
- return 0;
- }
- if (c->sliceDir == 0) {
- if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
- }
-
- // copy strides, so they can safely be modified
- if (c->sliceDir == 1) {
- uint8_t* src2[4]= {src[0], src[1], src[2]};
- // slices go from top to bottom
- int srcStride2[4]= {srcStride[0], srcStride[1], srcStride[2]};
- int dstStride2[4]= {dstStride[0], dstStride[1], dstStride[2]};
- return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst, dstStride2);
- } else {
- // slices go from bottom to top => we flip the image internally
- uint8_t* src2[4]= {src[0] + (srcSliceH-1)*srcStride[0],
- src[1] + ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1],
- src[2] + ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2]
- };
- uint8_t* dst2[4]= {dst[0] + (c->dstH-1)*dstStride[0],
- dst[1] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1],
- dst[2] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2]};
- int srcStride2[4]= {-srcStride[0], -srcStride[1], -srcStride[2]};
- int dstStride2[4]= {-dstStride[0], -dstStride[1], -dstStride[2]};
-
- return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2);
- }
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int i;
+ uint8_t* src2[4]= {src[0], src[1], src[2]};
+ uint32_t pal[256];
+ if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
+ av_log(c, AV_LOG_ERROR, "swScaler: slices start in the middle!\n");
+ return 0;
+ }
+ if (c->sliceDir == 0) {
+ if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
+ }
+
+ if (c->srcFormat == PIX_FMT_PAL8){
+ for (i=0; i<256; i++){
+ int p= ((uint32_t*)(src[1]))[i];
+ int r= (p>>16)&0xFF;
+ int g= (p>> 8)&0xFF;
+ int b= p &0xFF;
+ int y= av_clip_uint8(((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16 );
+ int u= av_clip_uint8(((RU*r + GU*g + BU*b)>>RGB2YUV_SHIFT) + 128);
+ int v= av_clip_uint8(((RV*r + GV*g + BV*b)>>RGB2YUV_SHIFT) + 128);
+ pal[i]= y + (u<<8) + (v<<16);
+ }
+        src2[1]= (uint8_t*)pal;
+ }
+
+ // copy strides, so they can safely be modified
+ if (c->sliceDir == 1) {
+ // slices go from top to bottom
+ int srcStride2[4]= {srcStride[0], srcStride[1], srcStride[2]};
+ int dstStride2[4]= {dstStride[0], dstStride[1], dstStride[2]};
+ return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst, dstStride2);
+ } else {
+ // slices go from bottom to top => we flip the image internally
+ uint8_t* dst2[4]= {dst[0] + (c->dstH-1)*dstStride[0],
+ dst[1] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[1],
+ dst[2] + ((c->dstH>>c->chrDstVSubSample)-1)*dstStride[2]};
+ int srcStride2[4]= {-srcStride[0], -srcStride[1], -srcStride[2]};
+ int dstStride2[4]= {-dstStride[0], -dstStride[1], -dstStride[2]};
+
+ src2[0] += (srcSliceH-1)*srcStride[0];
+ if (c->srcFormat != PIX_FMT_PAL8)
+ src2[1] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[1];
+ src2[2] += ((srcSliceH>>c->chrSrcVSubSample)-1)*srcStride[2];
+
+ return c->swScale(c, src2, srcStride2, c->srcH-srcSliceY-srcSliceH, srcSliceH, dst2, dstStride2);
+ }
}
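
A slice-feeding sketch (again an editor's illustration, not part of this patch): a decoder that emits the picture in 16-line strips can pass each strip straight to sws_scale(); the src pointers address the strip itself while srcSliceY says where it sits in the full frame, and strips must arrive strictly top-to-bottom (or bottom-to-top). Assumes a YUV420P source with even strip heights and caller-provided sws, src, srcStride, dst, dstStride and srcH.

    int y, h;
    for (y = 0; y < srcH; y += 16) {
        uint8_t *slice[3];
        h = FFMIN(16, srcH - y);
        slice[0] = src[0] +  y    *srcStride[0];   /* luma strip start */
        slice[1] = src[1] + (y>>1)*srcStride[1];   /* chroma is half height */
        slice[2] = src[2] + (y>>1)*srcStride[2];
        sws_scale(sws, slice, srcStride, y, h, dst, dstStride);
    }
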
/**
- * swscale warper, so we don't need to export the SwsContext
+ * swscale wrapper, so we don't need to export the SwsContext
*/
int sws_scale_ordered(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
- return sws_scale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ return sws_scale(c, src, srcStride, srcSliceY, srcSliceH, dst, dstStride);
}
-SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
- float lumaSharpen, float chromaSharpen,
- float chromaHShift, float chromaVShift,
- int verbose)
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+ float lumaSharpen, float chromaSharpen,
+ float chromaHShift, float chromaVShift,
+ int verbose)
{
- SwsFilter *filter= av_malloc(sizeof(SwsFilter));
-
- if(lumaGBlur!=0.0){
- filter->lumH= sws_getGaussianVec(lumaGBlur, 3.0);
- filter->lumV= sws_getGaussianVec(lumaGBlur, 3.0);
- }else{
- filter->lumH= sws_getIdentityVec();
- filter->lumV= sws_getIdentityVec();
- }
-
- if(chromaGBlur!=0.0){
- filter->chrH= sws_getGaussianVec(chromaGBlur, 3.0);
- filter->chrV= sws_getGaussianVec(chromaGBlur, 3.0);
- }else{
- filter->chrH= sws_getIdentityVec();
- filter->chrV= sws_getIdentityVec();
- }
-
- if(chromaSharpen!=0.0){
- SwsVector *id= sws_getIdentityVec();
- sws_scaleVec(filter->chrH, -chromaSharpen);
- sws_scaleVec(filter->chrV, -chromaSharpen);
- sws_addVec(filter->chrH, id);
- sws_addVec(filter->chrV, id);
- sws_freeVec(id);
- }
-
- if(lumaSharpen!=0.0){
- SwsVector *id= sws_getIdentityVec();
- sws_scaleVec(filter->lumH, -lumaSharpen);
- sws_scaleVec(filter->lumV, -lumaSharpen);
- sws_addVec(filter->lumH, id);
- sws_addVec(filter->lumV, id);
- sws_freeVec(id);
- }
-
- if(chromaHShift != 0.0)
- sws_shiftVec(filter->chrH, (int)(chromaHShift+0.5));
-
- if(chromaVShift != 0.0)
- sws_shiftVec(filter->chrV, (int)(chromaVShift+0.5));
-
- sws_normalizeVec(filter->chrH, 1.0);
- sws_normalizeVec(filter->chrV, 1.0);
- sws_normalizeVec(filter->lumH, 1.0);
- sws_normalizeVec(filter->lumV, 1.0);
-
- if(verbose) sws_printVec(filter->chrH);
- if(verbose) sws_printVec(filter->lumH);
-
- return filter;
+ SwsFilter *filter= av_malloc(sizeof(SwsFilter));
+
+ if (lumaGBlur!=0.0){
+ filter->lumH= sws_getGaussianVec(lumaGBlur, 3.0);
+ filter->lumV= sws_getGaussianVec(lumaGBlur, 3.0);
+ }else{
+ filter->lumH= sws_getIdentityVec();
+ filter->lumV= sws_getIdentityVec();
+ }
+
+ if (chromaGBlur!=0.0){
+ filter->chrH= sws_getGaussianVec(chromaGBlur, 3.0);
+ filter->chrV= sws_getGaussianVec(chromaGBlur, 3.0);
+ }else{
+ filter->chrH= sws_getIdentityVec();
+ filter->chrV= sws_getIdentityVec();
+ }
+
+ if (chromaSharpen!=0.0){
+ SwsVector *id= sws_getIdentityVec();
+ sws_scaleVec(filter->chrH, -chromaSharpen);
+ sws_scaleVec(filter->chrV, -chromaSharpen);
+ sws_addVec(filter->chrH, id);
+ sws_addVec(filter->chrV, id);
+ sws_freeVec(id);
+ }
+
+ if (lumaSharpen!=0.0){
+ SwsVector *id= sws_getIdentityVec();
+ sws_scaleVec(filter->lumH, -lumaSharpen);
+ sws_scaleVec(filter->lumV, -lumaSharpen);
+ sws_addVec(filter->lumH, id);
+ sws_addVec(filter->lumV, id);
+ sws_freeVec(id);
+ }
+
+ if (chromaHShift != 0.0)
+ sws_shiftVec(filter->chrH, (int)(chromaHShift+0.5));
+
+ if (chromaVShift != 0.0)
+ sws_shiftVec(filter->chrV, (int)(chromaVShift+0.5));
+
+ sws_normalizeVec(filter->chrH, 1.0);
+ sws_normalizeVec(filter->chrV, 1.0);
+ sws_normalizeVec(filter->lumH, 1.0);
+ sws_normalizeVec(filter->lumV, 1.0);
+
+ if (verbose) sws_printVec(filter->chrH);
+ if (verbose) sws_printVec(filter->lumH);
+
+ return filter;
}
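
Putting the pieces together, a minimal sketch (not part of the patch) of the usual call sequence: build a filter with a mild luma sharpen, hand it to sws_getContext() as the source filter, scale one whole frame, and release everything. Buffer setup and error paths are the caller's problem; srcW/srcH, dstW/dstH, src/srcStride and dst/dstStride are assumed to exist.

    SwsFilter  *filt = sws_getDefaultFilter(0.0, 0.0, 0.6, 0.0, 0.0, 0.0, 0);
    SwsContext *sws  = sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
                                      dstW, dstH, PIX_FMT_BGR24,
                                      SWS_BICUBIC, filt, NULL, NULL);
    if (sws) {
        sws_scale(sws, src, srcStride, 0, srcH, dst, dstStride);
        sws_freeContext(sws);
    }
    sws_freeFilter(filt);
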
/**
@@ -2533,303 +2645,303 @@ SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
 * quality=3 is high quality, lower is lower quality
*/
SwsVector *sws_getGaussianVec(double variance, double quality){
- const int length= (int)(variance*quality + 0.5) | 1;
- int i;
- double *coeff= av_malloc(length*sizeof(double));
- double middle= (length-1)*0.5;
- SwsVector *vec= av_malloc(sizeof(SwsVector));
+ const int length= (int)(variance*quality + 0.5) | 1;
+ int i;
+ double *coeff= av_malloc(length*sizeof(double));
+ double middle= (length-1)*0.5;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
- vec->coeff= coeff;
- vec->length= length;
+ vec->coeff= coeff;
+ vec->length= length;
- for(i=0; i<length; i++)
- {
- double dist= i-middle;
- coeff[i]= exp( -dist*dist/(2*variance*variance) ) / sqrt(2*variance*PI);
- }
+ for (i=0; i<length; i++)
+ {
+ double dist= i-middle;
+ coeff[i]= exp(-dist*dist/(2*variance*variance)) / sqrt(2*variance*PI);
+ }
- sws_normalizeVec(vec, 1.0);
+ sws_normalizeVec(vec, 1.0);
- return vec;
+ return vec;
}
SwsVector *sws_getConstVec(double c, int length){
- int i;
- double *coeff= av_malloc(length*sizeof(double));
- SwsVector *vec= av_malloc(sizeof(SwsVector));
+ int i;
+ double *coeff= av_malloc(length*sizeof(double));
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
- vec->coeff= coeff;
- vec->length= length;
+ vec->coeff= coeff;
+ vec->length= length;
- for(i=0; i<length; i++)
- coeff[i]= c;
+ for (i=0; i<length; i++)
+ coeff[i]= c;
- return vec;
+ return vec;
}
SwsVector *sws_getIdentityVec(void){
- return sws_getConstVec(1.0, 1);
+ return sws_getConstVec(1.0, 1);
}
double sws_dcVec(SwsVector *a){
- int i;
- double sum=0;
+ int i;
+ double sum=0;
- for(i=0; i<a->length; i++)
- sum+= a->coeff[i];
+ for (i=0; i<a->length; i++)
+ sum+= a->coeff[i];
- return sum;
+ return sum;
}
void sws_scaleVec(SwsVector *a, double scalar){
- int i;
+ int i;
- for(i=0; i<a->length; i++)
- a->coeff[i]*= scalar;
+ for (i=0; i<a->length; i++)
+ a->coeff[i]*= scalar;
}
void sws_normalizeVec(SwsVector *a, double height){
- sws_scaleVec(a, height/sws_dcVec(a));
+ sws_scaleVec(a, height/sws_dcVec(a));
}
static SwsVector *sws_getConvVec(SwsVector *a, SwsVector *b){
- int length= a->length + b->length - 1;
- double *coeff= av_malloc(length*sizeof(double));
- int i, j;
- SwsVector *vec= av_malloc(sizeof(SwsVector));
+ int length= a->length + b->length - 1;
+ double *coeff= av_malloc(length*sizeof(double));
+ int i, j;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
- vec->coeff= coeff;
- vec->length= length;
+ vec->coeff= coeff;
+ vec->length= length;
- for(i=0; i<length; i++) coeff[i]= 0.0;
+ for (i=0; i<length; i++) coeff[i]= 0.0;
- for(i=0; i<a->length; i++)
- {
- for(j=0; j<b->length; j++)
- {
- coeff[i+j]+= a->coeff[i]*b->coeff[j];
- }
- }
+ for (i=0; i<a->length; i++)
+ {
+ for (j=0; j<b->length; j++)
+ {
+ coeff[i+j]+= a->coeff[i]*b->coeff[j];
+ }
+ }
- return vec;
+ return vec;
}
static SwsVector *sws_sumVec(SwsVector *a, SwsVector *b){
- int length= FFMAX(a->length, b->length);
- double *coeff= av_malloc(length*sizeof(double));
- int i;
- SwsVector *vec= av_malloc(sizeof(SwsVector));
+ int length= FFMAX(a->length, b->length);
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
- vec->coeff= coeff;
- vec->length= length;
+ vec->coeff= coeff;
+ vec->length= length;
- for(i=0; i<length; i++) coeff[i]= 0.0;
+ for (i=0; i<length; i++) coeff[i]= 0.0;
- for(i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
- for(i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i];
+ for (i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
+ for (i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i];
- return vec;
+ return vec;
}
static SwsVector *sws_diffVec(SwsVector *a, SwsVector *b){
- int length= FFMAX(a->length, b->length);
- double *coeff= av_malloc(length*sizeof(double));
- int i;
- SwsVector *vec= av_malloc(sizeof(SwsVector));
+ int length= FFMAX(a->length, b->length);
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
- vec->coeff= coeff;
- vec->length= length;
+ vec->coeff= coeff;
+ vec->length= length;
- for(i=0; i<length; i++) coeff[i]= 0.0;
+ for (i=0; i<length; i++) coeff[i]= 0.0;
- for(i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
- for(i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i];
+ for (i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
+ for (i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i];
- return vec;
+ return vec;
}
/* shift left / or right if "shift" is negative */
static SwsVector *sws_getShiftedVec(SwsVector *a, int shift){
- int length= a->length + FFABS(shift)*2;
- double *coeff= av_malloc(length*sizeof(double));
- int i;
- SwsVector *vec= av_malloc(sizeof(SwsVector));
+ int length= a->length + FFABS(shift)*2;
+ double *coeff= av_malloc(length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
- vec->coeff= coeff;
- vec->length= length;
+ vec->coeff= coeff;
+ vec->length= length;
- for(i=0; i<length; i++) coeff[i]= 0.0;
+ for (i=0; i<length; i++) coeff[i]= 0.0;
- for(i=0; i<a->length; i++)
- {
- coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i];
- }
+ for (i=0; i<a->length; i++)
+ {
+ coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i];
+ }
- return vec;
+ return vec;
}
void sws_shiftVec(SwsVector *a, int shift){
- SwsVector *shifted= sws_getShiftedVec(a, shift);
- av_free(a->coeff);
- a->coeff= shifted->coeff;
- a->length= shifted->length;
- av_free(shifted);
+ SwsVector *shifted= sws_getShiftedVec(a, shift);
+ av_free(a->coeff);
+ a->coeff= shifted->coeff;
+ a->length= shifted->length;
+ av_free(shifted);
}
void sws_addVec(SwsVector *a, SwsVector *b){
- SwsVector *sum= sws_sumVec(a, b);
- av_free(a->coeff);
- a->coeff= sum->coeff;
- a->length= sum->length;
- av_free(sum);
+ SwsVector *sum= sws_sumVec(a, b);
+ av_free(a->coeff);
+ a->coeff= sum->coeff;
+ a->length= sum->length;
+ av_free(sum);
}
void sws_subVec(SwsVector *a, SwsVector *b){
- SwsVector *diff= sws_diffVec(a, b);
- av_free(a->coeff);
- a->coeff= diff->coeff;
- a->length= diff->length;
- av_free(diff);
+ SwsVector *diff= sws_diffVec(a, b);
+ av_free(a->coeff);
+ a->coeff= diff->coeff;
+ a->length= diff->length;
+ av_free(diff);
}
void sws_convVec(SwsVector *a, SwsVector *b){
- SwsVector *conv= sws_getConvVec(a, b);
- av_free(a->coeff);
- a->coeff= conv->coeff;
- a->length= conv->length;
- av_free(conv);
+ SwsVector *conv= sws_getConvVec(a, b);
+ av_free(a->coeff);
+ a->coeff= conv->coeff;
+ a->length= conv->length;
+ av_free(conv);
}
SwsVector *sws_cloneVec(SwsVector *a){
- double *coeff= av_malloc(a->length*sizeof(double));
- int i;
- SwsVector *vec= av_malloc(sizeof(SwsVector));
+ double *coeff= av_malloc(a->length*sizeof(double));
+ int i;
+ SwsVector *vec= av_malloc(sizeof(SwsVector));
- vec->coeff= coeff;
- vec->length= a->length;
+ vec->coeff= coeff;
+ vec->length= a->length;
- for(i=0; i<a->length; i++) coeff[i]= a->coeff[i];
+ for (i=0; i<a->length; i++) coeff[i]= a->coeff[i];
- return vec;
+ return vec;
}
void sws_printVec(SwsVector *a){
- int i;
- double max=0;
- double min=0;
- double range;
-
- for(i=0; i<a->length; i++)
- if(a->coeff[i]>max) max= a->coeff[i];
-
- for(i=0; i<a->length; i++)
- if(a->coeff[i]<min) min= a->coeff[i];
-
- range= max - min;
-
- for(i=0; i<a->length; i++)
- {
- int x= (int)((a->coeff[i]-min)*60.0/range +0.5);
- av_log(NULL, AV_LOG_DEBUG, "%1.3f ", a->coeff[i]);
- for(;x>0; x--) av_log(NULL, AV_LOG_DEBUG, " ");
- av_log(NULL, AV_LOG_DEBUG, "|\n");
- }
+ int i;
+ double max=0;
+ double min=0;
+ double range;
+
+ for (i=0; i<a->length; i++)
+ if (a->coeff[i]>max) max= a->coeff[i];
+
+ for (i=0; i<a->length; i++)
+ if (a->coeff[i]<min) min= a->coeff[i];
+
+ range= max - min;
+
+ for (i=0; i<a->length; i++)
+ {
+ int x= (int)((a->coeff[i]-min)*60.0/range +0.5);
+ av_log(NULL, AV_LOG_DEBUG, "%1.3f ", a->coeff[i]);
+ for (;x>0; x--) av_log(NULL, AV_LOG_DEBUG, " ");
+ av_log(NULL, AV_LOG_DEBUG, "|\n");
+ }
}
void sws_freeVec(SwsVector *a){
- if(!a) return;
- av_free(a->coeff);
- a->coeff=NULL;
- a->length=0;
- av_free(a);
+ if (!a) return;
+ av_free(a->coeff);
+ a->coeff=NULL;
+ a->length=0;
+ av_free(a);
}
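A minimal usage sketch of the vector API touched above (my own example, not part of the patch); it assumes av_malloc() is visible the same way it is inside swscale.c and that the program links against this libswscale.

#include "swscale.h"

/* Build a 3-tap kernel by hand (SwsVector is a public struct: coeff + length). */
static SwsVector *make_vec3(double a, double b, double c)
{
    SwsVector *v = av_malloc(sizeof(SwsVector));
    v->coeff    = av_malloc(3 * sizeof(double));
    v->length   = 3;
    v->coeff[0] = a; v->coeff[1] = b; v->coeff[2] = c;
    return v;
}

static void vec_demo(void)
{
    SwsVector *v = make_vec3(1.0, 2.0, 1.0);  /* binomial blur kernel        */
    sws_convVec(v, v);                        /* now 5 taps: 1 4 6 4 1       */
    sws_normalizeVec(v, 1.0);                 /* scale so the taps sum to 1  */
    sws_printVec(v);                          /* ASCII plot through av_log   */
    sws_freeVec(v);
}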
void sws_freeFilter(SwsFilter *filter){
- if(!filter) return;
+ if (!filter) return;
- if(filter->lumH) sws_freeVec(filter->lumH);
- if(filter->lumV) sws_freeVec(filter->lumV);
- if(filter->chrH) sws_freeVec(filter->chrH);
- if(filter->chrV) sws_freeVec(filter->chrV);
- av_free(filter);
+ if (filter->lumH) sws_freeVec(filter->lumH);
+ if (filter->lumV) sws_freeVec(filter->lumV);
+ if (filter->chrH) sws_freeVec(filter->chrH);
+ if (filter->chrV) sws_freeVec(filter->chrV);
+ av_free(filter);
}
void sws_freeContext(SwsContext *c){
- int i;
- if(!c) return;
-
- if(c->lumPixBuf)
- {
- for(i=0; i<c->vLumBufSize; i++)
- {
- av_free(c->lumPixBuf[i]);
- c->lumPixBuf[i]=NULL;
- }
- av_free(c->lumPixBuf);
- c->lumPixBuf=NULL;
- }
-
- if(c->chrPixBuf)
- {
- for(i=0; i<c->vChrBufSize; i++)
- {
- av_free(c->chrPixBuf[i]);
- c->chrPixBuf[i]=NULL;
- }
- av_free(c->chrPixBuf);
- c->chrPixBuf=NULL;
- }
-
- av_free(c->vLumFilter);
- c->vLumFilter = NULL;
- av_free(c->vChrFilter);
- c->vChrFilter = NULL;
- av_free(c->hLumFilter);
- c->hLumFilter = NULL;
- av_free(c->hChrFilter);
- c->hChrFilter = NULL;
+ int i;
+ if (!c) return;
+
+ if (c->lumPixBuf)
+ {
+ for (i=0; i<c->vLumBufSize; i++)
+ {
+ av_free(c->lumPixBuf[i]);
+ c->lumPixBuf[i]=NULL;
+ }
+ av_free(c->lumPixBuf);
+ c->lumPixBuf=NULL;
+ }
+
+ if (c->chrPixBuf)
+ {
+ for (i=0; i<c->vChrBufSize; i++)
+ {
+ av_free(c->chrPixBuf[i]);
+ c->chrPixBuf[i]=NULL;
+ }
+ av_free(c->chrPixBuf);
+ c->chrPixBuf=NULL;
+ }
+
+ av_free(c->vLumFilter);
+ c->vLumFilter = NULL;
+ av_free(c->vChrFilter);
+ c->vChrFilter = NULL;
+ av_free(c->hLumFilter);
+ c->hLumFilter = NULL;
+ av_free(c->hChrFilter);
+ c->hChrFilter = NULL;
#ifdef HAVE_ALTIVEC
- av_free(c->vYCoeffsBank);
- c->vYCoeffsBank = NULL;
- av_free(c->vCCoeffsBank);
- c->vCCoeffsBank = NULL;
+ av_free(c->vYCoeffsBank);
+ c->vYCoeffsBank = NULL;
+ av_free(c->vCCoeffsBank);
+ c->vCCoeffsBank = NULL;
#endif
- av_free(c->vLumFilterPos);
- c->vLumFilterPos = NULL;
- av_free(c->vChrFilterPos);
- c->vChrFilterPos = NULL;
- av_free(c->hLumFilterPos);
- c->hLumFilterPos = NULL;
- av_free(c->hChrFilterPos);
- c->hChrFilterPos = NULL;
+ av_free(c->vLumFilterPos);
+ c->vLumFilterPos = NULL;
+ av_free(c->vChrFilterPos);
+ c->vChrFilterPos = NULL;
+ av_free(c->hLumFilterPos);
+ c->hLumFilterPos = NULL;
+ av_free(c->hChrFilterPos);
+ c->hChrFilterPos = NULL;
#if defined(ARCH_X86) && defined(CONFIG_GPL)
#ifdef MAP_ANONYMOUS
- if(c->funnyYCode) munmap(c->funnyYCode, MAX_FUNNY_CODE_SIZE);
- if(c->funnyUVCode) munmap(c->funnyUVCode, MAX_FUNNY_CODE_SIZE);
+ if (c->funnyYCode) munmap(c->funnyYCode, MAX_FUNNY_CODE_SIZE);
+ if (c->funnyUVCode) munmap(c->funnyUVCode, MAX_FUNNY_CODE_SIZE);
#else
- av_free(c->funnyYCode);
- av_free(c->funnyUVCode);
+ av_free(c->funnyYCode);
+ av_free(c->funnyUVCode);
#endif
- c->funnyYCode=NULL;
- c->funnyUVCode=NULL;
+ c->funnyYCode=NULL;
+ c->funnyUVCode=NULL;
#endif /* defined(ARCH_X86) */
- av_free(c->lumMmx2Filter);
- c->lumMmx2Filter=NULL;
- av_free(c->chrMmx2Filter);
- c->chrMmx2Filter=NULL;
- av_free(c->lumMmx2FilterPos);
- c->lumMmx2FilterPos=NULL;
- av_free(c->chrMmx2FilterPos);
- c->chrMmx2FilterPos=NULL;
- av_free(c->yuvTable);
- c->yuvTable=NULL;
-
- av_free(c);
+ av_free(c->lumMmx2Filter);
+ c->lumMmx2Filter=NULL;
+ av_free(c->chrMmx2Filter);
+ c->chrMmx2Filter=NULL;
+ av_free(c->lumMmx2FilterPos);
+ c->lumMmx2FilterPos=NULL;
+ av_free(c->chrMmx2FilterPos);
+ c->chrMmx2FilterPos=NULL;
+ av_free(c->yuvTable);
+ c->yuvTable=NULL;
+
+ av_free(c);
}
/**
@@ -2843,25 +2955,30 @@ void sws_freeContext(SwsContext *c){
 * assumed to remain valid.
*/
struct SwsContext *sws_getCachedContext(struct SwsContext *context,
- int srcW, int srcH, int srcFormat,
- int dstW, int dstH, int dstFormat, int flags,
- SwsFilter *srcFilter, SwsFilter *dstFilter, double *param)
+ int srcW, int srcH, int srcFormat,
+ int dstW, int dstH, int dstFormat, int flags,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param)
{
- if (context != NULL) {
- if ((context->srcW != srcW) || (context->srcH != srcH) ||
- (context->srcFormat != srcFormat) ||
- (context->dstW != dstW) || (context->dstH != dstH) ||
- (context->dstFormat != dstFormat) || (context->flags != flags) ||
- (context->param != param))
+ static const double default_param[2] = {SWS_PARAM_DEFAULT, SWS_PARAM_DEFAULT};
+
+ if (!param)
+ param = default_param;
+
+ if (context) {
+ if (context->srcW != srcW || context->srcH != srcH ||
+ context->srcFormat != srcFormat ||
+ context->dstW != dstW || context->dstH != dstH ||
+ context->dstFormat != dstFormat || context->flags != flags ||
+ context->param[0] != param[0] || context->param[1] != param[1])
{
sws_freeContext(context);
context = NULL;
}
}
- if (context == NULL) {
+ if (!context) {
return sws_getContext(srcW, srcH, srcFormat,
- dstW, dstH, dstFormat, flags,
- srcFilter, dstFilter, param);
+ dstW, dstH, dstFormat, flags,
+ srcFilter, dstFilter, param);
}
return context;
}
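The reworked comparison above checks the stored param[0]/param[1] values (with a NULL param mapped to SWS_PARAM_DEFAULT) instead of comparing the caller's pointer. A hedged usage sketch of the intended per-frame pattern follows; frame geometry, buffers and the pixel-format constants are my own example, not taken from the patch.

static struct SwsContext *sws; /* kept across frames */

static void scale_one_frame(uint8_t *src[], int srcStride[], int srcW, int srcH,
                            uint8_t *dst[], int dstStride[], int dstW, int dstH)
{
    /* Reuses the old context when nothing changed, otherwise frees it and
     * builds a new one; passing NULL for param picks the defaults. */
    sws = sws_getCachedContext(sws, srcW, srcH, PIX_FMT_YUV420P,
                               dstW, dstH, PIX_FMT_RGB24,
                               SWS_BICUBIC, NULL, NULL, NULL);
    if (sws)
        sws_scale(sws, src, srcStride, 0, srcH, dst, dstStride);
}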
diff --git a/contrib/ffmpeg/libswscale/swscale.h b/contrib/ffmpeg/libswscale/swscale.h
index 27d1aa3ef..afd3ff375 100644
--- a/contrib/ffmpeg/libswscale/swscale.h
+++ b/contrib/ffmpeg/libswscale/swscale.h
@@ -15,91 +15,93 @@
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef SWSCALE_H
-#define SWSCALE_H
+#ifndef FFMPEG_SWSCALE_H
+#define FFMPEG_SWSCALE_H
/**
* @file swscale.h
- * @brief
+ * @brief
* external api for the swscale stuff
*/
-#include "avutil.h"
+#include "libavutil/avutil.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
+#define LIBSWSCALE_VERSION_MAJOR 0
+#define LIBSWSCALE_VERSION_MINOR 5
+#define LIBSWSCALE_VERSION_MICRO 0
-#define AV_STRINGIFY(s) AV_TOSTRING(s)
-#define AV_TOSTRING(s) #s
-
-#define LIBSWSCALE_VERSION_INT ((0<<16)+(5<<8)+0)
-#define LIBSWSCALE_VERSION 0.5.0
+#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \
+ LIBSWSCALE_VERSION_MINOR, \
+ LIBSWSCALE_VERSION_MICRO)
+#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \
+ LIBSWSCALE_VERSION_MINOR, \
+ LIBSWSCALE_VERSION_MICRO)
#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT
#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION)
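The version now derives from the three components through avutil's AV_VERSION_INT/AV_VERSION helpers rather than a hand-packed literal. A standalone illustration of the packing only; MY_VERSION_INT is a local stand-in, not the avutil macro.

#include <stdio.h>

#define MY_VERSION_INT(a, b, c) (((a) << 16) | ((b) << 8) | (c))

int main(void)
{
    /* 0.5.0 packs to 0x000500, the same value the old (0<<16)+(5<<8)+0 produced */
    printf("0x%06x\n", MY_VERSION_INT(0, 5, 0));
    return 0;
}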
/* values for the flags, the stuff on the command line is different */
-#define SWS_FAST_BILINEAR 1
-#define SWS_BILINEAR 2
-#define SWS_BICUBIC 4
-#define SWS_X 8
-#define SWS_POINT 0x10
-#define SWS_AREA 0x20
-#define SWS_BICUBLIN 0x40
-#define SWS_GAUSS 0x80
-#define SWS_SINC 0x100
-#define SWS_LANCZOS 0x200
-#define SWS_SPLINE 0x400
-
-#define SWS_SRC_V_CHR_DROP_MASK 0x30000
-#define SWS_SRC_V_CHR_DROP_SHIFT 16
-
-#define SWS_PARAM_DEFAULT 123456
-
-#define SWS_PRINT_INFO 0x1000
-
-//the following 3 flags are not completly implemented
-//internal chrominace subsamling info
-#define SWS_FULL_CHR_H_INT 0x2000
+#define SWS_FAST_BILINEAR 1
+#define SWS_BILINEAR 2
+#define SWS_BICUBIC 4
+#define SWS_X 8
+#define SWS_POINT 0x10
+#define SWS_AREA 0x20
+#define SWS_BICUBLIN 0x40
+#define SWS_GAUSS 0x80
+#define SWS_SINC 0x100
+#define SWS_LANCZOS 0x200
+#define SWS_SPLINE 0x400
+
+#define SWS_SRC_V_CHR_DROP_MASK 0x30000
+#define SWS_SRC_V_CHR_DROP_SHIFT 16
+
+#define SWS_PARAM_DEFAULT 123456
+
+#define SWS_PRINT_INFO 0x1000
+
+//the following 3 flags are not completely implemented
+//internal chrominance subsampling info
+#define SWS_FULL_CHR_H_INT 0x2000
//input subsampling info
-#define SWS_FULL_CHR_H_INP 0x4000
-#define SWS_DIRECT_BGR 0x8000
-#define SWS_ACCURATE_RND 0x40000
+#define SWS_FULL_CHR_H_INP 0x4000
+#define SWS_DIRECT_BGR 0x8000
+#define SWS_ACCURATE_RND 0x40000
-#define SWS_CPU_CAPS_MMX 0x80000000
-#define SWS_CPU_CAPS_MMX2 0x20000000
-#define SWS_CPU_CAPS_3DNOW 0x40000000
-#define SWS_CPU_CAPS_ALTIVEC 0x10000000
+#define SWS_CPU_CAPS_MMX 0x80000000
+#define SWS_CPU_CAPS_MMX2 0x20000000
+#define SWS_CPU_CAPS_3DNOW 0x40000000
+#define SWS_CPU_CAPS_ALTIVEC 0x10000000
+#define SWS_CPU_CAPS_BFIN 0x01000000
#define SWS_MAX_REDUCE_CUTOFF 0.002
-#define SWS_CS_ITU709 1
-#define SWS_CS_FCC 4
-#define SWS_CS_ITU601 5
-#define SWS_CS_ITU624 5
-#define SWS_CS_SMPTE170M 5
-#define SWS_CS_SMPTE240M 7
-#define SWS_CS_DEFAULT 5
+#define SWS_CS_ITU709 1
+#define SWS_CS_FCC 4
+#define SWS_CS_ITU601 5
+#define SWS_CS_ITU624 5
+#define SWS_CS_SMPTE170M 5
+#define SWS_CS_SMPTE240M 7
+#define SWS_CS_DEFAULT 5
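The flag values are plain bits that callers OR together; a small illustrative combination (my example, not from the patch) using only constants defined above:

/* Bicubic scaling, log the selected code path, and request the maximum
 * vertical chroma drop; the shifted value stays inside SWS_SRC_V_CHR_DROP_MASK. */
static const int example_flags = SWS_BICUBIC
                               | SWS_PRINT_INFO
                               | (3 << SWS_SRC_V_CHR_DROP_SHIFT);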
// when used for filters they must have an odd number of elements
// coeffs cannot be shared between vectors
typedef struct {
- double *coeff;
- int length;
+ double *coeff;
+ int length;
} SwsVector;
// vectors can be shared
typedef struct {
- SwsVector *lumH;
- SwsVector *lumV;
- SwsVector *chrH;
- SwsVector *chrV;
+ SwsVector *lumH;
+ SwsVector *lumV;
+ SwsVector *chrH;
+ SwsVector *chrV;
} SwsFilter;
struct SwsContext;
@@ -107,11 +109,11 @@ struct SwsContext;
void sws_freeContext(struct SwsContext *swsContext);
struct SwsContext *sws_getContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, int flags,
- SwsFilter *srcFilter, SwsFilter *dstFilter, double *param);
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param);
int sws_scale(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]);
+ int srcSliceH, uint8_t* dst[], int dstStride[]);
int sws_scale_ordered(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]) attribute_deprecated;
+ int srcSliceH, uint8_t* dst[], int dstStride[]) attribute_deprecated;
int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4], int srcRange, const int table[4], int dstRange, int brightness, int contrast, int saturation);
@@ -130,19 +132,15 @@ SwsVector *sws_cloneVec(SwsVector *a);
void sws_printVec(SwsVector *a);
void sws_freeVec(SwsVector *a);
-SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
- float lumaSarpen, float chromaSharpen,
- float chromaHShift, float chromaVShift,
- int verbose);
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+ float lumaSarpen, float chromaSharpen,
+ float chromaHShift, float chromaVShift,
+ int verbose);
void sws_freeFilter(SwsFilter *filter);
struct SwsContext *sws_getCachedContext(struct SwsContext *context,
- int srcW, int srcH, int srcFormat,
- int dstW, int dstH, int dstFormat, int flags,
- SwsFilter *srcFilter, SwsFilter *dstFilter, double *param);
-
-#ifdef __cplusplus
-}
-#endif
+ int srcW, int srcH, int srcFormat,
+ int dstW, int dstH, int dstFormat, int flags,
+ SwsFilter *srcFilter, SwsFilter *dstFilter, double *param);
-#endif
+#endif /* FFMPEG_SWSCALE_H */
diff --git a/contrib/ffmpeg/libswscale/swscale_altivec_template.c b/contrib/ffmpeg/libswscale/swscale_altivec_template.c
index 251b38ca1..47b7e0dc6 100644
--- a/contrib/ffmpeg/libswscale/swscale_altivec_template.c
+++ b/contrib/ffmpeg/libswscale/swscale_altivec_template.c
@@ -2,7 +2,7 @@
* AltiVec-enhanced yuv2yuvX
*
* Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
- * based on the equivalent C code in "postproc/swscale.c"
+ * based on the equivalent C code in swscale.c
*
* This file is part of FFmpeg.
*
@@ -18,531 +18,521 @@
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifdef CONFIG_DARWIN
-#define AVV(x...) (x)
-#else
-#define AVV(x...) {x}
-#endif
-
#define vzero vec_splat_s32(0)
static inline void
altivec_packIntArrayToCharArray(int *val, uint8_t* dest, int dstW) {
- register int i;
- vector unsigned int altivec_vectorShiftInt19 =
- vec_add(vec_splat_u32(10),vec_splat_u32(9));
- if ((unsigned long)dest % 16) {
- /* badly aligned store, we force store alignement */
- /* and will handle load misalignement on val w/ vec_perm */
- vector unsigned char perm1;
- vector signed int v1;
- for (i = 0 ; (i < dstW) &&
- (((unsigned long)dest + i) % 16) ; i++) {
- int t = val[i] >> 19;
- dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
- }
- perm1 = vec_lvsl(i << 2, val);
- v1 = vec_ld(i << 2, val);
- for ( ; i < (dstW - 15); i+=16) {
- int offset = i << 2;
- vector signed int v2 = vec_ld(offset + 16, val);
- vector signed int v3 = vec_ld(offset + 32, val);
- vector signed int v4 = vec_ld(offset + 48, val);
- vector signed int v5 = vec_ld(offset + 64, val);
- vector signed int v12 = vec_perm(v1,v2,perm1);
- vector signed int v23 = vec_perm(v2,v3,perm1);
- vector signed int v34 = vec_perm(v3,v4,perm1);
- vector signed int v45 = vec_perm(v4,v5,perm1);
-
- vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
- vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
- vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
- vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
- vector unsigned short vs1 = vec_packsu(vA, vB);
- vector unsigned short vs2 = vec_packsu(vC, vD);
- vector unsigned char vf = vec_packsu(vs1, vs2);
- vec_st(vf, i, dest);
- v1 = v5;
+ register int i;
+ vector unsigned int altivec_vectorShiftInt19 =
+ vec_add(vec_splat_u32(10), vec_splat_u32(9));
+ if ((unsigned long)dest % 16) {
+ /* badly aligned store, we force store alignment */
+ /* and will handle load misalignment on val w/ vec_perm */
+ vector unsigned char perm1;
+ vector signed int v1;
+ for (i = 0 ; (i < dstW) &&
+ (((unsigned long)dest + i) % 16) ; i++) {
+ int t = val[i] >> 19;
+ dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
+ }
+ perm1 = vec_lvsl(i << 2, val);
+ v1 = vec_ld(i << 2, val);
+ for ( ; i < (dstW - 15); i+=16) {
+ int offset = i << 2;
+ vector signed int v2 = vec_ld(offset + 16, val);
+ vector signed int v3 = vec_ld(offset + 32, val);
+ vector signed int v4 = vec_ld(offset + 48, val);
+ vector signed int v5 = vec_ld(offset + 64, val);
+ vector signed int v12 = vec_perm(v1, v2, perm1);
+ vector signed int v23 = vec_perm(v2, v3, perm1);
+ vector signed int v34 = vec_perm(v3, v4, perm1);
+ vector signed int v45 = vec_perm(v4, v5, perm1);
+
+ vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
+ vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
+ vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
+ vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
+ vector unsigned short vs1 = vec_packsu(vA, vB);
+ vector unsigned short vs2 = vec_packsu(vC, vD);
+ vector unsigned char vf = vec_packsu(vs1, vs2);
+ vec_st(vf, i, dest);
+ v1 = v5;
+ }
+ } else { // dest is properly aligned, great
+ for (i = 0; i < (dstW - 15); i+=16) {
+ int offset = i << 2;
+ vector signed int v1 = vec_ld(offset, val);
+ vector signed int v2 = vec_ld(offset + 16, val);
+ vector signed int v3 = vec_ld(offset + 32, val);
+ vector signed int v4 = vec_ld(offset + 48, val);
+ vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
+ vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
+ vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
+ vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
+ vector unsigned short vs1 = vec_packsu(v5, v6);
+ vector unsigned short vs2 = vec_packsu(v7, v8);
+ vector unsigned char vf = vec_packsu(vs1, vs2);
+ vec_st(vf, i, dest);
+ }
}
- } else { // dest is properly aligned, great
- for (i = 0; i < (dstW - 15); i+=16) {
- int offset = i << 2;
- vector signed int v1 = vec_ld(offset, val);
- vector signed int v2 = vec_ld(offset + 16, val);
- vector signed int v3 = vec_ld(offset + 32, val);
- vector signed int v4 = vec_ld(offset + 48, val);
- vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
- vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
- vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
- vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);
- vector unsigned short vs1 = vec_packsu(v5, v6);
- vector unsigned short vs2 = vec_packsu(v7, v8);
- vector unsigned char vf = vec_packsu(vs1, vs2);
- vec_st(vf, i, dest);
+ for ( ; i < dstW ; i++) {
+ int t = val[i] >> 19;
+ dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
}
- }
- for ( ; i < dstW ; i++) {
- int t = val[i] >> 19;
- dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
- }
}
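Scalar reference for what altivec_packIntArrayToCharArray computes (the same arithmetic as its unaligned head/tail loops): each 32-bit accumulator is shifted down by 19 bits and saturated to a byte. A sketch, with a hypothetical function name:

#include <stdint.h>

static void pack_int_array_to_char_array_c(const int *val, uint8_t *dest, int dstW)
{
    int i;
    for (i = 0; i < dstW; i++) {
        int t = val[i] >> 19;                        /* drop the fixed-point fraction */
        dest[i] = t < 0 ? 0 : (t > 255 ? 255 : t);   /* saturate to 0..255            */
    }
}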
static inline void
yuv2yuvX_altivec_real(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
{
- const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
- register int i, j;
- {
- int __attribute__ ((aligned (16))) val[dstW];
-
- for (i = 0; i < (dstW -7); i+=4) {
- vec_st(vini, i << 2, val);
- }
- for (; i < dstW; i++) {
- val[i] = (1 << 18);
- }
-
- for (j = 0; j < lumFilterSize; j++) {
- vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
- vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
- vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
- vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter
-
- perm = vec_lvsl(0, lumSrc[j]);
- l1 = vec_ld(0, lumSrc[j]);
-
- for (i = 0; i < (dstW - 7); i+=8) {
- int offset = i << 2;
- vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);
-
- vector signed int v1 = vec_ld(offset, val);
- vector signed int v2 = vec_ld(offset + 16, val);
-
- vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]
-
- vector signed int i1 = vec_mule(vLumFilter, ls);
- vector signed int i2 = vec_mulo(vLumFilter, ls);
-
- vector signed int vf1 = vec_mergeh(i1, i2);
- vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]
-
- vector signed int vo1 = vec_add(v1, vf1);
- vector signed int vo2 = vec_add(v2, vf2);
-
- vec_st(vo1, offset, val);
- vec_st(vo2, offset + 16, val);
-
- l1 = l2;
- }
- for ( ; i < dstW; i++) {
- val[i] += lumSrc[j][i] * lumFilter[j];
- }
- }
- altivec_packIntArrayToCharArray(val,dest,dstW);
- }
- if (uDest != 0) {
- int __attribute__ ((aligned (16))) u[chrDstW];
- int __attribute__ ((aligned (16))) v[chrDstW];
-
- for (i = 0; i < (chrDstW -7); i+=4) {
- vec_st(vini, i << 2, u);
- vec_st(vini, i << 2, v);
- }
- for (; i < chrDstW; i++) {
- u[i] = (1 << 18);
- v[i] = (1 << 18);
+ const vector signed int vini = {(1 << 18), (1 << 18), (1 << 18), (1 << 18)};
+ register int i, j;
+ {
+ int __attribute__ ((aligned (16))) val[dstW];
+
+ for (i = 0; i < (dstW -7); i+=4) {
+ vec_st(vini, i << 2, val);
+ }
+ for (; i < dstW; i++) {
+ val[i] = (1 << 18);
+ }
+
+ for (j = 0; j < lumFilterSize; j++) {
+ vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
+ vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
+ vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
+ vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter
+
+ perm = vec_lvsl(0, lumSrc[j]);
+ l1 = vec_ld(0, lumSrc[j]);
+
+ for (i = 0; i < (dstW - 7); i+=8) {
+ int offset = i << 2;
+ vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);
+
+ vector signed int v1 = vec_ld(offset, val);
+ vector signed int v2 = vec_ld(offset + 16, val);
+
+ vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]
+
+ vector signed int i1 = vec_mule(vLumFilter, ls);
+ vector signed int i2 = vec_mulo(vLumFilter, ls);
+
+ vector signed int vf1 = vec_mergeh(i1, i2);
+ vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]
+
+ vector signed int vo1 = vec_add(v1, vf1);
+ vector signed int vo2 = vec_add(v2, vf2);
+
+ vec_st(vo1, offset, val);
+ vec_st(vo2, offset + 16, val);
+
+ l1 = l2;
+ }
+ for ( ; i < dstW; i++) {
+ val[i] += lumSrc[j][i] * lumFilter[j];
+ }
+ }
+ altivec_packIntArrayToCharArray(val, dest, dstW);
}
-
- for (j = 0; j < chrFilterSize; j++) {
- vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
- vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
- vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
- vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter
-
- perm = vec_lvsl(0, chrSrc[j]);
- l1 = vec_ld(0, chrSrc[j]);
- l1_V = vec_ld(2048 << 1, chrSrc[j]);
-
- for (i = 0; i < (chrDstW - 7); i+=8) {
- int offset = i << 2;
- vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]);
- vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]);
-
- vector signed int v1 = vec_ld(offset, u);
- vector signed int v2 = vec_ld(offset + 16, u);
- vector signed int v1_V = vec_ld(offset, v);
- vector signed int v2_V = vec_ld(offset + 16, v);
-
- vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7]
- vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055]
-
- vector signed int i1 = vec_mule(vChrFilter, ls);
- vector signed int i2 = vec_mulo(vChrFilter, ls);
- vector signed int i1_V = vec_mule(vChrFilter, ls_V);
- vector signed int i2_V = vec_mulo(vChrFilter, ls_V);
-
- vector signed int vf1 = vec_mergeh(i1, i2);
- vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
- vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
- vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
-
- vector signed int vo1 = vec_add(v1, vf1);
- vector signed int vo2 = vec_add(v2, vf2);
- vector signed int vo1_V = vec_add(v1_V, vf1_V);
- vector signed int vo2_V = vec_add(v2_V, vf2_V);
-
- vec_st(vo1, offset, u);
- vec_st(vo2, offset + 16, u);
- vec_st(vo1_V, offset, v);
- vec_st(vo2_V, offset + 16, v);
-
- l1 = l2;
- l1_V = l2_V;
- }
- for ( ; i < chrDstW; i++) {
- u[i] += chrSrc[j][i] * chrFilter[j];
- v[i] += chrSrc[j][i + 2048] * chrFilter[j];
- }
+ if (uDest != 0) {
+ int __attribute__ ((aligned (16))) u[chrDstW];
+ int __attribute__ ((aligned (16))) v[chrDstW];
+
+ for (i = 0; i < (chrDstW -7); i+=4) {
+ vec_st(vini, i << 2, u);
+ vec_st(vini, i << 2, v);
+ }
+ for (; i < chrDstW; i++) {
+ u[i] = (1 << 18);
+ v[i] = (1 << 18);
+ }
+
+ for (j = 0; j < chrFilterSize; j++) {
+ vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
+ vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
+ vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
+ vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter
+
+ perm = vec_lvsl(0, chrSrc[j]);
+ l1 = vec_ld(0, chrSrc[j]);
+ l1_V = vec_ld(2048 << 1, chrSrc[j]);
+
+ for (i = 0; i < (chrDstW - 7); i+=8) {
+ int offset = i << 2;
+ vector signed short l2 = vec_ld((i << 1) + 16, chrSrc[j]);
+ vector signed short l2_V = vec_ld(((i + 2048) << 1) + 16, chrSrc[j]);
+
+ vector signed int v1 = vec_ld(offset, u);
+ vector signed int v2 = vec_ld(offset + 16, u);
+ vector signed int v1_V = vec_ld(offset, v);
+ vector signed int v2_V = vec_ld(offset + 16, v);
+
+ vector signed short ls = vec_perm(l1, l2, perm); // chrSrc[j][i] ... chrSrc[j][i+7]
+ vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrSrc[j][i+2048] ... chrSrc[j][i+2055]
+
+ vector signed int i1 = vec_mule(vChrFilter, ls);
+ vector signed int i2 = vec_mulo(vChrFilter, ls);
+ vector signed int i1_V = vec_mule(vChrFilter, ls_V);
+ vector signed int i2_V = vec_mulo(vChrFilter, ls_V);
+
+ vector signed int vf1 = vec_mergeh(i1, i2);
+ vector signed int vf2 = vec_mergel(i1, i2); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
+ vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
+ vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrSrc[j][i] * chrFilter[j] ... chrSrc[j][i+7] * chrFilter[j]
+
+ vector signed int vo1 = vec_add(v1, vf1);
+ vector signed int vo2 = vec_add(v2, vf2);
+ vector signed int vo1_V = vec_add(v1_V, vf1_V);
+ vector signed int vo2_V = vec_add(v2_V, vf2_V);
+
+ vec_st(vo1, offset, u);
+ vec_st(vo2, offset + 16, u);
+ vec_st(vo1_V, offset, v);
+ vec_st(vo2_V, offset + 16, v);
+
+ l1 = l2;
+ l1_V = l2_V;
+ }
+ for ( ; i < chrDstW; i++) {
+ u[i] += chrSrc[j][i] * chrFilter[j];
+ v[i] += chrSrc[j][i + 2048] * chrFilter[j];
+ }
+ }
+ altivec_packIntArrayToCharArray(u, uDest, chrDstW);
+ altivec_packIntArrayToCharArray(v, vDest, chrDstW);
}
- altivec_packIntArrayToCharArray(u,uDest,chrDstW);
- altivec_packIntArrayToCharArray(v,vDest,chrDstW);
- }
}
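In scalar terms the vertical pass above is a per-pixel dot product over the filter taps, starting from a 1<<18 rounding bias and followed by the same >>19 packing. A sketch of the luma half only (chroma works the same way on the u/v arrays); the name is illustrative:

#include <stdint.h>

static void yuv2yuvX_luma_c(const int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                            uint8_t *dest, int dstW)
{
    int i, j;
    for (i = 0; i < dstW; i++) {
        int val = 1 << 18;                       /* rounding bias (vini above) */
        for (j = 0; j < lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];  /* accumulate the taps        */
        val >>= 19;
        dest[i] = val < 0 ? 0 : (val > 255 ? 255 : val);
    }
}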
static inline void hScale_altivec_real(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc, int16_t *filter, int16_t *filterPos, int filterSize) {
- register int i;
- int __attribute__ ((aligned (16))) tempo[4];
-
- if (filterSize % 4) {
- for(i=0; i<dstW; i++) {
- register int j;
- register int srcPos = filterPos[i];
- register int val = 0;
- for(j=0; j<filterSize; j++) {
- val += ((int)src[srcPos + j])*filter[filterSize*i + j];
- }
- dst[i] = av_clip(val>>7, 0, (1<<15)-1);
+ register int i;
+ int __attribute__ ((aligned (16))) tempo[4];
+
+ if (filterSize % 4) {
+ for (i=0; i<dstW; i++) {
+ register int j;
+ register int srcPos = filterPos[i];
+ register int val = 0;
+ for (j=0; j<filterSize; j++) {
+ val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+ }
+ dst[i] = av_clip(val>>7, 0, (1<<15)-1);
+ }
}
- }
- else
- switch (filterSize) {
- case 4:
+ else
+ switch (filterSize) {
+ case 4:
{
- for(i=0; i<dstW; i++) {
- register int srcPos = filterPos[i];
-
- vector unsigned char src_v0 = vec_ld(srcPos, src);
- vector unsigned char src_v1, src_vF;
- vector signed short src_v, filter_v;
- vector signed int val_vEven, val_s;
- if ((((int)src + srcPos)% 16) > 12) {
- src_v1 = vec_ld(srcPos + 16, src);
- }
- src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
-
- src_v = // vec_unpackh sign-extends...
- (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
- // now put our elements in the even slots
- src_v = vec_mergeh(src_v, (vector signed short)vzero);
-
- filter_v = vec_ld(i << 3, filter);
+ for (i=0; i<dstW; i++) {
+ register int srcPos = filterPos[i];
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1, src_vF;
+ vector signed short src_v, filter_v;
+ vector signed int val_vEven, val_s;
+ if ((((int)src + srcPos)% 16) > 12) {
+ src_v1 = vec_ld(srcPos + 16, src);
+ }
+ src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+ src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ // now put our elements in the even slots
+ src_v = vec_mergeh(src_v, (vector signed short)vzero);
+
+ filter_v = vec_ld(i << 3, filter);
// the 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2)
// the neat trick : we only care for half the elements,
// high or low depending on (i<<3)%16 (it's 0 or 8 here),
// and we're going to use vec_mule, so we chose
// carefully how to "unpack" the elements into the even slots
- if ((i << 3) % 16)
- filter_v = vec_mergel(filter_v,(vector signed short)vzero);
- else
- filter_v = vec_mergeh(filter_v,(vector signed short)vzero);
-
- val_vEven = vec_mule(src_v, filter_v);
- val_s = vec_sums(val_vEven, vzero);
- vec_st(val_s, 0, tempo);
- dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1);
- }
+ if ((i << 3) % 16)
+ filter_v = vec_mergel(filter_v, (vector signed short)vzero);
+ else
+ filter_v = vec_mergeh(filter_v, (vector signed short)vzero);
+
+ val_vEven = vec_mule(src_v, filter_v);
+ val_s = vec_sums(val_vEven, vzero);
+ vec_st(val_s, 0, tempo);
+ dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1);
+ }
}
break;
- case 8:
+ case 8:
{
- for(i=0; i<dstW; i++) {
- register int srcPos = filterPos[i];
-
- vector unsigned char src_v0 = vec_ld(srcPos, src);
- vector unsigned char src_v1, src_vF;
- vector signed short src_v, filter_v;
- vector signed int val_v, val_s;
- if ((((int)src + srcPos)% 16) > 8) {
- src_v1 = vec_ld(srcPos + 16, src);
- }
- src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
-
- src_v = // vec_unpackh sign-extends...
- (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
- filter_v = vec_ld(i << 4, filter);
+ for (i=0; i<dstW; i++) {
+ register int srcPos = filterPos[i];
+
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1, src_vF;
+ vector signed short src_v, filter_v;
+ vector signed int val_v, val_s;
+ if ((((int)src + srcPos)% 16) > 8) {
+ src_v1 = vec_ld(srcPos + 16, src);
+ }
+ src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+
+ src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ filter_v = vec_ld(i << 4, filter);
// the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
- val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
- val_s = vec_sums(val_v, vzero);
- vec_st(val_s, 0, tempo);
- dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1);
- }
+ val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
+ val_s = vec_sums(val_v, vzero);
+ vec_st(val_s, 0, tempo);
+ dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1);
+ }
}
break;
- case 16:
+ case 16:
{
- for(i=0; i<dstW; i++) {
- register int srcPos = filterPos[i];
+ for (i=0; i<dstW; i++) {
+ register int srcPos = filterPos[i];
- vector unsigned char src_v0 = vec_ld(srcPos, src);
- vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
- vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
+ vector unsigned char src_v0 = vec_ld(srcPos, src);
+ vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));
- vector signed short src_vA = // vec_unpackh sign-extends...
- (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
- vector signed short src_vB = // vec_unpackh sign-extends...
- (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+ vector signed short src_vA = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ vector signed short src_vB = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
- vector signed short filter_v0 = vec_ld(i << 5, filter);
- vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
- // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)
+ vector signed short filter_v0 = vec_ld(i << 5, filter);
+ vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
+ // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)
- vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
- vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
+ vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
+ vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);
- vector signed int val_s = vec_sums(val_v, vzero);
+ vector signed int val_s = vec_sums(val_v, vzero);
- vec_st(val_s, 0, tempo);
- dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1);
- }
+ vec_st(val_s, 0, tempo);
+ dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1);
+ }
}
break;
-
- default:
+
+ default:
{
- for(i=0; i<dstW; i++) {
- register int j;
- register int srcPos = filterPos[i];
+ for (i=0; i<dstW; i++) {
+ register int j;
+ register int srcPos = filterPos[i];
vector signed int val_s, val_v = (vector signed int)vzero;
- vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
+ vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);
vector unsigned char src_v0 = vec_ld(srcPos, src);
vector unsigned char permS = vec_lvsl(srcPos, src);
for (j = 0 ; j < filterSize - 15; j += 16) {
- vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
- vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
-
- vector signed short src_vA = // vec_unpackh sign-extends...
- (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
- vector signed short src_vB = // vec_unpackh sign-extends...
- (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
-
- vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
- vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
- vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
- vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);
-
- vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
- val_v = vec_msums(src_vB, filter_v1, val_acc);
-
- filter_v0R = filter_v2R;
- src_v0 = src_v1;
+ vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
+ vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);
+
+ vector signed short src_vA = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ vector signed short src_vB = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));
+
+ vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+ vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
+ vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
+ vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);
+
+ vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
+ val_v = vec_msums(src_vB, filter_v1, val_acc);
+
+ filter_v0R = filter_v2R;
+ src_v0 = src_v1;
}
- if (j < (filterSize-7)) {
- // loading src_v0 is useless, it's already done above
- //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
- vector unsigned char src_v1, src_vF;
- vector signed short src_v, filter_v1R, filter_v;
- if ((((int)src + srcPos)% 16) > 8) {
- src_v1 = vec_ld(srcPos + j + 16, src);
- }
- src_vF = vec_perm(src_v0, src_v1, permS);
-
- src_v = // vec_unpackh sign-extends...
- (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
- // loading filter_v0R is useless, it's already done above
- //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
- filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
- filter_v = vec_perm(filter_v0R, filter_v1R, permF);
-
- val_v = vec_msums(src_v, filter_v, val_v);
+ if (j < filterSize-7) {
+ // loading src_v0 is useless, it's already done above
+ //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
+ vector unsigned char src_v1, src_vF;
+ vector signed short src_v, filter_v1R, filter_v;
+ if ((((int)src + srcPos)% 16) > 8) {
+ src_v1 = vec_ld(srcPos + j + 16, src);
+ }
+ src_vF = vec_perm(src_v0, src_v1, permS);
+
+ src_v = // vec_unpackh sign-extends...
+ (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
+ // loading filter_v0R is useless, it's already done above
+ //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
+ filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
+ filter_v = vec_perm(filter_v0R, filter_v1R, permF);
+
+ val_v = vec_msums(src_v, filter_v, val_v);
}
val_s = vec_sums(val_v, vzero);
-
+
vec_st(val_s, 0, tempo);
- dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1);
- }
-
+ dst[i] = av_clip(tempo[3]>>7, 0, (1<<15)-1);
+ }
+
+ }
}
- }
}
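hScale's fallback path (the filterSize % 4 branch) is also the clearest statement of what every vectorized case computes: 8-bit source samples times 16-bit coefficients, summed in 32 bits, then >>7 and clipped to 15 bits. As a standalone sketch with a hypothetical name:

#include <stdint.h>

static void hScale_c(int16_t *dst, int dstW, const uint8_t *src,
                     const int16_t *filter, const int16_t *filterPos, int filterSize)
{
    int i, j;
    for (i = 0; i < dstW; i++) {
        int srcPos = filterPos[i];               /* left edge of this tap window */
        int val = 0;
        for (j = 0; j < filterSize; j++)
            val += src[srcPos + j] * filter[filterSize * i + j];
        val >>= 7;
        dst[i] = val < 0 ? 0 : (val > 32767 ? 32767 : val);
    }
}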
static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
- uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
- // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
- uint8_t *ysrc = src[0];
- uint8_t *usrc = src[1];
- uint8_t *vsrc = src[2];
- const int width = c->srcW;
- const int height = srcSliceH;
- const int lumStride = srcStride[0];
- const int chromStride = srcStride[1];
- const int dstStride = dstStride_a[0];
- const vector unsigned char yperm = vec_lvsl(0, ysrc);
- const int vertLumPerChroma = 2;
- register unsigned int y;
-
- if(width&15){
- yv12toyuy2( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
- return srcSliceH;
- }
-
- /* this code assume:
+ int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
+ uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
+ // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
+ uint8_t *ysrc = src[0];
+ uint8_t *usrc = src[1];
+ uint8_t *vsrc = src[2];
+ const int width = c->srcW;
+ const int height = srcSliceH;
+ const int lumStride = srcStride[0];
+ const int chromStride = srcStride[1];
+ const int dstStride = dstStride_a[0];
+ const vector unsigned char yperm = vec_lvsl(0, ysrc);
+ const int vertLumPerChroma = 2;
+ register unsigned int y;
+
+ if (width&15) {
+ yv12toyuy2(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
+ return srcSliceH;
+ }
- 1) dst is 16 bytes-aligned
- 2) dstStride is a multiple of 16
- 3) width is a multiple of 16
- 4) lum&chrom stride are multiple of 8
- */
-
- for(y=0; y<height; y++)
- {
- int i;
- for (i = 0; i < width - 31; i+= 32) {
- const unsigned int j = i >> 1;
- vector unsigned char v_yA = vec_ld(i, ysrc);
- vector unsigned char v_yB = vec_ld(i + 16, ysrc);
- vector unsigned char v_yC = vec_ld(i + 32, ysrc);
- vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
- vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
- vector unsigned char v_uA = vec_ld(j, usrc);
- vector unsigned char v_uB = vec_ld(j + 16, usrc);
- vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
- vector unsigned char v_vA = vec_ld(j, vsrc);
- vector unsigned char v_vB = vec_ld(j + 16, vsrc);
- vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
- vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
- vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
- vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
- vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
- vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
- vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
- vec_st(v_yuy2_0, (i << 1), dst);
- vec_st(v_yuy2_1, (i << 1) + 16, dst);
- vec_st(v_yuy2_2, (i << 1) + 32, dst);
- vec_st(v_yuy2_3, (i << 1) + 48, dst);
- }
- if (i < width) {
- const unsigned int j = i >> 1;
- vector unsigned char v_y1 = vec_ld(i, ysrc);
- vector unsigned char v_u = vec_ld(j, usrc);
- vector unsigned char v_v = vec_ld(j, vsrc);
- vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
- vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
- vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
- vec_st(v_yuy2_0, (i << 1), dst);
- vec_st(v_yuy2_1, (i << 1) + 16, dst);
- }
- if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
- {
- usrc += chromStride;
- vsrc += chromStride;
- }
- ysrc += lumStride;
- dst += dstStride;
+    /* this code assumes:
+
+ 1) dst is 16 bytes-aligned
+ 2) dstStride is a multiple of 16
+ 3) width is a multiple of 16
+ 4) lum&chrom stride are multiple of 8
+ */
+
+ for (y=0; y<height; y++) {
+ int i;
+ for (i = 0; i < width - 31; i+= 32) {
+ const unsigned int j = i >> 1;
+ vector unsigned char v_yA = vec_ld(i, ysrc);
+ vector unsigned char v_yB = vec_ld(i + 16, ysrc);
+ vector unsigned char v_yC = vec_ld(i + 32, ysrc);
+ vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
+ vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
+ vector unsigned char v_uA = vec_ld(j, usrc);
+ vector unsigned char v_uB = vec_ld(j + 16, usrc);
+ vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
+ vector unsigned char v_vA = vec_ld(j, vsrc);
+ vector unsigned char v_vB = vec_ld(j + 16, vsrc);
+ vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
+ vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+ vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
+ vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
+ vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
+ vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
+ vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
+ vec_st(v_yuy2_0, (i << 1), dst);
+ vec_st(v_yuy2_1, (i << 1) + 16, dst);
+ vec_st(v_yuy2_2, (i << 1) + 32, dst);
+ vec_st(v_yuy2_3, (i << 1) + 48, dst);
+ }
+ if (i < width) {
+ const unsigned int j = i >> 1;
+ vector unsigned char v_y1 = vec_ld(i, ysrc);
+ vector unsigned char v_u = vec_ld(j, usrc);
+ vector unsigned char v_v = vec_ld(j, vsrc);
+ vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+ vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
+ vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
+ vec_st(v_yuy2_0, (i << 1), dst);
+ vec_st(v_yuy2_1, (i << 1) + 16, dst);
+ }
+ if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
}
-
- return srcSliceH;
-}
-static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
- uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
- // yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
- uint8_t *ysrc = src[0];
- uint8_t *usrc = src[1];
- uint8_t *vsrc = src[2];
- const int width = c->srcW;
- const int height = srcSliceH;
- const int lumStride = srcStride[0];
- const int chromStride = srcStride[1];
- const int dstStride = dstStride_a[0];
- const int vertLumPerChroma = 2;
- const vector unsigned char yperm = vec_lvsl(0, ysrc);
- register unsigned int y;
-
- if(width&15){
- yv12touyvy( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
return srcSliceH;
- }
+}
- /* this code assume:
+static inline int yv12touyvy_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
+ uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
+ // yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0], srcStride[1], dstStride[0]);
+ uint8_t *ysrc = src[0];
+ uint8_t *usrc = src[1];
+ uint8_t *vsrc = src[2];
+ const int width = c->srcW;
+ const int height = srcSliceH;
+ const int lumStride = srcStride[0];
+ const int chromStride = srcStride[1];
+ const int dstStride = dstStride_a[0];
+ const int vertLumPerChroma = 2;
+ const vector unsigned char yperm = vec_lvsl(0, ysrc);
+ register unsigned int y;
+
+ if (width&15) {
+ yv12touyvy(ysrc, usrc, vsrc, dst, c->srcW, srcSliceH, lumStride, chromStride, dstStride);
+ return srcSliceH;
+ }
- 1) dst is 16 bytes-aligned
- 2) dstStride is a multiple of 16
- 3) width is a multiple of 16
- 4) lum&chrom stride are multiple of 8
- */
-
- for(y=0; y<height; y++)
- {
- int i;
- for (i = 0; i < width - 31; i+= 32) {
- const unsigned int j = i >> 1;
- vector unsigned char v_yA = vec_ld(i, ysrc);
- vector unsigned char v_yB = vec_ld(i + 16, ysrc);
- vector unsigned char v_yC = vec_ld(i + 32, ysrc);
- vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
- vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
- vector unsigned char v_uA = vec_ld(j, usrc);
- vector unsigned char v_uB = vec_ld(j + 16, usrc);
- vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
- vector unsigned char v_vA = vec_ld(j, vsrc);
- vector unsigned char v_vB = vec_ld(j + 16, vsrc);
- vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
- vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
- vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
- vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
- vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
- vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
- vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
- vec_st(v_uyvy_0, (i << 1), dst);
- vec_st(v_uyvy_1, (i << 1) + 16, dst);
- vec_st(v_uyvy_2, (i << 1) + 32, dst);
- vec_st(v_uyvy_3, (i << 1) + 48, dst);
- }
- if (i < width) {
- const unsigned int j = i >> 1;
- vector unsigned char v_y1 = vec_ld(i, ysrc);
- vector unsigned char v_u = vec_ld(j, usrc);
- vector unsigned char v_v = vec_ld(j, vsrc);
- vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
- vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
- vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
- vec_st(v_uyvy_0, (i << 1), dst);
- vec_st(v_uyvy_1, (i << 1) + 16, dst);
- }
- if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
- {
- usrc += chromStride;
- vsrc += chromStride;
- }
- ysrc += lumStride;
- dst += dstStride;
+    /* this code assumes:
+
+ 1) dst is 16 bytes-aligned
+ 2) dstStride is a multiple of 16
+ 3) width is a multiple of 16
+ 4) lum&chrom stride are multiple of 8
+ */
+
+ for (y=0; y<height; y++) {
+ int i;
+ for (i = 0; i < width - 31; i+= 32) {
+ const unsigned int j = i >> 1;
+ vector unsigned char v_yA = vec_ld(i, ysrc);
+ vector unsigned char v_yB = vec_ld(i + 16, ysrc);
+ vector unsigned char v_yC = vec_ld(i + 32, ysrc);
+ vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
+ vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
+ vector unsigned char v_uA = vec_ld(j, usrc);
+ vector unsigned char v_uB = vec_ld(j + 16, usrc);
+ vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
+ vector unsigned char v_vA = vec_ld(j, vsrc);
+ vector unsigned char v_vB = vec_ld(j + 16, vsrc);
+ vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
+ vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+ vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
+ vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
+ vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
+ vector unsigned char v_uyvy_2 = vec_mergeh(v_uv_b, v_y2);
+ vector unsigned char v_uyvy_3 = vec_mergel(v_uv_b, v_y2);
+ vec_st(v_uyvy_0, (i << 1), dst);
+ vec_st(v_uyvy_1, (i << 1) + 16, dst);
+ vec_st(v_uyvy_2, (i << 1) + 32, dst);
+ vec_st(v_uyvy_3, (i << 1) + 48, dst);
+ }
+ if (i < width) {
+ const unsigned int j = i >> 1;
+ vector unsigned char v_y1 = vec_ld(i, ysrc);
+ vector unsigned char v_u = vec_ld(j, usrc);
+ vector unsigned char v_v = vec_ld(j, vsrc);
+ vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
+ vector unsigned char v_uyvy_0 = vec_mergeh(v_uv_a, v_y1);
+ vector unsigned char v_uyvy_1 = vec_mergel(v_uv_a, v_y1);
+ vec_st(v_uyvy_0, (i << 1), dst);
+ vec_st(v_uyvy_1, (i << 1) + 16, dst);
+ }
+ if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
+ usrc += chromStride;
+ vsrc += chromStride;
+ }
+ ysrc += lumStride;
+ dst += dstStride;
}
- return srcSliceH;
+ return srcSliceH;
}
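In scalar terms both unscaled converters above interleave planar YV12 into a packed 4:2:2 stream: YUY2 orders each pixel pair as Y0 U Y1 V, UYVY as U Y0 V Y1, and the chroma rows advance every second luma row. A sketch of the YUY2 variant (hypothetical name; the AltiVec code additionally requires 16-byte alignment and width a multiple of 16):

#include <stdint.h>

static void yv12_to_yuy2_c(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc,
                           uint8_t *dst, int width, int height,
                           int lumStride, int chromStride, int dstStride)
{
    int x, line;
    for (line = 0; line < height; line++) {
        for (x = 0; x < width; x += 2) {
            dst[2*x + 0] = ysrc[x];
            dst[2*x + 1] = usrc[x >> 1];
            dst[2*x + 2] = ysrc[x + 1];
            dst[2*x + 3] = vsrc[x >> 1];
        }
        ysrc += lumStride;
        dst  += dstStride;
        if (line & 1) {           /* 4:2:0 input: one chroma row per two luma rows */
            usrc += chromStride;
            vsrc += chromStride;
        }
    }
}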
diff --git a/contrib/ffmpeg/libswscale/swscale_bfin.c b/contrib/ffmpeg/libswscale/swscale_bfin.c
new file mode 100644
index 000000000..bbc304dfe
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/swscale_bfin.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2007 Marc Hoffman <marc.hoffman@analog.com>
+ *
+ * Blackfin Software Video SCALER Operations
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+#include "config.h"
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#include <unistd.h>
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+
+#ifdef __FDPIC__
+#define L1CODE __attribute__ ((l1_text))
+#else
+#define L1CODE
+#endif
+
+extern int ff_bfin_uyvytoyv12 (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride) L1CODE;
+
+extern int ff_bfin_yuyvtoyv12 (const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
+ long width, long height,
+ long lumStride, long chromStride, long srcStride) L1CODE;
+
+static int uyvytoyv12_unscaled (SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+ uint8_t *dsty = dst[0] + dstStride[0]*srcSliceY;
+ uint8_t *dstu = dst[1] + dstStride[1]*srcSliceY/2;
+ uint8_t *dstv = dst[2] + dstStride[2]*srcSliceY/2;
+ uint8_t *ip = src[0] + srcStride[0]*srcSliceY;
+ int w = dstStride[0];
+
+ ff_bfin_uyvytoyv12 (ip, dsty, dstu, dstv, w, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
+
+ return srcSliceH;
+}
+
+static int yuyvtoyv12_unscaled (SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[])
+{
+ uint8_t *dsty = dst[0] + dstStride[0]*srcSliceY;
+ uint8_t *dstu = dst[1] + dstStride[1]*srcSliceY/2;
+ uint8_t *dstv = dst[2] + dstStride[2]*srcSliceY/2;
+ uint8_t *ip = src[0] + srcStride[0]*srcSliceY;
+ int w = dstStride[0];
+
+ ff_bfin_yuyvtoyv12 (ip, dsty, dstu, dstv, w, srcSliceH, dstStride[0], dstStride[1], srcStride[0]);
+
+ return srcSliceH;
+}
+
+
+void ff_bfin_get_unscaled_swscale (SwsContext *c)
+{
+ SwsFunc swScale = c->swScale;
+ if (c->flags & SWS_CPU_CAPS_BFIN)
+ if (c->dstFormat == PIX_FMT_YUV420P)
+ if (c->srcFormat == PIX_FMT_UYVY422) {
+ av_log (NULL, AV_LOG_VERBOSE, "selecting Blackfin optimized uyvytoyv12_unscaled\n");
+ c->swScale = uyvytoyv12_unscaled;
+ }
+ if (c->dstFormat == PIX_FMT_YUV420P)
+ if (c->srcFormat == PIX_FMT_YUYV422) {
+ av_log (NULL, AV_LOG_VERBOSE, "selecting Blackfin optimized yuyvtoyv12_unscaled\n");
+ c->swScale = yuyvtoyv12_unscaled;
+ }
+}
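The two Blackfin entry points split a packed 4:2:2 stream back into YV12 planes in L1-resident assembly. A generic scalar equivalent for the UYVY case is sketched below with a hypothetical name; the chroma handling shown (keep even rows only) is the usual unscaled-converter shortcut and may differ from what the actual assembly does:

#include <stdint.h>

static void uyvy_to_yv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                           int width, int height,
                           int lumStride, int chromStride, int srcStride)
{
    int x, y;
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x += 2) {        /* UYVY: U Y0 V Y1 per pixel pair */
            ydst[x]     = src[2*x + 1];
            ydst[x + 1] = src[2*x + 3];
            if (!(y & 1)) {                     /* keep chroma from even rows     */
                udst[x >> 1] = src[2*x + 0];
                vdst[x >> 1] = src[2*x + 2];
            }
        }
        src  += srcStride;
        ydst += lumStride;
        if (y & 1) {
            udst += chromStride;
            vdst += chromStride;
        }
    }
}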
diff --git a/contrib/ffmpeg/libswscale/swscale_internal.h b/contrib/ffmpeg/libswscale/swscale_internal.h
index 5b62ea065..aa583a6b1 100644
--- a/contrib/ffmpeg/libswscale/swscale_internal.h
+++ b/contrib/ffmpeg/libswscale/swscale_internal.h
@@ -15,11 +15,13 @@
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef SWSCALE_INTERNAL_H
-#define SWSCALE_INTERNAL_H
+#ifndef FFMPEG_SWSCALE_INTERNAL_H
+#define FFMPEG_SWSCALE_INTERNAL_H
+
+#include "config.h"
#ifdef HAVE_ALTIVEC_H
#include <altivec.h>
@@ -27,12 +29,6 @@
#include "avutil.h"
-#ifdef CONFIG_DARWIN
-#define AVV(x...) (x)
-#else
-#define AVV(x...) {x}
-#endif
-
#define MAX_FILTER_SIZE 256
typedef int (*SwsFunc)(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
@@ -40,114 +36,114 @@ typedef int (*SwsFunc)(struct SwsContext *context, uint8_t* src[], int srcStride
/* this struct should be aligned on at least 32-byte boundary */
typedef struct SwsContext{
- /**
- * info on struct for av_log
- */
- AVClass *av_class;
-
- /**
- *
- * Note the src,dst,srcStride,dstStride will be copied, in the sws_scale() warper so they can freely be modified here
- */
- SwsFunc swScale;
- int srcW, srcH, dstH;
- int chrSrcW, chrSrcH, chrDstW, chrDstH;
- int lumXInc, chrXInc;
- int lumYInc, chrYInc;
- int dstFormat, srcFormat; ///< format 4:2:0 type is allways YV12
- int origDstFormat, origSrcFormat; ///< format
- int chrSrcHSubSample, chrSrcVSubSample;
- int chrIntHSubSample, chrIntVSubSample;
- int chrDstHSubSample, chrDstVSubSample;
- int vChrDrop;
- int sliceDir;
- double param[2];
-
- int16_t **lumPixBuf;
- int16_t **chrPixBuf;
- int16_t *hLumFilter;
- int16_t *hLumFilterPos;
- int16_t *hChrFilter;
- int16_t *hChrFilterPos;
- int16_t *vLumFilter;
- int16_t *vLumFilterPos;
- int16_t *vChrFilter;
- int16_t *vChrFilterPos;
-
- uint8_t formatConvBuffer[4000]; //FIXME dynamic alloc, but we have to change a lot of code for this to be useful
-
- int hLumFilterSize;
- int hChrFilterSize;
- int vLumFilterSize;
- int vChrFilterSize;
- int vLumBufSize;
- int vChrBufSize;
-
- uint8_t *funnyYCode;
- uint8_t *funnyUVCode;
- int32_t *lumMmx2FilterPos;
- int32_t *chrMmx2FilterPos;
- int16_t *lumMmx2Filter;
- int16_t *chrMmx2Filter;
-
- int canMMX2BeUsed;
-
- int lastInLumBuf;
- int lastInChrBuf;
- int lumBufIndex;
- int chrBufIndex;
- int dstY;
- int flags;
- void * yuvTable; // pointer to the yuv->rgb table start so it can be freed()
- uint8_t * table_rV[256];
- uint8_t * table_gU[256];
- int table_gV[256];
- uint8_t * table_bU[256];
-
- //Colorspace stuff
- int contrast, brightness, saturation; // for sws_getColorspaceDetails
- int srcColorspaceTable[4];
- int dstColorspaceTable[4];
- int srcRange, dstRange;
-
-#define RED_DITHER "0*8"
-#define GREEN_DITHER "1*8"
-#define BLUE_DITHER "2*8"
-#define Y_COEFF "3*8"
-#define VR_COEFF "4*8"
-#define UB_COEFF "5*8"
-#define VG_COEFF "6*8"
-#define UG_COEFF "7*8"
-#define Y_OFFSET "8*8"
-#define U_OFFSET "9*8"
-#define V_OFFSET "10*8"
+ /**
+ * info on struct for av_log
+ */
+ AVClass *av_class;
+
+ /**
+ * Note that src, dst, srcStride, dstStride will be copied in the
+ * sws_scale() wrapper so they can be freely modified here.
+ */
+ SwsFunc swScale;
+ int srcW, srcH, dstH;
+ int chrSrcW, chrSrcH, chrDstW, chrDstH;
+ int lumXInc, chrXInc;
+ int lumYInc, chrYInc;
+ int dstFormat, srcFormat; ///< format 4:2:0 type is always YV12
+ int origDstFormat, origSrcFormat; ///< format
+ int chrSrcHSubSample, chrSrcVSubSample;
+ int chrIntHSubSample, chrIntVSubSample;
+ int chrDstHSubSample, chrDstVSubSample;
+ int vChrDrop;
+ int sliceDir;
+ double param[2];
+
+ int16_t **lumPixBuf;
+ int16_t **chrPixBuf;
+ int16_t *hLumFilter;
+ int16_t *hLumFilterPos;
+ int16_t *hChrFilter;
+ int16_t *hChrFilterPos;
+ int16_t *vLumFilter;
+ int16_t *vLumFilterPos;
+ int16_t *vChrFilter;
+ int16_t *vChrFilterPos;
+
+ uint8_t formatConvBuffer[4000]; //FIXME dynamic alloc, but we have to change a lot of code for this to be useful
+
+ int hLumFilterSize;
+ int hChrFilterSize;
+ int vLumFilterSize;
+ int vChrFilterSize;
+ int vLumBufSize;
+ int vChrBufSize;
+
+ uint8_t *funnyYCode;
+ uint8_t *funnyUVCode;
+ int32_t *lumMmx2FilterPos;
+ int32_t *chrMmx2FilterPos;
+ int16_t *lumMmx2Filter;
+ int16_t *chrMmx2Filter;
+
+ int canMMX2BeUsed;
+
+ int lastInLumBuf;
+ int lastInChrBuf;
+ int lumBufIndex;
+ int chrBufIndex;
+ int dstY;
+ int flags;
+ void * yuvTable; // pointer to the yuv->rgb table start so it can be freed()
+ uint8_t * table_rV[256];
+ uint8_t * table_gU[256];
+ int table_gV[256];
+ uint8_t * table_bU[256];
+
+ //Colorspace stuff
+ int contrast, brightness, saturation; // for sws_getColorspaceDetails
+ int srcColorspaceTable[4];
+ int dstColorspaceTable[4];
+ int srcRange, dstRange;
+
+#define RED_DITHER "0*8"
+#define GREEN_DITHER "1*8"
+#define BLUE_DITHER "2*8"
+#define Y_COEFF "3*8"
+#define VR_COEFF "4*8"
+#define UB_COEFF "5*8"
+#define VG_COEFF "6*8"
+#define UG_COEFF "7*8"
+#define Y_OFFSET "8*8"
+#define U_OFFSET "9*8"
+#define V_OFFSET "10*8"
#define LUM_MMX_FILTER_OFFSET "11*8"
#define CHR_MMX_FILTER_OFFSET "11*8+4*4*256"
-#define DSTW_OFFSET "11*8+4*4*256*2" //do not change, its hardcoded in the asm
-#define ESP_OFFSET "11*8+4*4*256*2+8"
-#define VROUNDER_OFFSET "11*8+4*4*256*2+16"
-#define U_TEMP "11*8+4*4*256*2+24"
-#define V_TEMP "11*8+4*4*256*2+32"
-
- uint64_t redDither __attribute__((aligned(8)));
- uint64_t greenDither __attribute__((aligned(8)));
- uint64_t blueDither __attribute__((aligned(8)));
-
- uint64_t yCoeff __attribute__((aligned(8)));
- uint64_t vrCoeff __attribute__((aligned(8)));
- uint64_t ubCoeff __attribute__((aligned(8)));
- uint64_t vgCoeff __attribute__((aligned(8)));
- uint64_t ugCoeff __attribute__((aligned(8)));
- uint64_t yOffset __attribute__((aligned(8)));
- uint64_t uOffset __attribute__((aligned(8)));
- uint64_t vOffset __attribute__((aligned(8)));
- int32_t lumMmxFilter[4*MAX_FILTER_SIZE];
- int32_t chrMmxFilter[4*MAX_FILTER_SIZE];
- int dstW;
- uint64_t esp __attribute__((aligned(8)));
- uint64_t vRounder __attribute__((aligned(8)));
- uint64_t u_temp __attribute__((aligned(8)));
- uint64_t v_temp __attribute__((aligned(8)));
+#define DSTW_OFFSET "11*8+4*4*256*2" //do not change, it is hardcoded in the asm
+#define ESP_OFFSET "11*8+4*4*256*2+8"
+#define VROUNDER_OFFSET "11*8+4*4*256*2+16"
+#define U_TEMP "11*8+4*4*256*2+24"
+#define V_TEMP "11*8+4*4*256*2+32"
+
+ uint64_t redDither __attribute__((aligned(8)));
+ uint64_t greenDither __attribute__((aligned(8)));
+ uint64_t blueDither __attribute__((aligned(8)));
+
+ uint64_t yCoeff __attribute__((aligned(8)));
+ uint64_t vrCoeff __attribute__((aligned(8)));
+ uint64_t ubCoeff __attribute__((aligned(8)));
+ uint64_t vgCoeff __attribute__((aligned(8)));
+ uint64_t ugCoeff __attribute__((aligned(8)));
+ uint64_t yOffset __attribute__((aligned(8)));
+ uint64_t uOffset __attribute__((aligned(8)));
+ uint64_t vOffset __attribute__((aligned(8)));
+ int32_t lumMmxFilter[4*MAX_FILTER_SIZE];
+ int32_t chrMmxFilter[4*MAX_FILTER_SIZE];
+ int dstW;
+ uint64_t esp __attribute__((aligned(8)));
+ uint64_t vRounder __attribute__((aligned(8)));
+ uint64_t u_temp __attribute__((aligned(8)));
+ uint64_t v_temp __attribute__((aligned(8)));
#ifdef HAVE_ALTIVEC
@@ -158,8 +154,27 @@ typedef struct SwsContext{
vector signed short CGV;
vector signed short OY;
vector unsigned short CSHIFT;
- vector signed short *vYCoeffsBank, *vCCoeffsBank;
+ vector signed short *vYCoeffsBank, *vCCoeffsBank;
+
+#endif
+
+#ifdef ARCH_BFIN
+ uint32_t oy __attribute__((aligned(4)));
+ uint32_t oc __attribute__((aligned(4)));
+ uint32_t zero __attribute__((aligned(4)));
+ uint32_t cy __attribute__((aligned(4)));
+ uint32_t crv __attribute__((aligned(4)));
+ uint32_t rmask __attribute__((aligned(4)));
+ uint32_t cbu __attribute__((aligned(4)));
+ uint32_t bmask __attribute__((aligned(4)));
+ uint32_t cgu __attribute__((aligned(4)));
+ uint32_t cgv __attribute__((aligned(4)));
+ uint32_t gmask __attribute__((aligned(4)));
+#endif
+
+#ifdef HAVE_VIS
+ uint64_t sparc_coeffs[10] __attribute__((aligned(8)));
#endif
} SwsContext;
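
[Editor's note: the MMX code paths address the aligned uint64_t block of SwsContext through the stringified byte offsets defined above (RED_DITHER "0*8" through V_TEMP), counted from &c->redDither, which the asm receives as its base pointer. The following is a minimal, self-contained sketch of that pattern with a hypothetical mini_ctx struct (not the real SwsContext), showing how such hardcoded offsets can be cross-checked against the actual layout with offsetof(). It assumes a GCC-compatible compiler for __attribute__((aligned)).]

/* Hypothetical illustration of the "n*8"-offset pattern used above. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mini_ctx {
    int      misc;                                    /* unrelated leading field  */
    uint64_t redDither   __attribute__((aligned(8))); /* offset "0*8" from base   */
    uint64_t greenDither __attribute__((aligned(8))); /* offset "1*8"             */
    uint64_t blueDither  __attribute__((aligned(8))); /* offset "2*8"             */
    uint64_t yCoeff      __attribute__((aligned(8))); /* offset "3*8"             */
};

int main(void)
{
    /* The asm base pointer corresponds to the first member of the block. */
    size_t base = offsetof(struct mini_ctx, redDither);

    /* Each check mirrors one of the stringified offsets used in the asm macros. */
    printf("greenDither at base+%zu (expect 8)\n",
           offsetof(struct mini_ctx, greenDither) - base);
    printf("blueDither  at base+%zu (expect 16)\n",
           offsetof(struct mini_ctx, blueDither) - base);
    printf("yCoeff      at base+%zu (expect 24)\n",
           offsetof(struct mini_ctx, yCoeff) - base);
    return 0;
}
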
@@ -168,24 +183,60 @@ typedef struct SwsContext{
SwsFunc yuv2rgb_get_func_ptr (SwsContext *c);
int yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[4], int fullRange, int brightness, int contrast, int saturation);
+void yuv2rgb_altivec_init_tables (SwsContext *c, const int inv_table[4],int brightness,int contrast, int saturation);
+SwsFunc yuv2rgb_init_altivec (SwsContext *c);
+void altivec_yuv2packedX (SwsContext *c,
+ int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, int dstW, int dstY);
+
char *sws_format_name(int format);
//FIXME replace this with something faster
-#define isPlanarYUV(x) ((x)==PIX_FMT_YUV410P || (x)==PIX_FMT_YUV420P \
- || (x)==PIX_FMT_YUV411P || (x)==PIX_FMT_YUV422P \
- || (x)==PIX_FMT_YUV444P || (x)==PIX_FMT_NV12 \
- || (x)==PIX_FMT_NV21)
-#define isYUV(x) ((x)==PIX_FMT_UYVY422 || (x)==PIX_FMT_YUYV422 || isPlanarYUV(x))
-#define isGray(x) ((x)==PIX_FMT_GRAY8 || (x)==PIX_FMT_GRAY16BE || (x)==PIX_FMT_GRAY16LE)
-#define isGray16(x) ((x)==PIX_FMT_GRAY16BE || (x)==PIX_FMT_GRAY16LE)
-#define isRGB(x) ((x)==PIX_FMT_BGR32 || (x)==PIX_FMT_RGB24 \
- || (x)==PIX_FMT_RGB565 || (x)==PIX_FMT_RGB555 \
- || (x)==PIX_FMT_RGB8 || (x)==PIX_FMT_RGB4 || (x)==PIX_FMT_RGB4_BYTE \
- || (x)==PIX_FMT_MONOBLACK)
-#define isBGR(x) ((x)==PIX_FMT_RGB32 || (x)==PIX_FMT_BGR24 \
- || (x)==PIX_FMT_BGR565 || (x)==PIX_FMT_BGR555 \
- || (x)==PIX_FMT_BGR8 || (x)==PIX_FMT_BGR4 || (x)==PIX_FMT_BGR4_BYTE \
- || (x)==PIX_FMT_MONOBLACK)
+#define isPlanarYUV(x) ( \
+ (x)==PIX_FMT_YUV410P \
+ || (x)==PIX_FMT_YUV420P \
+ || (x)==PIX_FMT_YUV411P \
+ || (x)==PIX_FMT_YUV422P \
+ || (x)==PIX_FMT_YUV444P \
+ || (x)==PIX_FMT_YUV440P \
+ || (x)==PIX_FMT_NV12 \
+ || (x)==PIX_FMT_NV21 \
+ )
+#define isYUV(x) ( \
+ (x)==PIX_FMT_UYVY422 \
+ || (x)==PIX_FMT_YUYV422 \
+ || isPlanarYUV(x) \
+ )
+#define isGray(x) ( \
+ (x)==PIX_FMT_GRAY8 \
+ || (x)==PIX_FMT_GRAY16BE \
+ || (x)==PIX_FMT_GRAY16LE \
+ )
+#define isGray16(x) ( \
+ (x)==PIX_FMT_GRAY16BE \
+ || (x)==PIX_FMT_GRAY16LE \
+ )
+#define isRGB(x) ( \
+ (x)==PIX_FMT_BGR32 \
+ || (x)==PIX_FMT_RGB24 \
+ || (x)==PIX_FMT_RGB565 \
+ || (x)==PIX_FMT_RGB555 \
+ || (x)==PIX_FMT_RGB8 \
+ || (x)==PIX_FMT_RGB4 \
+ || (x)==PIX_FMT_RGB4_BYTE \
+ || (x)==PIX_FMT_MONOBLACK \
+ )
+#define isBGR(x) ( \
+ (x)==PIX_FMT_RGB32 \
+ || (x)==PIX_FMT_BGR24 \
+ || (x)==PIX_FMT_BGR565 \
+ || (x)==PIX_FMT_BGR555 \
+ || (x)==PIX_FMT_BGR8 \
+ || (x)==PIX_FMT_BGR4 \
+ || (x)==PIX_FMT_BGR4_BYTE \
+ || (x)==PIX_FMT_MONOBLACK \
+ )
static inline int fmt_depth(int fmt)
{
@@ -221,4 +272,7 @@ static inline int fmt_depth(int fmt)
}
}
-#endif
+extern const DECLARE_ALIGNED(8, uint64_t, ff_dither4[2]);
+extern const DECLARE_ALIGNED(8, uint64_t, ff_dither8[2]);
+
+#endif /* FFMPEG_SWSCALE_INTERNAL_H */
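
[Editor's note: the reformatted isPlanarYUV/isYUV/isGray/isRGB/isBGR macros above are plain chained comparisons on a pixel-format enum, and fmt_depth() is the matching switch. The sketch below shows how calling code typically combines them, using a stand-in enum and reduced format list; the real PIX_FMT_* constants live in libavutil and the real macros in swscale_internal.h.]

/* Rough illustration with hypothetical stand-in formats. */
#include <stdio.h>

enum { FMT_YUV420P, FMT_YUYV422, FMT_GRAY8, FMT_RGB24, FMT_BGR24 };

#define isPlanarYUV(x) ((x) == FMT_YUV420P)
#define isYUV(x)       ((x) == FMT_YUYV422 || isPlanarYUV(x))
#define isGray(x)      ((x) == FMT_GRAY8)

static int fmt_depth(int fmt)          /* same switch pattern as in the header */
{
    switch (fmt) {
    case FMT_RGB24:
    case FMT_BGR24: return 24;
    case FMT_GRAY8: return 8;
    default:        return 0;
    }
}

int main(void)
{
    printf("YUV420P: yuv=%d planar=%d\n", isYUV(FMT_YUV420P), isPlanarYUV(FMT_YUV420P));
    printf("RGB24:   yuv=%d depth=%d\n",  isYUV(FMT_RGB24),   fmt_depth(FMT_RGB24));
    return 0;
}
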
diff --git a/contrib/ffmpeg/libswscale/swscale_template.c b/contrib/ffmpeg/libswscale/swscale_template.c
index ad46be127..50bb260f2 100644
--- a/contrib/ffmpeg/libswscale/swscale_template.c
+++ b/contrib/ffmpeg/libswscale/swscale_template.c
@@ -15,7 +15,7 @@
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* the C code (not assembly, mmx, ...) of this file can be used
* under the LGPL license too
@@ -39,7 +39,7 @@
#ifdef HAVE_3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
-#elif defined ( HAVE_MMX2 )
+#elif defined (HAVE_MMX2)
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
@@ -71,835 +71,835 @@
#endif
#define YSCALEYUV2YV12X(x, offset, dest, width) \
- asm volatile(\
- "xor %%"REG_a", %%"REG_a" \n\t"\
- "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
- "movq %%mm3, %%mm4 \n\t"\
- "lea " offset "(%0), %%"REG_d" \n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- ASMALIGN(4) /* FIXME Unroll? */\
- "1: \n\t"\
- "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
- "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
- "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
- "add $16, %%"REG_d" \n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "test %%"REG_S", %%"REG_S" \n\t"\
- "pmulhw %%mm0, %%mm2 \n\t"\
- "pmulhw %%mm0, %%mm5 \n\t"\
- "paddw %%mm2, %%mm3 \n\t"\
- "paddw %%mm5, %%mm4 \n\t"\
- " jnz 1b \n\t"\
- "psraw $3, %%mm3 \n\t"\
- "psraw $3, %%mm4 \n\t"\
- "packuswb %%mm4, %%mm3 \n\t"\
- MOVNTQ(%%mm3, (%1, %%REGa))\
- "add $8, %%"REG_a" \n\t"\
- "cmp %2, %%"REG_a" \n\t"\
- "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
- "movq %%mm3, %%mm4 \n\t"\
- "lea " offset "(%0), %%"REG_d" \n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "jb 1b \n\t"\
- :: "r" (&c->redDither),\
- "r" (dest), "g" (width)\
- : "%"REG_a, "%"REG_d, "%"REG_S\
- );
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ ASMALIGN(4) /* FIXME Unroll? */\
+ "1: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm3 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ " jnz 1b \n\t"\
+ "psraw $3, %%mm3 \n\t"\
+ "psraw $3, %%mm4 \n\t"\
+ "packuswb %%mm4, %%mm3 \n\t"\
+ MOVNTQ(%%mm3, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "cmp %2, %%"REG_a" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "jb 1b \n\t"\
+ :: "r" (&c->redDither),\
+ "r" (dest), "g" (width)\
+ : "%"REG_a, "%"REG_d, "%"REG_S\
+ );
#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
- asm volatile(\
- "lea " offset "(%0), %%"REG_d" \n\t"\
- "xor %%"REG_a", %%"REG_a" \n\t"\
- "pxor %%mm4, %%mm4 \n\t"\
- "pxor %%mm5, %%mm5 \n\t"\
- "pxor %%mm6, %%mm6 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- ASMALIGN(4) \
- "1: \n\t"\
- "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0\n\t" /* srcData */\
- "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
- "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
- "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1\n\t" /* srcData */\
- "movq %%mm0, %%mm3 \n\t"\
- "punpcklwd %%mm1, %%mm0 \n\t"\
- "punpckhwd %%mm1, %%mm3 \n\t"\
- "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
- "pmaddwd %%mm1, %%mm0 \n\t"\
- "pmaddwd %%mm1, %%mm3 \n\t"\
- "paddd %%mm0, %%mm4 \n\t"\
- "paddd %%mm3, %%mm5 \n\t"\
- "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3\n\t" /* srcData */\
- "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
- "add $16, %%"REG_d" \n\t"\
- "test %%"REG_S", %%"REG_S" \n\t"\
- "movq %%mm2, %%mm0 \n\t"\
- "punpcklwd %%mm3, %%mm2 \n\t"\
- "punpckhwd %%mm3, %%mm0 \n\t"\
- "pmaddwd %%mm1, %%mm2 \n\t"\
- "pmaddwd %%mm1, %%mm0 \n\t"\
- "paddd %%mm2, %%mm6 \n\t"\
- "paddd %%mm0, %%mm7 \n\t"\
- " jnz 1b \n\t"\
- "psrad $16, %%mm4 \n\t"\
- "psrad $16, %%mm5 \n\t"\
- "psrad $16, %%mm6 \n\t"\
- "psrad $16, %%mm7 \n\t"\
- "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
- "packssdw %%mm5, %%mm4 \n\t"\
- "packssdw %%mm7, %%mm6 \n\t"\
- "paddw %%mm0, %%mm4 \n\t"\
- "paddw %%mm0, %%mm6 \n\t"\
- "psraw $3, %%mm4 \n\t"\
- "psraw $3, %%mm6 \n\t"\
- "packuswb %%mm6, %%mm4 \n\t"\
- MOVNTQ(%%mm4, (%1, %%REGa))\
- "add $8, %%"REG_a" \n\t"\
- "cmp %2, %%"REG_a" \n\t"\
- "lea " offset "(%0), %%"REG_d" \n\t"\
- "pxor %%mm4, %%mm4 \n\t"\
- "pxor %%mm5, %%mm5 \n\t"\
- "pxor %%mm6, %%mm6 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "jb 1b \n\t"\
- :: "r" (&c->redDither),\
- "r" (dest), "g" (width)\
- : "%"REG_a, "%"REG_d, "%"REG_S\
- );
+ asm volatile(\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ ASMALIGN(4) \
+ "1: \n\t"\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
+ "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm1, %%mm0 \n\t"\
+ "punpckhwd %%mm1, %%mm3 \n\t"\
+ "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm4 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
+ "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $16, %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm2 \n\t"\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm6 \n\t"\
+ "paddd %%mm0, %%mm7 \n\t"\
+ " jnz 1b \n\t"\
+ "psrad $16, %%mm4 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
+ "packssdw %%mm5, %%mm4 \n\t"\
+ "packssdw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm0, %%mm4 \n\t"\
+ "paddw %%mm0, %%mm6 \n\t"\
+ "psraw $3, %%mm4 \n\t"\
+ "psraw $3, %%mm6 \n\t"\
+ "packuswb %%mm6, %%mm4 \n\t"\
+ MOVNTQ(%%mm4, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "cmp %2, %%"REG_a" \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "jb 1b \n\t"\
+ :: "r" (&c->redDither),\
+ "r" (dest), "g" (width)\
+ : "%"REG_a, "%"REG_d, "%"REG_S\
+ );
#define YSCALEYUV2YV121 \
- "mov %2, %%"REG_a" \n\t"\
- ASMALIGN(4) /* FIXME Unroll? */\
- "1: \n\t"\
- "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
- "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
- "psraw $7, %%mm0 \n\t"\
- "psraw $7, %%mm1 \n\t"\
- "packuswb %%mm1, %%mm0 \n\t"\
- MOVNTQ(%%mm0, (%1, %%REGa))\
- "add $8, %%"REG_a" \n\t"\
- "jnc 1b \n\t"
+ "mov %2, %%"REG_a" \n\t"\
+ ASMALIGN(4) /* FIXME Unroll? */\
+ "1: \n\t"\
+ "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
+ "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
+ "psraw $7, %%mm0 \n\t"\
+ "psraw $7, %%mm1 \n\t"\
+ "packuswb %%mm1, %%mm0 \n\t"\
+ MOVNTQ(%%mm0, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "jnc 1b \n\t"
/*
- :: "m" (-lumFilterSize), "m" (-chrFilterSize),
- "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
- "r" (dest), "m" (dstW),
- "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
- : "%eax", "%ebx", "%ecx", "%edx", "%esi"
+ :: "m" (-lumFilterSize), "m" (-chrFilterSize),
+ "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
+ "r" (dest), "m" (dstW),
+ "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
+ : "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
#define YSCALEYUV2PACKEDX \
- asm volatile(\
- "xor %%"REG_a", %%"REG_a" \n\t"\
- ASMALIGN(4)\
- "nop \n\t"\
- "1: \n\t"\
- "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
- "movq %%mm3, %%mm4 \n\t"\
- ASMALIGN(4)\
- "2: \n\t"\
- "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
- "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
- "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
- "add $16, %%"REG_d" \n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "pmulhw %%mm0, %%mm2 \n\t"\
- "pmulhw %%mm0, %%mm5 \n\t"\
- "paddw %%mm2, %%mm3 \n\t"\
- "paddw %%mm5, %%mm4 \n\t"\
- "test %%"REG_S", %%"REG_S" \n\t"\
- " jnz 2b \n\t"\
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN(4)\
+ "nop \n\t"\
+ "1: \n\t"\
+ "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
+ "movq %%mm3, %%mm4 \n\t"\
+ ASMALIGN(4)\
+ "2: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm3 \n\t"\
+ "paddw %%mm5, %%mm4 \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ " jnz 2b \n\t"\
\
- "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
- "movq %%mm1, %%mm7 \n\t"\
- ASMALIGN(4)\
- "2: \n\t"\
- "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
- "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
- "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
- "add $16, %%"REG_d" \n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "pmulhw %%mm0, %%mm2 \n\t"\
- "pmulhw %%mm0, %%mm5 \n\t"\
- "paddw %%mm2, %%mm1 \n\t"\
- "paddw %%mm5, %%mm7 \n\t"\
- "test %%"REG_S", %%"REG_S" \n\t"\
- " jnz 2b \n\t"\
-
-#define YSCALEYUV2PACKEDX_END\
- :: "r" (&c->redDither), \
- "m" (dummy), "m" (dummy), "m" (dummy),\
- "r" (dest), "m" (dstW)\
- : "%"REG_a, "%"REG_d, "%"REG_S\
- );
+ "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm1 \n\t"\
+ "movq %%mm1, %%mm7 \n\t"\
+ ASMALIGN(4)\
+ "2: \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t"\
+ "pmulhw %%mm0, %%mm5 \n\t"\
+ "paddw %%mm2, %%mm1 \n\t"\
+ "paddw %%mm5, %%mm7 \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ " jnz 2b \n\t"\
+
+#define YSCALEYUV2PACKEDX_END \
+ :: "r" (&c->redDither), \
+ "m" (dummy), "m" (dummy), "m" (dummy),\
+ "r" (dest), "m" (dstW) \
+ : "%"REG_a, "%"REG_d, "%"REG_S \
+ );
#define YSCALEYUV2PACKEDX_ACCURATE \
- asm volatile(\
- "xor %%"REG_a", %%"REG_a" \n\t"\
- ASMALIGN(4)\
- "nop \n\t"\
- "1: \n\t"\
- "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "pxor %%mm4, %%mm4 \n\t"\
- "pxor %%mm5, %%mm5 \n\t"\
- "pxor %%mm6, %%mm6 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"\
- ASMALIGN(4)\
- "2: \n\t"\
- "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
- "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
- "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
- "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
- "movq %%mm0, %%mm3 \n\t"\
- "punpcklwd %%mm1, %%mm0 \n\t"\
- "punpckhwd %%mm1, %%mm3 \n\t"\
- "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
- "pmaddwd %%mm1, %%mm0 \n\t"\
- "pmaddwd %%mm1, %%mm3 \n\t"\
- "paddd %%mm0, %%mm4 \n\t"\
- "paddd %%mm3, %%mm5 \n\t"\
- "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
- "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
- "add $16, %%"REG_d" \n\t"\
- "test %%"REG_S", %%"REG_S" \n\t"\
- "movq %%mm2, %%mm0 \n\t"\
- "punpcklwd %%mm3, %%mm2 \n\t"\
- "punpckhwd %%mm3, %%mm0 \n\t"\
- "pmaddwd %%mm1, %%mm2 \n\t"\
- "pmaddwd %%mm1, %%mm0 \n\t"\
- "paddd %%mm2, %%mm6 \n\t"\
- "paddd %%mm0, %%mm7 \n\t"\
- " jnz 2b \n\t"\
- "psrad $16, %%mm4 \n\t"\
- "psrad $16, %%mm5 \n\t"\
- "psrad $16, %%mm6 \n\t"\
- "psrad $16, %%mm7 \n\t"\
- "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
- "packssdw %%mm5, %%mm4 \n\t"\
- "packssdw %%mm7, %%mm6 \n\t"\
- "paddw %%mm0, %%mm4 \n\t"\
- "paddw %%mm0, %%mm6 \n\t"\
- "movq %%mm4, "U_TEMP"(%0) \n\t"\
- "movq %%mm6, "V_TEMP"(%0) \n\t"\
+ asm volatile(\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN(4)\
+ "nop \n\t"\
+ "1: \n\t"\
+ "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pxor %%mm4, %%mm4 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ ASMALIGN(4)\
+ "2: \n\t"\
+ "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
+ "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm1, %%mm0 \n\t"\
+ "punpckhwd %%mm1, %%mm3 \n\t"\
+ "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm4 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
+ "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $16, %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm1, %%mm2 \n\t"\
+ "pmaddwd %%mm1, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm6 \n\t"\
+ "paddd %%mm0, %%mm7 \n\t"\
+ " jnz 2b \n\t"\
+ "psrad $16, %%mm4 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
+ "packssdw %%mm5, %%mm4 \n\t"\
+ "packssdw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm0, %%mm4 \n\t"\
+ "paddw %%mm0, %%mm6 \n\t"\
+ "movq %%mm4, "U_TEMP"(%0) \n\t"\
+ "movq %%mm6, "V_TEMP"(%0) \n\t"\
\
- "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
- "mov (%%"REG_d"), %%"REG_S" \n\t"\
- "pxor %%mm1, %%mm1 \n\t"\
- "pxor %%mm5, %%mm5 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"\
- "pxor %%mm6, %%mm6 \n\t"\
- ASMALIGN(4)\
- "2: \n\t"\
- "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
- "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
- "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
- "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
- "movq %%mm0, %%mm3 \n\t"\
- "punpcklwd %%mm4, %%mm0 \n\t"\
- "punpckhwd %%mm4, %%mm3 \n\t"\
- "movq 8(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
- "pmaddwd %%mm4, %%mm0 \n\t"\
- "pmaddwd %%mm4, %%mm3 \n\t"\
- "paddd %%mm0, %%mm1 \n\t"\
- "paddd %%mm3, %%mm5 \n\t"\
- "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
- "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
- "add $16, %%"REG_d" \n\t"\
- "test %%"REG_S", %%"REG_S" \n\t"\
- "movq %%mm2, %%mm0 \n\t"\
- "punpcklwd %%mm3, %%mm2 \n\t"\
- "punpckhwd %%mm3, %%mm0 \n\t"\
- "pmaddwd %%mm4, %%mm2 \n\t"\
- "pmaddwd %%mm4, %%mm0 \n\t"\
- "paddd %%mm2, %%mm7 \n\t"\
- "paddd %%mm0, %%mm6 \n\t"\
- " jnz 2b \n\t"\
- "psrad $16, %%mm1 \n\t"\
- "psrad $16, %%mm5 \n\t"\
- "psrad $16, %%mm7 \n\t"\
- "psrad $16, %%mm6 \n\t"\
- "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
- "packssdw %%mm5, %%mm1 \n\t"\
- "packssdw %%mm6, %%mm7 \n\t"\
- "paddw %%mm0, %%mm1 \n\t"\
- "paddw %%mm0, %%mm7 \n\t"\
- "movq "U_TEMP"(%0), %%mm3 \n\t"\
- "movq "V_TEMP"(%0), %%mm4 \n\t"\
+ "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "pxor %%mm1, %%mm1 \n\t"\
+ "pxor %%mm5, %%mm5 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "pxor %%mm6, %%mm6 \n\t"\
+ ASMALIGN(4)\
+ "2: \n\t"\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
+ "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
+ "movq %%mm0, %%mm3 \n\t"\
+ "punpcklwd %%mm4, %%mm0 \n\t"\
+ "punpckhwd %%mm4, %%mm3 \n\t"\
+ "movq 8(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
+ "pmaddwd %%mm4, %%mm0 \n\t"\
+ "pmaddwd %%mm4, %%mm3 \n\t"\
+ "paddd %%mm0, %%mm1 \n\t"\
+ "paddd %%mm3, %%mm5 \n\t"\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
+ "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
+ "add $16, %%"REG_d" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "punpcklwd %%mm3, %%mm2 \n\t"\
+ "punpckhwd %%mm3, %%mm0 \n\t"\
+ "pmaddwd %%mm4, %%mm2 \n\t"\
+ "pmaddwd %%mm4, %%mm0 \n\t"\
+ "paddd %%mm2, %%mm7 \n\t"\
+ "paddd %%mm0, %%mm6 \n\t"\
+ " jnz 2b \n\t"\
+ "psrad $16, %%mm1 \n\t"\
+ "psrad $16, %%mm5 \n\t"\
+ "psrad $16, %%mm7 \n\t"\
+ "psrad $16, %%mm6 \n\t"\
+ "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
+ "packssdw %%mm5, %%mm1 \n\t"\
+ "packssdw %%mm6, %%mm7 \n\t"\
+ "paddw %%mm0, %%mm1 \n\t"\
+ "paddw %%mm0, %%mm7 \n\t"\
+ "movq "U_TEMP"(%0), %%mm3 \n\t"\
+ "movq "V_TEMP"(%0), %%mm4 \n\t"\
#define YSCALEYUV2RGBX \
- "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
- "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
- "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
- "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
- "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
- "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
- /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
- "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
- "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
- "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
- "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
- "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
- "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
- /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
- "paddw %%mm3, %%mm4 \n\t"\
- "movq %%mm2, %%mm0 \n\t"\
- "movq %%mm5, %%mm6 \n\t"\
- "movq %%mm4, %%mm3 \n\t"\
- "punpcklwd %%mm2, %%mm2 \n\t"\
- "punpcklwd %%mm5, %%mm5 \n\t"\
- "punpcklwd %%mm4, %%mm4 \n\t"\
- "paddw %%mm1, %%mm2 \n\t"\
- "paddw %%mm1, %%mm5 \n\t"\
- "paddw %%mm1, %%mm4 \n\t"\
- "punpckhwd %%mm0, %%mm0 \n\t"\
- "punpckhwd %%mm6, %%mm6 \n\t"\
- "punpckhwd %%mm3, %%mm3 \n\t"\
- "paddw %%mm7, %%mm0 \n\t"\
- "paddw %%mm7, %%mm6 \n\t"\
- "paddw %%mm7, %%mm3 \n\t"\
- /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
- "packuswb %%mm0, %%mm2 \n\t"\
- "packuswb %%mm6, %%mm5 \n\t"\
- "packuswb %%mm3, %%mm4 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"
+ "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
+ "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
+/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
+ "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
+ "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
+/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
#if 0
#define FULL_YSCALEYUV2RGB \
- "pxor %%mm7, %%mm7 \n\t"\
- "movd %6, %%mm6 \n\t" /*yalpha1*/\
- "punpcklwd %%mm6, %%mm6 \n\t"\
- "punpcklwd %%mm6, %%mm6 \n\t"\
- "movd %7, %%mm5 \n\t" /*uvalpha1*/\
- "punpcklwd %%mm5, %%mm5 \n\t"\
- "punpcklwd %%mm5, %%mm5 \n\t"\
- "xor %%"REG_a", %%"REG_a" \n\t"\
- ASMALIGN(4)\
- "1: \n\t"\
- "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
- "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
- "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
- "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
- "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
- "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
- "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
- "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
- "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
- "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
- "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
- "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
- "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
- "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
- "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
- "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
- "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"\
+ "movd %6, %%mm6 \n\t" /*yalpha1*/\
+ "punpcklwd %%mm6, %%mm6 \n\t"\
+ "punpcklwd %%mm6, %%mm6 \n\t"\
+ "movd %7, %%mm5 \n\t" /*uvalpha1*/\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%0, %%"REG_a",2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, %%"REG_a",2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
+ "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
\
\
- "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
- "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
- "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
- "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
- "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
- "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
- "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
+ "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "pmulhw "MANGLE(ubCoeff)", %%mm3 \n\t"\
+ "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "pmulhw "MANGLE(ugCoeff)", %%mm2 \n\t"\
+ "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
\
\
- "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
- "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
- "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
- "paddw %%mm1, %%mm3 \n\t" /* B*/\
- "paddw %%mm1, %%mm0 \n\t" /* R*/\
- "packuswb %%mm3, %%mm3 \n\t"\
+ "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
+ "pmulhw "MANGLE(vrCoeff)", %%mm0 \n\t"\
+ "pmulhw "MANGLE(vgCoeff)", %%mm4 \n\t"\
+ "paddw %%mm1, %%mm3 \n\t" /* B*/\
+ "paddw %%mm1, %%mm0 \n\t" /* R*/\
+ "packuswb %%mm3, %%mm3 \n\t"\
\
- "packuswb %%mm0, %%mm0 \n\t"\
- "paddw %%mm4, %%mm2 \n\t"\
- "paddw %%mm2, %%mm1 \n\t" /* G*/\
+ "packuswb %%mm0, %%mm0 \n\t"\
+ "paddw %%mm4, %%mm2 \n\t"\
+ "paddw %%mm2, %%mm1 \n\t" /* G*/\
\
- "packuswb %%mm1, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
#endif
#define REAL_YSCALEYUV2PACKED(index, c) \
- "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
- "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
- "psraw $3, %%mm0 \n\t"\
- "psraw $3, %%mm1 \n\t"\
- "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
- "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
- "xor "#index", "#index" \n\t"\
- ASMALIGN(4)\
- "1: \n\t"\
- "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
- "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
- "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
- "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
- "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
- "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
- "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
- "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
- "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
- "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
- "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
- "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
- "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
- "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
- "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
- "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
- "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
- "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
- "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
- "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
- "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
- "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
- "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
-
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
+ "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
+ "psraw $3, %%mm0 \n\t"\
+ "psraw $3, %%mm1 \n\t"\
+ "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
+ "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
+ "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+
#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
-
+
#define REAL_YSCALEYUV2RGB(index, c) \
- "xor "#index", "#index" \n\t"\
- ASMALIGN(4)\
- "1: \n\t"\
- "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
- "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
- "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
- "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
- "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
- "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
- "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
- "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
- "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
- "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
- "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
- "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
- "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
- "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
- "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
- "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
- "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
- "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
- "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
- /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
- "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
- "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
- "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
- "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
- "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
- "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
- "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
- "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
- "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
- "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
- "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
- "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
- "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
- "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
- "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
- "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
- /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
- "paddw %%mm3, %%mm4 \n\t"\
- "movq %%mm2, %%mm0 \n\t"\
- "movq %%mm5, %%mm6 \n\t"\
- "movq %%mm4, %%mm3 \n\t"\
- "punpcklwd %%mm2, %%mm2 \n\t"\
- "punpcklwd %%mm5, %%mm5 \n\t"\
- "punpcklwd %%mm4, %%mm4 \n\t"\
- "paddw %%mm1, %%mm2 \n\t"\
- "paddw %%mm1, %%mm5 \n\t"\
- "paddw %%mm1, %%mm4 \n\t"\
- "punpckhwd %%mm0, %%mm0 \n\t"\
- "punpckhwd %%mm6, %%mm6 \n\t"\
- "punpckhwd %%mm3, %%mm3 \n\t"\
- "paddw %%mm7, %%mm0 \n\t"\
- "paddw %%mm7, %%mm6 \n\t"\
- "paddw %%mm7, %%mm3 \n\t"\
- /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
- "packuswb %%mm0, %%mm2 \n\t"\
- "packuswb %%mm6, %%mm5 \n\t"\
- "packuswb %%mm3, %%mm4 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
+ "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
+ "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
+ "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
+ "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
+ "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
+ "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
-
+
#define REAL_YSCALEYUV2PACKED1(index, c) \
- "xor "#index", "#index" \n\t"\
- ASMALIGN(4)\
- "1: \n\t"\
- "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
- "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
- "psraw $7, %%mm3 \n\t" \
- "psraw $7, %%mm4 \n\t" \
- "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
- "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
- "psraw $7, %%mm1 \n\t" \
- "psraw $7, %%mm7 \n\t" \
-
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
+ "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $7, %%mm3 \n\t" \
+ "psraw $7, %%mm4 \n\t" \
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $7, %%mm1 \n\t" \
+ "psraw $7, %%mm7 \n\t" \
+
#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
-
+
#define REAL_YSCALEYUV2RGB1(index, c) \
- "xor "#index", "#index" \n\t"\
- ASMALIGN(4)\
- "1: \n\t"\
- "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
- "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
- "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
- "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
- "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
- "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
- "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
- "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
- "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
- "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
- /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
- "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
- "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
- "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
- "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
- "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
- "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
- "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
- "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
- /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
- "paddw %%mm3, %%mm4 \n\t"\
- "movq %%mm2, %%mm0 \n\t"\
- "movq %%mm5, %%mm6 \n\t"\
- "movq %%mm4, %%mm3 \n\t"\
- "punpcklwd %%mm2, %%mm2 \n\t"\
- "punpcklwd %%mm5, %%mm5 \n\t"\
- "punpcklwd %%mm4, %%mm4 \n\t"\
- "paddw %%mm1, %%mm2 \n\t"\
- "paddw %%mm1, %%mm5 \n\t"\
- "paddw %%mm1, %%mm4 \n\t"\
- "punpckhwd %%mm0, %%mm0 \n\t"\
- "punpckhwd %%mm6, %%mm6 \n\t"\
- "punpckhwd %%mm3, %%mm3 \n\t"\
- "paddw %%mm7, %%mm0 \n\t"\
- "paddw %%mm7, %%mm6 \n\t"\
- "paddw %%mm7, %%mm3 \n\t"\
- /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
- "packuswb %%mm0, %%mm2 \n\t"\
- "packuswb %%mm6, %%mm5 \n\t"\
- "packuswb %%mm3, %%mm4 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
+ "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
+ "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
#define REAL_YSCALEYUV2PACKED1b(index, c) \
- "xor "#index", "#index" \n\t"\
- ASMALIGN(4)\
- "1: \n\t"\
- "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
- "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
- "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
- "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
- "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
- "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
- "psrlw $8, %%mm3 \n\t" \
- "psrlw $8, %%mm4 \n\t" \
- "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
- "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
- "psraw $7, %%mm1 \n\t" \
- "psraw $7, %%mm7 \n\t"
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+ "psrlw $8, %%mm3 \n\t" \
+ "psrlw $8, %%mm4 \n\t" \
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $7, %%mm1 \n\t" \
+ "psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
-
+
// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
- "xor "#index", "#index" \n\t"\
- ASMALIGN(4)\
- "1: \n\t"\
- "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
- "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
- "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
- "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
- "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
- "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
- "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
- "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
- "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
- "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
- "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
- "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
- "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
- "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
- /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
- "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
- "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
- "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
- "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
- "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
- "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
- "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
- "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
- /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
- "paddw %%mm3, %%mm4 \n\t"\
- "movq %%mm2, %%mm0 \n\t"\
- "movq %%mm5, %%mm6 \n\t"\
- "movq %%mm4, %%mm3 \n\t"\
- "punpcklwd %%mm2, %%mm2 \n\t"\
- "punpcklwd %%mm5, %%mm5 \n\t"\
- "punpcklwd %%mm4, %%mm4 \n\t"\
- "paddw %%mm1, %%mm2 \n\t"\
- "paddw %%mm1, %%mm5 \n\t"\
- "paddw %%mm1, %%mm4 \n\t"\
- "punpckhwd %%mm0, %%mm0 \n\t"\
- "punpckhwd %%mm6, %%mm6 \n\t"\
- "punpckhwd %%mm3, %%mm3 \n\t"\
- "paddw %%mm7, %%mm0 \n\t"\
- "paddw %%mm7, %%mm6 \n\t"\
- "paddw %%mm7, %%mm3 \n\t"\
- /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
- "packuswb %%mm0, %%mm2 \n\t"\
- "packuswb %%mm6, %%mm5 \n\t"\
- "packuswb %%mm3, %%mm4 \n\t"\
- "pxor %%mm7, %%mm7 \n\t"
+ "xor "#index", "#index" \n\t"\
+ ASMALIGN(4)\
+ "1: \n\t"\
+ "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
+ "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
+ "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
+ "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
+ "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
+ "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
+ "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
+ "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
+ "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
+ "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
+ "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
+ /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
+ "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
+ "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
+ "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
+ "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
+ "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
+ "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
+ "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
+ "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
+ "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
+ /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
+ "paddw %%mm3, %%mm4 \n\t"\
+ "movq %%mm2, %%mm0 \n\t"\
+ "movq %%mm5, %%mm6 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
+ "punpcklwd %%mm2, %%mm2 \n\t"\
+ "punpcklwd %%mm5, %%mm5 \n\t"\
+ "punpcklwd %%mm4, %%mm4 \n\t"\
+ "paddw %%mm1, %%mm2 \n\t"\
+ "paddw %%mm1, %%mm5 \n\t"\
+ "paddw %%mm1, %%mm4 \n\t"\
+ "punpckhwd %%mm0, %%mm0 \n\t"\
+ "punpckhwd %%mm6, %%mm6 \n\t"\
+ "punpckhwd %%mm3, %%mm3 \n\t"\
+ "paddw %%mm7, %%mm0 \n\t"\
+ "paddw %%mm7, %%mm6 \n\t"\
+ "paddw %%mm7, %%mm3 \n\t"\
+ /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
+ "packuswb %%mm0, %%mm2 \n\t"\
+ "packuswb %%mm6, %%mm5 \n\t"\
+ "packuswb %%mm3, %%mm4 \n\t"\
+ "pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
#define REAL_WRITEBGR32(dst, dstw, index) \
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
- "movq %%mm2, %%mm1 \n\t" /* B */\
- "movq %%mm5, %%mm6 \n\t" /* R */\
- "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
- "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
- "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
- "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
- "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
- "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
- "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
- "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
- "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
- "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
- MOVNTQ(%%mm0, (dst, index, 4))\
- MOVNTQ(%%mm2, 8(dst, index, 4))\
- MOVNTQ(%%mm1, 16(dst, index, 4))\
- MOVNTQ(%%mm3, 24(dst, index, 4))\
+ MOVNTQ(%%mm0, (dst, index, 4))\
+ MOVNTQ(%%mm2, 8(dst, index, 4))\
+ MOVNTQ(%%mm1, 16(dst, index, 4))\
+ MOVNTQ(%%mm3, 24(dst, index, 4))\
\
- "add $8, "#index" \n\t"\
- "cmp "#dstw", "#index" \n\t"\
- " jb 1b \n\t"
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
#define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
#define REAL_WRITEBGR16(dst, dstw, index) \
- "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
- "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
- "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
- "psrlq $3, %%mm2 \n\t"\
+ "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
+ "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
+ "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
+ "psrlq $3, %%mm2 \n\t"\
\
- "movq %%mm2, %%mm1 \n\t"\
- "movq %%mm4, %%mm3 \n\t"\
+ "movq %%mm2, %%mm1 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
\
- "punpcklbw %%mm7, %%mm3 \n\t"\
- "punpcklbw %%mm5, %%mm2 \n\t"\
- "punpckhbw %%mm7, %%mm4 \n\t"\
- "punpckhbw %%mm5, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm5, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm5, %%mm1 \n\t"\
\
- "psllq $3, %%mm3 \n\t"\
- "psllq $3, %%mm4 \n\t"\
+ "psllq $3, %%mm3 \n\t"\
+ "psllq $3, %%mm4 \n\t"\
\
- "por %%mm3, %%mm2 \n\t"\
- "por %%mm4, %%mm1 \n\t"\
+ "por %%mm3, %%mm2 \n\t"\
+ "por %%mm4, %%mm1 \n\t"\
\
- MOVNTQ(%%mm2, (dst, index, 2))\
- MOVNTQ(%%mm1, 8(dst, index, 2))\
+ MOVNTQ(%%mm2, (dst, index, 2))\
+ MOVNTQ(%%mm1, 8(dst, index, 2))\
\
- "add $8, "#index" \n\t"\
- "cmp "#dstw", "#index" \n\t"\
- " jb 1b \n\t"
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
#define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
#define REAL_WRITEBGR15(dst, dstw, index) \
- "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
- "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
- "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
- "psrlq $3, %%mm2 \n\t"\
- "psrlq $1, %%mm5 \n\t"\
+ "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
+ "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
+ "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
+ "psrlq $3, %%mm2 \n\t"\
+ "psrlq $1, %%mm5 \n\t"\
\
- "movq %%mm2, %%mm1 \n\t"\
- "movq %%mm4, %%mm3 \n\t"\
+ "movq %%mm2, %%mm1 \n\t"\
+ "movq %%mm4, %%mm3 \n\t"\
\
- "punpcklbw %%mm7, %%mm3 \n\t"\
- "punpcklbw %%mm5, %%mm2 \n\t"\
- "punpckhbw %%mm7, %%mm4 \n\t"\
- "punpckhbw %%mm5, %%mm1 \n\t"\
+ "punpcklbw %%mm7, %%mm3 \n\t"\
+ "punpcklbw %%mm5, %%mm2 \n\t"\
+ "punpckhbw %%mm7, %%mm4 \n\t"\
+ "punpckhbw %%mm5, %%mm1 \n\t"\
\
- "psllq $2, %%mm3 \n\t"\
- "psllq $2, %%mm4 \n\t"\
+ "psllq $2, %%mm3 \n\t"\
+ "psllq $2, %%mm4 \n\t"\
\
- "por %%mm3, %%mm2 \n\t"\
- "por %%mm4, %%mm1 \n\t"\
+ "por %%mm3, %%mm2 \n\t"\
+ "por %%mm4, %%mm1 \n\t"\
\
- MOVNTQ(%%mm2, (dst, index, 2))\
- MOVNTQ(%%mm1, 8(dst, index, 2))\
+ MOVNTQ(%%mm2, (dst, index, 2))\
+ MOVNTQ(%%mm1, 8(dst, index, 2))\
\
- "add $8, "#index" \n\t"\
- "cmp "#dstw", "#index" \n\t"\
- " jb 1b \n\t"
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
#define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
#define WRITEBGR24OLD(dst, dstw, index) \
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
- "movq %%mm2, %%mm1 \n\t" /* B */\
- "movq %%mm5, %%mm6 \n\t" /* R */\
- "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
- "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
- "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
- "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
- "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
- "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
- "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
- "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
- "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
- "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
- "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
- "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
- "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
- "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
- "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
- "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
- "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
- "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
+ "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
+ "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
+ "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
+ "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
+ "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
+ "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
+ "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
+ "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
\
- "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
- "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
- "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
- "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
- "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
- "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
- "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
- "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
- "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
- "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
- "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
- "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
- "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
+ "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
+ "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
+ "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
+ "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
+ "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
+ "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
+ "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
+ "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
+ "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
+ "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
+ "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
+ "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
\
- "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
- "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
- "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
- "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
- "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
- "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
- "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
- "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
+ "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
+ "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
+ "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
+ "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
+ "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
+ "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
+ "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
+ "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
\
- MOVNTQ(%%mm0, (dst))\
- MOVNTQ(%%mm2, 8(dst))\
- MOVNTQ(%%mm3, 16(dst))\
- "add $24, "#dst" \n\t"\
+ MOVNTQ(%%mm0, (dst))\
+ MOVNTQ(%%mm2, 8(dst))\
+ MOVNTQ(%%mm3, 16(dst))\
+ "add $24, "#dst" \n\t"\
\
- "add $8, "#index" \n\t"\
- "cmp "#dstw", "#index" \n\t"\
- " jb 1b \n\t"
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
#define WRITEBGR24MMX(dst, dstw, index) \
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
- "movq %%mm2, %%mm1 \n\t" /* B */\
- "movq %%mm5, %%mm6 \n\t" /* R */\
- "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
- "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
- "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
- "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
- "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
- "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
- "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
- "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
- "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
- "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq %%mm2, %%mm1 \n\t" /* B */\
+ "movq %%mm5, %%mm6 \n\t" /* R */\
+ "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
+ "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
+ "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
+ "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
+ "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
+ "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
+ "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
+ "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
+ "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
- "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
- "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
- "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
- "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
+ "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
+ "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
+ "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
+ "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
- "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
- "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
- "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
- "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
+ "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
+ "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
+ "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
+ "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
- "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
- "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
- "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
- "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
+ "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
+ "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
+ "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
+ "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
- "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
- "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
- "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
- "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
- MOVNTQ(%%mm0, (dst))\
+ "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
+ "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
+ "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
+ "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
+ MOVNTQ(%%mm0, (dst))\
\
- "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
- "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
- "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
- "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
- MOVNTQ(%%mm6, 8(dst))\
+ "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
+ "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
+ "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
+ "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
+ MOVNTQ(%%mm6, 8(dst))\
\
- "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
- "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
- "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
- MOVNTQ(%%mm5, 16(dst))\
+ "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
+ "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
+ "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
+ MOVNTQ(%%mm5, 16(dst))\
\
- "add $24, "#dst" \n\t"\
+ "add $24, "#dst" \n\t"\
\
- "add $8, "#index" \n\t"\
- "cmp "#dstw", "#index" \n\t"\
- " jb 1b \n\t"
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
#define WRITEBGR24MMX2(dst, dstw, index) \
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
- "movq "MANGLE(M24A)", %%mm0 \n\t"\
- "movq "MANGLE(M24C)", %%mm7 \n\t"\
- "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
- "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
- "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
+ "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
+ "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
+ "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
+ "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
+ "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
- "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
- "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
- "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
+ "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
+ "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
+ "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
- "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
- "por %%mm1, %%mm6 \n\t"\
- "por %%mm3, %%mm6 \n\t"\
- MOVNTQ(%%mm6, (dst))\
+ "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
+ "por %%mm1, %%mm6 \n\t"\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, (dst))\
\
- "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
- "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
- "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
- "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
+ "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
+ "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
+ "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
+ "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
- "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
- "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
- "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
+ "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
+ "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
+ "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
- "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
- "por %%mm3, %%mm6 \n\t"\
- MOVNTQ(%%mm6, 8(dst))\
+ "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, 8(dst))\
\
- "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
- "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
- "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
+ "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
+ "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
+ "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
- "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
- "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
- "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
+ "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
+ "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
+ "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
- "por %%mm1, %%mm3 \n\t"\
- "por %%mm3, %%mm6 \n\t"\
- MOVNTQ(%%mm6, 16(dst))\
+ "por %%mm1, %%mm3 \n\t"\
+ "por %%mm3, %%mm6 \n\t"\
+ MOVNTQ(%%mm6, 16(dst))\
\
- "add $24, "#dst" \n\t"\
+ "add $24, "#dst" \n\t"\
\
- "add $8, "#index" \n\t"\
- "cmp "#dstw", "#index" \n\t"\
- " jb 1b \n\t"
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
#ifdef HAVE_MMX2
#undef WRITEBGR24
@@ -910,122 +910,122 @@
#endif
#define REAL_WRITEYUY2(dst, dstw, index) \
- "packuswb %%mm3, %%mm3 \n\t"\
- "packuswb %%mm4, %%mm4 \n\t"\
- "packuswb %%mm7, %%mm1 \n\t"\
- "punpcklbw %%mm4, %%mm3 \n\t"\
- "movq %%mm1, %%mm7 \n\t"\
- "punpcklbw %%mm3, %%mm1 \n\t"\
- "punpckhbw %%mm3, %%mm7 \n\t"\
+ "packuswb %%mm3, %%mm3 \n\t"\
+ "packuswb %%mm4, %%mm4 \n\t"\
+ "packuswb %%mm7, %%mm1 \n\t"\
+ "punpcklbw %%mm4, %%mm3 \n\t"\
+ "movq %%mm1, %%mm7 \n\t"\
+ "punpcklbw %%mm3, %%mm1 \n\t"\
+ "punpckhbw %%mm3, %%mm7 \n\t"\
\
- MOVNTQ(%%mm1, (dst, index, 2))\
- MOVNTQ(%%mm7, 8(dst, index, 2))\
+ MOVNTQ(%%mm1, (dst, index, 2))\
+ MOVNTQ(%%mm7, 8(dst, index, 2))\
\
- "add $8, "#index" \n\t"\
- "cmp "#dstw", "#index" \n\t"\
- " jb 1b \n\t"
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
+ " jb 1b \n\t"
#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
- if(c->flags & SWS_ACCURATE_RND){
- if(uDest){
- YSCALEYUV2YV12X_ACCURATE( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
- YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
- }
-
- YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
- }else{
- if(uDest){
- YSCALEYUV2YV12X( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
- YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
- }
+ if (c->flags & SWS_ACCURATE_RND){
+ if (uDest){
+ YSCALEYUV2YV12X_ACCURATE( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
+ YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
+ }
- YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
+ YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
+ }else{
+ if (uDest){
+ YSCALEYUV2YV12X( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
+ YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
}
+
+ YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
+ }
#else
#ifdef HAVE_ALTIVEC
yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
- chrFilter, chrSrc, chrFilterSize,
- dest, uDest, vDest, dstW, chrDstW);
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
#else //HAVE_ALTIVEC
yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
- chrFilter, chrSrc, chrFilterSize,
- dest, uDest, vDest, dstW, chrDstW);
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
-#endif
+#endif /* HAVE_MMX */
}
static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
- chrFilter, chrSrc, chrFilterSize,
- dest, uDest, dstW, chrDstW, dstFormat);
+ chrFilter, chrSrc, chrFilterSize,
+ dest, uDest, dstW, chrDstW, dstFormat);
}
static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
- uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
+ uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
- if(uDest != NULL)
- {
- asm volatile(
- YSCALEYUV2YV121
- :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
- "g" (-chrDstW)
- : "%"REG_a
- );
-
- asm volatile(
- YSCALEYUV2YV121
- :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
- "g" (-chrDstW)
- : "%"REG_a
- );
- }
-
- asm volatile(
- YSCALEYUV2YV121
- :: "r" (lumSrc + dstW), "r" (dest + dstW),
- "g" (-dstW)
- : "%"REG_a
- );
+ if (uDest)
+ {
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
+ "g" (-chrDstW)
+ : "%"REG_a
+ );
+
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
+ "g" (-chrDstW)
+ : "%"REG_a
+ );
+ }
+
+ asm volatile(
+ YSCALEYUV2YV121
+ :: "r" (lumSrc + dstW), "r" (dest + dstW),
+ "g" (-dstW)
+ : "%"REG_a
+ );
#else
- int i;
- for(i=0; i<dstW; i++)
- {
- int val= lumSrc[i]>>7;
-
- if(val&256){
- if(val<0) val=0;
- else val=255;
- }
-
- dest[i]= val;
- }
-
- if(uDest != NULL)
- for(i=0; i<chrDstW; i++)
- {
- int u=chrSrc[i]>>7;
- int v=chrSrc[i + 2048]>>7;
-
- if((u|v)&256){
- if(u<0) u=0;
- else if (u>255) u=255;
- if(v<0) v=0;
- else if (v>255) v=255;
- }
-
- uDest[i]= u;
- vDest[i]= v;
- }
+ int i;
+ for (i=0; i<dstW; i++)
+ {
+ int val= lumSrc[i]>>7;
+
+ if (val&256){
+ if (val<0) val=0;
+ else val=255;
+ }
+
+ dest[i]= val;
+ }
+
+ if (uDest)
+ for (i=0; i<chrDstW; i++)
+ {
+ int u=chrSrc[i]>>7;
+ int v=chrSrc[i + 2048]>>7;
+
+ if ((u|v)&256){
+ if (u<0) u=0;
+ else if (u>255) u=255;
+ if (v<0) v=0;
+ else if (v>255) v=255;
+ }
+
+ uDest[i]= u;
+ vDest[i]= v;
+ }
#endif
}
@@ -1034,463 +1034,462 @@ static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
* vertical scale YV12 to RGB
*/
static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, long dstW, long dstY)
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, long dstW, long dstY)
{
#ifdef HAVE_MMX
long dummy=0;
- if(c->flags & SWS_ACCURATE_RND){
- switch(c->dstFormat){
- case PIX_FMT_RGB32:
- YSCALEYUV2PACKEDX_ACCURATE
- YSCALEYUV2RGBX
- WRITEBGR32(%4, %5, %%REGa)
-
- YSCALEYUV2PACKEDX_END
- return;
- case PIX_FMT_BGR24:
- YSCALEYUV2PACKEDX_ACCURATE
- YSCALEYUV2RGBX
- "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
- "add %4, %%"REG_c" \n\t"
- WRITEBGR24(%%REGc, %5, %%REGa)
-
-
- :: "r" (&c->redDither),
- "m" (dummy), "m" (dummy), "m" (dummy),
- "r" (dest), "m" (dstW)
- : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
- );
- return;
- case PIX_FMT_BGR555:
- YSCALEYUV2PACKEDX_ACCURATE
- YSCALEYUV2RGBX
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ if (c->flags & SWS_ACCURATE_RND){
+ switch(c->dstFormat){
+ case PIX_FMT_RGB32:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ WRITEBGR32(%4, %5, %%REGa)
+
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_BGR24:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
+ "add %4, %%"REG_c" \n\t"
+ WRITEBGR24(%%REGc, %5, %%REGa)
+
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
+ );
+ return;
+ case PIX_FMT_BGR555:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
-#endif
-
- WRITEBGR15(%4, %5, %%REGa)
- YSCALEYUV2PACKEDX_END
- return;
- case PIX_FMT_BGR565:
- YSCALEYUV2PACKEDX_ACCURATE
- YSCALEYUV2RGBX
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR15(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_BGR565:
+ YSCALEYUV2PACKEDX_ACCURATE
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
-#endif
-
- WRITEBGR16(%4, %5, %%REGa)
- YSCALEYUV2PACKEDX_END
- return;
- case PIX_FMT_YUYV422:
- YSCALEYUV2PACKEDX_ACCURATE
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
-
- "psraw $3, %%mm3 \n\t"
- "psraw $3, %%mm4 \n\t"
- "psraw $3, %%mm1 \n\t"
- "psraw $3, %%mm7 \n\t"
- WRITEYUY2(%4, %5, %%REGa)
- YSCALEYUV2PACKEDX_END
- return;
- }
+ "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+#endif
+
+ WRITEBGR16(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_YUYV422:
+ YSCALEYUV2PACKEDX_ACCURATE
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+ "psraw $3, %%mm3 \n\t"
+ "psraw $3, %%mm4 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ WRITEYUY2(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ }
}else{
- switch(c->dstFormat)
- {
- case PIX_FMT_RGB32:
- YSCALEYUV2PACKEDX
- YSCALEYUV2RGBX
- WRITEBGR32(%4, %5, %%REGa)
- YSCALEYUV2PACKEDX_END
- return;
- case PIX_FMT_BGR24:
- YSCALEYUV2PACKEDX
- YSCALEYUV2RGBX
- "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
- "add %4, %%"REG_c" \n\t"
- WRITEBGR24(%%REGc, %5, %%REGa)
-
- :: "r" (&c->redDither),
- "m" (dummy), "m" (dummy), "m" (dummy),
- "r" (dest), "m" (dstW)
- : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
- );
- return;
- case PIX_FMT_BGR555:
- YSCALEYUV2PACKEDX
- YSCALEYUV2RGBX
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ switch(c->dstFormat)
+ {
+ case PIX_FMT_RGB32:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ WRITEBGR32(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_BGR24:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
+ "add %4, %%"REG_c" \n\t"
+ WRITEBGR24(%%REGc, %5, %%REGa)
+
+ :: "r" (&c->redDither),
+ "m" (dummy), "m" (dummy), "m" (dummy),
+ "r" (dest), "m" (dstW)
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
+ );
+ return;
+ case PIX_FMT_BGR555:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
-#endif
-
- WRITEBGR15(%4, %5, %%REGa)
- YSCALEYUV2PACKEDX_END
- return;
- case PIX_FMT_BGR565:
- YSCALEYUV2PACKEDX
- YSCALEYUV2RGBX
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
+#endif
+
+ WRITEBGR15(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_BGR565:
+ YSCALEYUV2PACKEDX
+ YSCALEYUV2RGBX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
-#endif
-
- WRITEBGR16(%4, %5, %%REGa)
- YSCALEYUV2PACKEDX_END
- return;
- case PIX_FMT_YUYV422:
- YSCALEYUV2PACKEDX
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
-
- "psraw $3, %%mm3 \n\t"
- "psraw $3, %%mm4 \n\t"
- "psraw $3, %%mm1 \n\t"
- "psraw $3, %%mm7 \n\t"
- WRITEYUY2(%4, %5, %%REGa)
- YSCALEYUV2PACKEDX_END
- return;
+ "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
+#endif
+
+ WRITEBGR16(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
+ case PIX_FMT_YUYV422:
+ YSCALEYUV2PACKEDX
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+
+ "psraw $3, %%mm3 \n\t"
+ "psraw $3, %%mm4 \n\t"
+ "psraw $3, %%mm1 \n\t"
+ "psraw $3, %%mm7 \n\t"
+ WRITEYUY2(%4, %5, %%REGa)
+ YSCALEYUV2PACKEDX_END
+ return;
}
}
-#endif
+#endif /* HAVE_MMX */
#ifdef HAVE_ALTIVEC
- /* The following list of supported dstFormat values should
- match what's found in the body of altivec_yuv2packedX() */
- if(c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
- c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
- c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)
- altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
- chrFilter, chrSrc, chrFilterSize,
- dest, dstW, dstY);
- else
-#endif
- yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
- chrFilter, chrSrc, chrFilterSize,
- dest, dstW, dstY);
+ /* The following list of supported dstFormat values should
+ match what's found in the body of altivec_yuv2packedX() */
+ if (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
+ c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
+ c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)
+ altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, dstW, dstY);
+ else
+#endif
+ yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
+ chrFilter, chrSrc, chrFilterSize,
+ dest, dstW, dstY);
}
/**
* vertical bilinear scale YV12 to RGB
*/
static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
- uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
+ uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
- int yalpha1=yalpha^4095;
- int uvalpha1=uvalpha^4095;
- int i;
+ int yalpha1=yalpha^4095;
+ int uvalpha1=uvalpha^4095;
+ int i;
#if 0 //isn't used
- if(flags&SWS_FULL_CHR_H_INT)
- {
- switch(dstFormat)
- {
+ if (flags&SWS_FULL_CHR_H_INT)
+ {
+ switch(dstFormat)
+ {
#ifdef HAVE_MMX
- case PIX_FMT_RGB32:
- asm volatile(
+ case PIX_FMT_RGB32:
+ asm volatile(
FULL_YSCALEYUV2RGB
- "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
- "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
-
- "movq %%mm3, %%mm1 \n\t"
- "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
- "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
+ "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
+ "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
- MOVNTQ(%%mm3, (%4, %%REGa, 4))
- MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
+ "movq %%mm3, %%mm1 \n\t"
+ "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
+ "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
- "add $4, %%"REG_a" \n\t"
- "cmp %5, %%"REG_a" \n\t"
- " jb 1b \n\t"
+ MOVNTQ(%%mm3, (%4, %%REGa, 4))
+ MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
- :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
- "m" (yalpha1), "m" (uvalpha1)
- : "%"REG_a
- );
- break;
- case PIX_FMT_BGR24:
- asm volatile(
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+ case PIX_FMT_BGR24:
+ asm volatile(
FULL_YSCALEYUV2RGB
- // lsb ... msb
- "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
- "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
+ // lsb ... msb
+ "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
+ "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
- "movq %%mm3, %%mm1 \n\t"
- "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
- "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
+ "movq %%mm3, %%mm1 \n\t"
+ "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
+ "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
- "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
- "psrlq $8, %%mm3 \n\t" // GR0BGR00
- "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
- "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
- "por %%mm2, %%mm3 \n\t" // BGRBGR00
- "movq %%mm1, %%mm2 \n\t"
- "psllq $48, %%mm1 \n\t" // 000000BG
- "por %%mm1, %%mm3 \n\t" // BGRBGRBG
+ "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
+ "psrlq $8, %%mm3 \n\t" // GR0BGR00
+ "pand "MANGLE(bm00000111)", %%mm2 \n\t" // BGR00000
+ "pand "MANGLE(bm11111000)", %%mm3 \n\t" // 000BGR00
+ "por %%mm2, %%mm3 \n\t" // BGRBGR00
+ "movq %%mm1, %%mm2 \n\t"
+ "psllq $48, %%mm1 \n\t" // 000000BG
+ "por %%mm1, %%mm3 \n\t" // BGRBGRBG
- "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
- "psrld $16, %%mm2 \n\t" // R000R000
- "psrlq $24, %%mm1 \n\t" // 0BGR0000
- "por %%mm2, %%mm1 \n\t" // RBGRR000
+ "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
+ "psrld $16, %%mm2 \n\t" // R000R000
+ "psrlq $24, %%mm1 \n\t" // 0BGR0000
+ "por %%mm2, %%mm1 \n\t" // RBGRR000
- "mov %4, %%"REG_b" \n\t"
- "add %%"REG_a", %%"REG_b" \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "add %%"REG_a", %%"REG_b" \n\t"
#ifdef HAVE_MMX2
- //FIXME Alignment
- "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
- "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
+ //FIXME Alignment
+ "movntq %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
+ "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
#else
- "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
- "psrlq $32, %%mm3 \n\t"
- "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
- "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
-#endif
- "add $4, %%"REG_a" \n\t"
- "cmp %5, %%"REG_a" \n\t"
- " jb 1b \n\t"
-
- :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
- "m" (yalpha1), "m" (uvalpha1)
- : "%"REG_a, "%"REG_b
- );
- break;
- case PIX_FMT_BGR555:
- asm volatile(
+ "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
+ "psrlq $32, %%mm3 \n\t"
+ "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
+ "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
+#endif
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a, "%"REG_b
+ );
+ break;
+ case PIX_FMT_BGR555:
+ asm volatile(
FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
- "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
- "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm1 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm0 \n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm3 \n\t"
#endif
- "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
- "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
- "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
+ "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
+ "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
+ "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
- "psrlw $3, %%mm3 \n\t"
- "psllw $2, %%mm1 \n\t"
- "psllw $7, %%mm0 \n\t"
- "pand "MANGLE(g15Mask)", %%mm1 \n\t"
- "pand "MANGLE(r15Mask)", %%mm0 \n\t"
+ "psrlw $3, %%mm3 \n\t"
+ "psllw $2, %%mm1 \n\t"
+ "psllw $7, %%mm0 \n\t"
+ "pand "MANGLE(g15Mask)", %%mm1 \n\t"
+ "pand "MANGLE(r15Mask)", %%mm0 \n\t"
- "por %%mm3, %%mm1 \n\t"
- "por %%mm1, %%mm0 \n\t"
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm1, %%mm0 \n\t"
- MOVNTQ(%%mm0, (%4, %%REGa, 2))
+ MOVNTQ(%%mm0, (%4, %%REGa, 2))
- "add $4, %%"REG_a" \n\t"
- "cmp %5, %%"REG_a" \n\t"
- " jb 1b \n\t"
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
- :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
- "m" (yalpha1), "m" (uvalpha1)
- : "%"REG_a
- );
- break;
- case PIX_FMT_BGR565:
- asm volatile(
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+ case PIX_FMT_BGR565:
+ asm volatile(
FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
- "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
- "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
-#endif
- "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
- "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
- "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
-
- "psrlw $3, %%mm3 \n\t"
- "psllw $3, %%mm1 \n\t"
- "psllw $8, %%mm0 \n\t"
- "pand "MANGLE(g16Mask)", %%mm1 \n\t"
- "pand "MANGLE(r16Mask)", %%mm0 \n\t"
-
- "por %%mm3, %%mm1 \n\t"
- "por %%mm1, %%mm0 \n\t"
-
- MOVNTQ(%%mm0, (%4, %%REGa, 2))
-
- "add $4, %%"REG_a" \n\t"
- "cmp %5, %%"REG_a" \n\t"
- " jb 1b \n\t"
-
- :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
- "m" (yalpha1), "m" (uvalpha1)
- : "%"REG_a
- );
- break;
-#endif
- case PIX_FMT_BGR32:
+ "paddusb "MANGLE(g6Dither)", %%mm1 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm0 \n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm3 \n\t"
+#endif
+ "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
+ "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
+ "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
+
+ "psrlw $3, %%mm3 \n\t"
+ "psllw $3, %%mm1 \n\t"
+ "psllw $8, %%mm0 \n\t"
+ "pand "MANGLE(g16Mask)", %%mm1 \n\t"
+ "pand "MANGLE(r16Mask)", %%mm0 \n\t"
+
+ "por %%mm3, %%mm1 \n\t"
+ "por %%mm1, %%mm0 \n\t"
+
+ MOVNTQ(%%mm0, (%4, %%REGa, 2))
+
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+ "m" (yalpha1), "m" (uvalpha1)
+ : "%"REG_a
+ );
+ break;
+#endif /* HAVE_MMX */
+ case PIX_FMT_BGR32:
#ifndef HAVE_MMX
- case PIX_FMT_RGB32:
+ case PIX_FMT_RGB32:
#endif
- if(dstFormat==PIX_FMT_RGB32)
- {
- int i;
+ if (dstFormat==PIX_FMT_RGB32)
+ {
+ int i;
#ifdef WORDS_BIGENDIAN
- dest++;
-#endif
- for(i=0;i<dstW;i++){
- // vertical linear interpolation && yuv2rgb in a single step:
- int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
- int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
- int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
- dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
- dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
- dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
- dest+= 4;
- }
- }
- else if(dstFormat==PIX_FMT_BGR24)
- {
- int i;
- for(i=0;i<dstW;i++){
- // vertical linear interpolation && yuv2rgb in a single step:
- int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
- int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
- int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
- dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
- dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
- dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
- dest+= 3;
- }
- }
- else if(dstFormat==PIX_FMT_BGR565)
- {
- int i;
- for(i=0;i<dstW;i++){
- // vertical linear interpolation && yuv2rgb in a single step:
- int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
- int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
- int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
-
- ((uint16_t*)dest)[i] =
- clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
- clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
- clip_table16r[(Y + yuvtab_3343[V]) >>13];
- }
- }
- else if(dstFormat==PIX_FMT_BGR555)
- {
- int i;
- for(i=0;i<dstW;i++){
- // vertical linear interpolation && yuv2rgb in a single step:
- int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
- int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
- int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
-
- ((uint16_t*)dest)[i] =
- clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
- clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
- clip_table15r[(Y + yuvtab_3343[V]) >>13];
- }
- }
- }//FULL_UV_IPOL
- else
- {
+ dest++;
+#endif
+ for (i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+ dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
+ dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
+ dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
+ dest+= 4;
+ }
+ }
+ else if (dstFormat==PIX_FMT_BGR24)
+ {
+ int i;
+ for (i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+ dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
+ dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
+ dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
+ dest+= 3;
+ }
+ }
+ else if (dstFormat==PIX_FMT_BGR565)
+ {
+ int i;
+ for (i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+
+ ((uint16_t*)dest)[i] =
+ clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
+ clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
+ clip_table16r[(Y + yuvtab_3343[V]) >>13];
+ }
+ }
+ else if (dstFormat==PIX_FMT_BGR555)
+ {
+ int i;
+ for (i=0;i<dstW;i++){
+ // vertical linear interpolation && yuv2rgb in a single step:
+ int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
+ int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
+ int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
+
+ ((uint16_t*)dest)[i] =
+ clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
+ clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
+ clip_table15r[(Y + yuvtab_3343[V]) >>13];
+ }
+ }
+ }//FULL_UV_IPOL
+ else
+ {
#endif // if 0
#ifdef HAVE_MMX
- switch(c->dstFormat)
- {
-//Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
- case PIX_FMT_RGB32:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB(%%REGBP, %5)
- WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR24:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB(%%REGBP, %5)
- WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR555:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB(%%REGBP, %5)
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ switch(c->dstFormat)
+ {
+ //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
+ case PIX_FMT_RGB32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR555:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
#endif
- WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR565:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB(%%REGBP, %5)
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR565:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
#endif
- WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_YUYV422:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2PACKED(%%REGBP, %5)
- WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- default: break;
- }
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_YUYV422:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ default: break;
+ }
#endif //HAVE_MMX
YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
}
@@ -1499,206 +1498,206 @@ YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
* YV12 to RGB without scaling or interpolating
*/
static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
- uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
+ uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
{
- const int yalpha1=0;
- int i;
-
- uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
- const int yalpha= 4096; //FIXME ...
-
- if(flags&SWS_FULL_CHR_H_INT)
- {
- RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
- return;
- }
+ const int yalpha1=0;
+ int i;
+
+ uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
+ const int yalpha= 4096; //FIXME ...
+
+ if (flags&SWS_FULL_CHR_H_INT)
+ {
+ RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
+ return;
+ }
#ifdef HAVE_MMX
- if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but its a bit faster
- {
- switch(dstFormat)
- {
- case PIX_FMT_RGB32:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB1(%%REGBP, %5)
- WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR24:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB1(%%REGBP, %5)
- WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR555:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB1(%%REGBP, %5)
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
+ {
+ switch(dstFormat)
+ {
+ case PIX_FMT_RGB32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR555:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
-#endif
- WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR565:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB1(%%REGBP, %5)
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
+#endif
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR565:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
-#endif
-
- WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_YUYV422:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2PACKED1(%%REGBP, %5)
- WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- }
- }
- else
- {
- switch(dstFormat)
- {
- case PIX_FMT_RGB32:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB1b(%%REGBP, %5)
- WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR24:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB1b(%%REGBP, %5)
- WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR555:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB1b(%%REGBP, %5)
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
+#endif
+
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_YUYV422:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED1(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ }
+ }
+ else
+ {
+ switch(dstFormat)
+ {
+ case PIX_FMT_RGB32:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR24:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR555:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
-#endif
- WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_BGR565:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2RGB1b(%%REGBP, %5)
- /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
+ "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
+#endif
+ WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_BGR565:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2RGB1b(%%REGBP, %5)
+ /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
- "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
- "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
-#endif
-
- WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- case PIX_FMT_YUYV422:
- asm volatile(
- "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
- "mov %4, %%"REG_b" \n\t"
- "push %%"REG_BP" \n\t"
- YSCALEYUV2PACKED1b(%%REGBP, %5)
- WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
- "pop %%"REG_BP" \n\t"
- "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
-
- :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
- "a" (&c->redDither)
- );
- return;
- }
- }
-#endif
- if( uvalpha < 2048 )
- {
- YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
- }else{
- YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
- }
+ "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
+#endif
+
+ WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ case PIX_FMT_YUYV422:
+ asm volatile(
+ "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "push %%"REG_BP" \n\t"
+ YSCALEYUV2PACKED1b(%%REGBP, %5)
+ WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
+ "pop %%"REG_BP" \n\t"
+ "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
+
+ :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
+ "a" (&c->redDither)
+ );
+ return;
+ }
+ }
+#endif /* HAVE_MMX */
+ if (uvalpha < 2048)
+ {
+ YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
+ }else{
+ YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
+ }
}
//FIXME yuy2* can read up to 7 samples too much
@@ -1706,910 +1705,896 @@ static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *
static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
{
#ifdef HAVE_MMX
- asm volatile(
- "movq "MANGLE(bm01010101)", %%mm2\n\t"
- "mov %0, %%"REG_a" \n\t"
- "1: \n\t"
- "movq (%1, %%"REG_a",2), %%mm0 \n\t"
- "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
- "pand %%mm2, %%mm0 \n\t"
- "pand %%mm2, %%mm1 \n\t"
- "packuswb %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%2, %%"REG_a") \n\t"
- "add $8, %%"REG_a" \n\t"
- " js 1b \n\t"
- : : "g" (-width), "r" (src+width*2), "r" (dst+width)
- : "%"REG_a
- );
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm2 \n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",2), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
+ "pand %%mm2, %%mm0 \n\t"
+ "pand %%mm2, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" (-width), "r" (src+width*2), "r" (dst+width)
+ : "%"REG_a
+ );
#else
- int i;
- for(i=0; i<width; i++)
- dst[i]= src[2*i];
+ int i;
+ for (i=0; i<width; i++)
+ dst[i]= src[2*i];
#endif
}
static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
{
#ifdef HAVE_MMX
- asm volatile(
- "movq "MANGLE(bm01010101)", %%mm4\n\t"
- "mov %0, %%"REG_a" \n\t"
- "1: \n\t"
- "movq (%1, %%"REG_a",4), %%mm0 \n\t"
- "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
- "psrlw $8, %%mm0 \n\t"
- "psrlw $8, %%mm1 \n\t"
- "packuswb %%mm1, %%mm0 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "psrlw $8, %%mm0 \n\t"
- "pand %%mm4, %%mm1 \n\t"
- "packuswb %%mm0, %%mm0 \n\t"
- "packuswb %%mm1, %%mm1 \n\t"
- "movd %%mm0, (%3, %%"REG_a") \n\t"
- "movd %%mm1, (%2, %%"REG_a") \n\t"
- "add $4, %%"REG_a" \n\t"
- " js 1b \n\t"
- : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
- : "%"REG_a
- );
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm4 \n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",4), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "movd %%mm1, (%2, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
+ : "%"REG_a
+ );
#else
- int i;
- for(i=0; i<width; i++)
- {
- dstU[i]= src1[4*i + 1];
- dstV[i]= src1[4*i + 3];
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ dstU[i]= src1[4*i + 1];
+ dstV[i]= src1[4*i + 3];
+ }
#endif
- assert(src1 == src2);
+ assert(src1 == src2);
}
//this is almost identical to the previous, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
{
#ifdef HAVE_MMX
- asm volatile(
- "mov %0, %%"REG_a" \n\t"
- "1: \n\t"
- "movq (%1, %%"REG_a",2), %%mm0 \n\t"
- "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
- "psrlw $8, %%mm0 \n\t"
- "psrlw $8, %%mm1 \n\t"
- "packuswb %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%2, %%"REG_a") \n\t"
- "add $8, %%"REG_a" \n\t"
- " js 1b \n\t"
- : : "g" (-width), "r" (src+width*2), "r" (dst+width)
- : "%"REG_a
- );
+ asm volatile(
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",2), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "psrlw $8, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" (-width), "r" (src+width*2), "r" (dst+width)
+ : "%"REG_a
+ );
#else
- int i;
- for(i=0; i<width; i++)
- dst[i]= src[2*i+1];
+ int i;
+ for (i=0; i<width; i++)
+ dst[i]= src[2*i+1];
#endif
}
static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
{
#ifdef HAVE_MMX
- asm volatile(
- "movq "MANGLE(bm01010101)", %%mm4\n\t"
- "mov %0, %%"REG_a" \n\t"
- "1: \n\t"
- "movq (%1, %%"REG_a",4), %%mm0 \n\t"
- "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
- "pand %%mm4, %%mm0 \n\t"
- "pand %%mm4, %%mm1 \n\t"
- "packuswb %%mm1, %%mm0 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "psrlw $8, %%mm0 \n\t"
- "pand %%mm4, %%mm1 \n\t"
- "packuswb %%mm0, %%mm0 \n\t"
- "packuswb %%mm1, %%mm1 \n\t"
- "movd %%mm0, (%3, %%"REG_a") \n\t"
- "movd %%mm1, (%2, %%"REG_a") \n\t"
- "add $4, %%"REG_a" \n\t"
- " js 1b \n\t"
- : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
- : "%"REG_a
- );
+ asm volatile(
+ "movq "MANGLE(bm01010101)", %%mm4 \n\t"
+ "mov %0, %%"REG_a" \n\t"
+ "1: \n\t"
+ "movq (%1, %%"REG_a",4), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+ "pand %%mm4, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm1, %%mm0 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "psrlw $8, %%mm0 \n\t"
+ "pand %%mm4, %%mm1 \n\t"
+ "packuswb %%mm0, %%mm0 \n\t"
+ "packuswb %%mm1, %%mm1 \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "movd %%mm1, (%2, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
+ : "%"REG_a
+ );
#else
- int i;
- for(i=0; i<width; i++)
- {
- dstU[i]= src1[4*i + 0];
- dstV[i]= src1[4*i + 2];
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ dstU[i]= src1[4*i + 0];
+ dstV[i]= src1[4*i + 2];
+ }
#endif
- assert(src1 == src2);
+ assert(src1 == src2);
}
static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
{
- int i;
- for(i=0; i<width; i++)
- {
- int b= ((uint32_t*)src)[i]&0xFF;
- int g= (((uint32_t*)src)[i]>>8)&0xFF;
- int r= (((uint32_t*)src)[i]>>16)&0xFF;
-
- dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int b= ((uint32_t*)src)[i]&0xFF;
+ int g= (((uint32_t*)src)[i]>>8)&0xFF;
+ int r= (((uint32_t*)src)[i]>>16)&0xFF;
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+ }
}
static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
- int i;
- assert(src1 == src2);
- for(i=0; i<width; i++)
- {
- const int a= ((uint32_t*)src1)[2*i+0];
- const int e= ((uint32_t*)src1)[2*i+1];
- const int l= (a&0xFF00FF) + (e&0xFF00FF);
- const int h= (a&0x00FF00) + (e&0x00FF00);
- const int b= l&0x3FF;
- const int g= h>>8;
- const int r= l>>16;
-
- dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
- dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
- }
+ int i;
+ assert(src1 == src2);
+ for (i=0; i<width; i++)
+ {
+ const int a= ((uint32_t*)src1)[2*i+0];
+ const int e= ((uint32_t*)src1)[2*i+1];
+ const int l= (a&0xFF00FF) + (e&0xFF00FF);
+ const int h= (a&0x00FF00) + (e&0x00FF00);
+ const int b= l&0x3FF;
+ const int g= h>>8;
+ const int r= l>>16;
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ }
}
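
bgr32ToUV (and rgb32ToUV further down) produces the horizontally subsampled chroma by summing two adjacent pixels with a small SWAR trick: under the 0xFF00FF and 0x00FF00 masks the per-channel sums (at most 510 each) stay inside their own bit fields, so one 32-bit addition sums blue and red at once, and the +1 in the final shift divides by two. The trick in isolation, with hypothetical names:

    /* Channel-wise sum of two packed 0x00RRGGBB pixels, no carries. */
    static void pixpair_sum(uint32_t a, uint32_t e, int *r, int *g, int *b)
    {
        uint32_t l = (a & 0xFF00FF) + (e & 0xFF00FF);  /* blue and red */
        uint32_t h = (a & 0x00FF00) + (e & 0x00FF00);  /* green        */
        *b = l & 0x3FF;    /* b0 + b1 */
        *g = h >> 8;       /* g0 + g1 */
        *r = l >> 16;      /* r0 + r1 */
    }
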
static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
{
#ifdef HAVE_MMX
- asm volatile(
- "mov %2, %%"REG_a" \n\t"
- "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
- "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 64(%0, %%"REG_d") \n\t"
- "movd (%0, %%"REG_d"), %%mm0 \n\t"
- "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
- "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "pmaddwd %%mm6, %%mm0 \n\t"
- "pmaddwd %%mm6, %%mm1 \n\t"
- "pmaddwd %%mm6, %%mm2 \n\t"
- "pmaddwd %%mm6, %%mm3 \n\t"
+ asm volatile(
+ "mov %2, %%"REG_a" \n\t"
+ "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_d") \n\t"
+ "movd (%0, %%"REG_d"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
- "psrad $8, %%mm0 \n\t"
- "psrad $8, %%mm1 \n\t"
- "psrad $8, %%mm2 \n\t"
- "psrad $8, %%mm3 \n\t"
-#endif
- "packssdw %%mm1, %%mm0 \n\t"
- "packssdw %%mm3, %%mm2 \n\t"
- "pmaddwd %%mm5, %%mm0 \n\t"
- "pmaddwd %%mm5, %%mm2 \n\t"
- "packssdw %%mm2, %%mm0 \n\t"
- "psraw $7, %%mm0 \n\t"
-
- "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
- "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm1 \n\t"
- "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
- "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm3 \n\t"
- "pmaddwd %%mm6, %%mm4 \n\t"
- "pmaddwd %%mm6, %%mm1 \n\t"
- "pmaddwd %%mm6, %%mm2 \n\t"
- "pmaddwd %%mm6, %%mm3 \n\t"
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "packssdw %%mm2, %%mm0 \n\t"
+ "psraw $7, %%mm0 \n\t"
+
+ "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm1 \n\t"
+ "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
+ "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm1 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
+ "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
- "psrad $8, %%mm4 \n\t"
- "psrad $8, %%mm1 \n\t"
- "psrad $8, %%mm2 \n\t"
- "psrad $8, %%mm3 \n\t"
-#endif
- "packssdw %%mm1, %%mm4 \n\t"
- "packssdw %%mm3, %%mm2 \n\t"
- "pmaddwd %%mm5, %%mm4 \n\t"
- "pmaddwd %%mm5, %%mm2 \n\t"
- "add $24, %%"REG_d" \n\t"
- "packssdw %%mm2, %%mm4 \n\t"
- "psraw $7, %%mm4 \n\t"
-
- "packuswb %%mm4, %%mm0 \n\t"
- "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
-
- "movq %%mm0, (%1, %%"REG_a") \n\t"
- "add $8, %%"REG_a" \n\t"
- " js 1b \n\t"
- : : "r" (src+width*3), "r" (dst+width), "g" (-width)
- : "%"REG_a, "%"REG_d
- );
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm1, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm2 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm2 \n\t"
+ "add $24, %%"REG_d" \n\t"
+ "packssdw %%mm2, %%mm4 \n\t"
+ "psraw $7, %%mm4 \n\t"
+
+ "packuswb %%mm4, %%mm0 \n\t"
+ "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
+
+ "movq %%mm0, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src+width*3), "r" (dst+width), "g" (-width)
+ : "%"REG_a, "%"REG_d
+ );
#else
- int i;
- for(i=0; i<width; i++)
- {
- int b= src[i*3+0];
- int g= src[i*3+1];
- int r= src[i*3+2];
-
- dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
- }
-#endif
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int b= src[i*3+0];
+ int g= src[i*3+1];
+ int r= src[i*3+2];
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+ }
+#endif /* HAVE_MMX */
}
static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
{
#ifdef HAVE_MMX
- asm volatile(
- "mov %3, %%"REG_a" \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
- "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
- "pxor %%mm7, %%mm7 \n\t"
- "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
- "add %%"REG_d", %%"REG_d" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- PREFETCH" 64(%0, %%"REG_d") \n\t"
+ asm volatile(
+ "mov %3, %%"REG_a" \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
+ "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
+ "pxor %%mm7, %%mm7 \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
+ "add %%"REG_d", %%"REG_d" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ PREFETCH" 64(%0, %%"REG_d") \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- "movq (%0, %%"REG_d"), %%mm0 \n\t"
- "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
- "movq %%mm0, %%mm1 \n\t"
- "movq %%mm2, %%mm3 \n\t"
- "psrlq $24, %%mm0 \n\t"
- "psrlq $24, %%mm2 \n\t"
- PAVGB(%%mm1, %%mm0)
- PAVGB(%%mm3, %%mm2)
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
+ "movq (%0, %%"REG_d"), %%mm0 \n\t"
+ "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
+ "movq %%mm0, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm0 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB(%%mm1, %%mm0)
+ PAVGB(%%mm3, %%mm2)
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
#else
- "movd (%0, %%"REG_d"), %%mm0 \n\t"
- "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "paddw %%mm2, %%mm0 \n\t"
- "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
- "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "paddw %%mm4, %%mm2 \n\t"
- "psrlw $1, %%mm0 \n\t"
- "psrlw $1, %%mm2 \n\t"
-#endif
- "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
- "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
-
- "pmaddwd %%mm0, %%mm1 \n\t"
- "pmaddwd %%mm2, %%mm3 \n\t"
- "pmaddwd %%mm6, %%mm0 \n\t"
- "pmaddwd %%mm6, %%mm2 \n\t"
+ "movd (%0, %%"REG_d"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "paddw %%mm2, %%mm0 \n\t"
+ "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "paddw %%mm4, %%mm2 \n\t"
+ "psrlw $1, %%mm0 \n\t"
+ "psrlw $1, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm0, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
- "psrad $8, %%mm0 \n\t"
- "psrad $8, %%mm1 \n\t"
- "psrad $8, %%mm2 \n\t"
- "psrad $8, %%mm3 \n\t"
-#endif
- "packssdw %%mm2, %%mm0 \n\t"
- "packssdw %%mm3, %%mm1 \n\t"
- "pmaddwd %%mm5, %%mm0 \n\t"
- "pmaddwd %%mm5, %%mm1 \n\t"
- "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
- "psraw $7, %%mm0 \n\t"
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm0 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm0 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
+ "psraw $7, %%mm0 \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
- "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
- "movq %%mm4, %%mm1 \n\t"
- "movq %%mm2, %%mm3 \n\t"
- "psrlq $24, %%mm4 \n\t"
- "psrlq $24, %%mm2 \n\t"
- PAVGB(%%mm1, %%mm4)
- PAVGB(%%mm3, %%mm2)
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
+ "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
+ "movq %%mm4, %%mm1 \n\t"
+ "movq %%mm2, %%mm3 \n\t"
+ "psrlq $24, %%mm4 \n\t"
+ "psrlq $24, %%mm2 \n\t"
+ PAVGB(%%mm1, %%mm4)
+ PAVGB(%%mm3, %%mm2)
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
#else
- "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
- "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "paddw %%mm2, %%mm4 \n\t"
- "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
- "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm5 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "paddw %%mm5, %%mm2 \n\t"
- "movq "MANGLE(w1111)", %%mm5 \n\t"
- "psrlw $2, %%mm4 \n\t"
- "psrlw $2, %%mm2 \n\t"
-#endif
- "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
- "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
-
- "pmaddwd %%mm4, %%mm1 \n\t"
- "pmaddwd %%mm2, %%mm3 \n\t"
- "pmaddwd %%mm6, %%mm4 \n\t"
- "pmaddwd %%mm6, %%mm2 \n\t"
+ "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "paddw %%mm2, %%mm4 \n\t"
+ "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
+ "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm5 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "paddw %%mm5, %%mm2 \n\t"
+ "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
+ "psrlw $2, %%mm4 \n\t"
+ "psrlw $2, %%mm2 \n\t"
+#endif
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
+ "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
+
+ "pmaddwd %%mm4, %%mm1 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
- "psrad $8, %%mm4 \n\t"
- "psrad $8, %%mm1 \n\t"
- "psrad $8, %%mm2 \n\t"
- "psrad $8, %%mm3 \n\t"
-#endif
- "packssdw %%mm2, %%mm4 \n\t"
- "packssdw %%mm3, %%mm1 \n\t"
- "pmaddwd %%mm5, %%mm4 \n\t"
- "pmaddwd %%mm5, %%mm1 \n\t"
- "add $24, %%"REG_d" \n\t"
- "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
- "psraw $7, %%mm4 \n\t"
-
- "movq %%mm0, %%mm1 \n\t"
- "punpckldq %%mm4, %%mm0 \n\t"
- "punpckhdq %%mm4, %%mm1 \n\t"
- "packsswb %%mm1, %%mm0 \n\t"
- "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
-
- "movd %%mm0, (%1, %%"REG_a") \n\t"
- "punpckhdq %%mm0, %%mm0 \n\t"
- "movd %%mm0, (%2, %%"REG_a") \n\t"
- "add $4, %%"REG_a" \n\t"
- " js 1b \n\t"
- : : "r" (src1+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
- : "%"REG_a, "%"REG_d
- );
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm1 \n\t"
+ "psrad $8, %%mm2 \n\t"
+ "psrad $8, %%mm3 \n\t"
+#endif
+ "packssdw %%mm2, %%mm4 \n\t"
+ "packssdw %%mm3, %%mm1 \n\t"
+ "pmaddwd %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm5, %%mm1 \n\t"
+ "add $24, %%"REG_d" \n\t"
+ "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
+ "psraw $7, %%mm4 \n\t"
+
+ "movq %%mm0, %%mm1 \n\t"
+ "punpckldq %%mm4, %%mm0 \n\t"
+ "punpckhdq %%mm4, %%mm1 \n\t"
+ "packsswb %%mm1, %%mm0 \n\t"
+ "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
+
+ "movd %%mm0, (%1, %%"REG_a") \n\t"
+ "punpckhdq %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%2, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
+ " js 1b \n\t"
+ : : "r" (src1+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
+ : "%"REG_a, "%"REG_d
+ );
#else
- int i;
- for(i=0; i<width; i++)
- {
- int b= src1[6*i + 0] + src1[6*i + 3];
- int g= src1[6*i + 1] + src1[6*i + 4];
- int r= src1[6*i + 2] + src1[6*i + 5];
-
- dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
- dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
- }
-#endif
- assert(src1 == src2);
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int b= src1[6*i + 0] + src1[6*i + 3];
+ int g= src1[6*i + 1] + src1[6*i + 4];
+ int r= src1[6*i + 2] + src1[6*i + 5];
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ }
+#endif /* HAVE_MMX */
+ assert(src1 == src2);
}
-static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
+static inline void RENAME(rgb16ToY)(uint8_t *dst, uint8_t *src, int width)
{
- int i;
- for(i=0; i<width; i++)
- {
- int d= ((uint16_t*)src)[i];
- int b= d&0x1F;
- int g= (d>>5)&0x3F;
- int r= (d>>11)&0x1F;
-
- dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int b= d&0x1F;
+ int g= (d>>5)&0x3F;
+ int r= (d>>11)&0x1F;
+
+ dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
+ }
}
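
The doubled coefficients and the reduced shift in the 15/16 bpp luma routines compensate for the narrower channels: shifting by RGB2YUV_SHIFT-2 scales the result by 4, and the extra factor of 2 on the 5-bit R and B terms scales those by 8, so all three channels end up on the same 8-bit scale as in the 24/32 bpp paths. A per-pixel sketch for one 5-6-5 sample, using the field layout of the routine above:

    /* Luma from one 16-bit 5-6-5 sample (same math as rgb16ToY above). */
    static uint8_t rgb565_to_y(uint16_t d)
    {
        int b = d & 0x1F;          /* 5 bits */
        int g = (d >> 5) & 0x3F;   /* 6 bits */
        int r = (d >> 11) & 0x1F;  /* 5 bits */
        return ((2*RY*r + GY*g + 2*BY*b) >> (RGB2YUV_SHIFT - 2)) + 16;
    }
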
-static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+static inline void RENAME(rgb16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
- int i;
- assert(src1==src2);
- for(i=0; i<width; i++)
- {
- int d0= ((uint32_t*)src1)[i];
-
- int dl= (d0&0x07E0F81F);
- int dh= ((d0>>5)&0x07C0F83F);
-
- int dh2= (dh>>11) + (dh<<21);
- int d= dh2 + dl;
-
- int b= d&0x7F;
- int r= (d>>11)&0x7F;
- int g= d>>21;
- dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
- dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
- }
+ int i;
+ assert(src1==src2);
+ for (i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+
+ int dl= (d0&0x07E0F81F);
+ int dh= ((d0>>5)&0x07C0F83F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int b= d&0x7F;
+ int r= (d>>11)&0x7F;
+ int g= d>>21;
+ dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
+ dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
+ }
}
-static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
+static inline void RENAME(rgb15ToY)(uint8_t *dst, uint8_t *src, int width)
{
- int i;
- for(i=0; i<width; i++)
- {
- int d= ((uint16_t*)src)[i];
- int b= d&0x1F;
- int g= (d>>5)&0x1F;
- int r= (d>>10)&0x1F;
-
- dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int b= d&0x1F;
+ int g= (d>>5)&0x1F;
+ int r= (d>>10)&0x1F;
+
+ dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
+ }
}
-static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+static inline void RENAME(rgb15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
- int i;
- assert(src1==src2);
- for(i=0; i<width; i++)
- {
- int d0= ((uint32_t*)src1)[i];
-
- int dl= (d0&0x03E07C1F);
- int dh= ((d0>>5)&0x03E0F81F);
-
- int dh2= (dh>>11) + (dh<<21);
- int d= dh2 + dl;
-
- int b= d&0x7F;
- int r= (d>>10)&0x7F;
- int g= d>>21;
- dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
- dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
- }
+ int i;
+ assert(src1==src2);
+ for (i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+
+ int dl= (d0&0x03E07C1F);
+ int dh= ((d0>>5)&0x03E0F81F);
+
+ int dh2= (dh>>11) + (dh<<21);
+ int d= dh2 + dl;
+
+ int b= d&0x7F;
+ int r= (d>>10)&0x7F;
+ int g= d>>21;
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
+ }
}
static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
{
- int i;
- for(i=0; i<width; i++)
- {
- int r= ((uint32_t*)src)[i]&0xFF;
- int g= (((uint32_t*)src)[i]>>8)&0xFF;
- int b= (((uint32_t*)src)[i]>>16)&0xFF;
-
- dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int r= ((uint32_t*)src)[i]&0xFF;
+ int g= (((uint32_t*)src)[i]>>8)&0xFF;
+ int b= (((uint32_t*)src)[i]>>16)&0xFF;
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+ }
}
static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
- int i;
- assert(src1==src2);
- for(i=0; i<width; i++)
- {
- const int a= ((uint32_t*)src1)[2*i+0];
- const int e= ((uint32_t*)src1)[2*i+1];
- const int l= (a&0xFF00FF) + (e&0xFF00FF);
- const int h= (a&0x00FF00) + (e&0x00FF00);
- const int r= l&0x3FF;
- const int g= h>>8;
- const int b= l>>16;
-
- dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
- dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
- }
+ int i;
+ assert(src1==src2);
+ for (i=0; i<width; i++)
+ {
+ const int a= ((uint32_t*)src1)[2*i+0];
+ const int e= ((uint32_t*)src1)[2*i+1];
+ const int l= (a&0xFF00FF) + (e&0xFF00FF);
+ const int h= (a&0x00FF00) + (e&0x00FF00);
+ const int r= l&0x3FF;
+ const int g= h>>8;
+ const int b= l>>16;
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ }
}
static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
{
- int i;
- for(i=0; i<width; i++)
- {
- int r= src[i*3+0];
- int g= src[i*3+1];
- int b= src[i*3+2];
-
- dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int r= src[i*3+0];
+ int g= src[i*3+1];
+ int b= src[i*3+2];
+
+ dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
+ }
}
static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
- int i;
- assert(src1==src2);
- for(i=0; i<width; i++)
- {
- int r= src1[6*i + 0] + src1[6*i + 3];
- int g= src1[6*i + 1] + src1[6*i + 4];
- int b= src1[6*i + 2] + src1[6*i + 5];
-
- dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
- dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
- }
+ int i;
+ assert(src1==src2);
+ for (i=0; i<width; i++)
+ {
+ int r= src1[6*i + 0] + src1[6*i + 3];
+ int g= src1[6*i + 1] + src1[6*i + 4];
+ int b= src1[6*i + 2] + src1[6*i + 5];
+
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
+ }
}
-static inline void RENAME(rgb16ToY)(uint8_t *dst, uint8_t *src, int width)
+static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
{
- int i;
- for(i=0; i<width; i++)
- {
- int d= ((uint16_t*)src)[i];
- int r= d&0x1F;
- int g= (d>>5)&0x3F;
- int b= (d>>11)&0x1F;
-
- dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int r= d&0x1F;
+ int g= (d>>5)&0x3F;
+ int b= (d>>11)&0x1F;
+
+ dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
+ }
}
-static inline void RENAME(rgb16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
- int i;
- assert(src1 == src2);
- for(i=0; i<width; i++)
- {
- int d0= ((uint32_t*)src1)[i];
-
- int dl= (d0&0x07E0F81F);
- int dh= ((d0>>5)&0x07C0F83F);
-
- int dh2= (dh>>11) + (dh<<21);
- int d= dh2 + dl;
-
- int r= d&0x7F;
- int b= (d>>11)&0x7F;
- int g= d>>21;
- dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
- dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
- }
+ int i;
+ assert(src1 == src2);
+ for (i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+
+ int dl= (d0&0x07E0F81F);
+ int d= dl + (((d0>>16) + (d0<<16))&0x07E0F81F);
+
+ int r= d&0x3F;
+ int b= (d>>11)&0x3F;
+ int g= d>>21;
+ dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
+ dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
+ }
}
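
The rewritten bgr16ToUV (and bgr15ToUV below) drops the old dl/dh/dh2 shuffle in favour of a shorter form of the same two-pixel averaging: masking with 0x07E0F81F keeps R and B of the low 16-bit sample plus G of the high one, and adding the half-swapped word then sums both samples per channel without cross-channel carries, since the 6-bit R/B sums and the 7-bit G sum still fit inside their masked fields. In isolation, with hypothetical names:

    /* Channel-wise sum of two 5-6-5 samples held in one 32-bit word. */
    static void rgb565_pair_sum(uint32_t d0, int *r, int *g, int *b)
    {
        uint32_t d = (d0 & 0x07E0F81F)
                   + (((d0 >> 16) + (d0 << 16)) & 0x07E0F81F);
        *r = d & 0x3F;          /* r0 + r1, up to 6 bits */
        *b = (d >> 11) & 0x3F;  /* b0 + b1, up to 6 bits */
        *g = d >> 21;           /* g0 + g1, up to 7 bits */
    }
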
-static inline void RENAME(rgb15ToY)(uint8_t *dst, uint8_t *src, int width)
+static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
{
- int i;
- for(i=0; i<width; i++)
- {
- int d= ((uint16_t*)src)[i];
- int r= d&0x1F;
- int g= (d>>5)&0x1F;
- int b= (d>>10)&0x1F;
-
- dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int d= ((uint16_t*)src)[i];
+ int r= d&0x1F;
+ int g= (d>>5)&0x1F;
+ int b= (d>>10)&0x1F;
+
+ dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
+ }
}
-static inline void RENAME(rgb15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
+static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
- int i;
- assert(src1 == src2);
- for(i=0; i<width; i++)
- {
- int d0= ((uint32_t*)src1)[i];
-
- int dl= (d0&0x03E07C1F);
- int dh= ((d0>>5)&0x03E0F81F);
-
- int dh2= (dh>>11) + (dh<<21);
- int d= dh2 + dl;
-
- int g= d&0x7F;
- int r= (d>>10)&0x7F;
- int b= d>>21;
- dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
- dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
- }
+ int i;
+ assert(src1 == src2);
+ for (i=0; i<width; i++)
+ {
+ int d0= ((uint32_t*)src1)[i];
+
+ int dl= (d0&0x03E07C1F);
+ int d= dl + (((d0>>16) + (d0<<16))&0x03E07C1F);
+
+ int r= d&0x3F;
+ int b= (d>>10)&0x3F;
+ int g= d>>21;
+ dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
+ dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
+ }
}
static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, int width, uint32_t *pal)
{
- int i;
- for(i=0; i<width; i++)
- {
- int d= src[i];
- int b= pal[d] &0xFF;
- int g=(pal[d]>>8 )&0xFF;
- int r= pal[d]>>16;
-
- dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
- }
+ int i;
+ for (i=0; i<width; i++)
+ {
+ int d= src[i];
+
+ dst[i]= pal[d] & 0xFF;
+ }
}
static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width, uint32_t *pal)
{
- int i;
- assert(src1 == src2);
- for(i=0; i<width; i++)
- {
- int d0= src1[2*i ];
- int d1= src1[2*i+1];
- int p = (pal[d0]&0xFF00FF) + (pal[d1]&0xFF00FF);
- int g = (pal[d0]+pal[d1]-p)>>8;
- int b= p&0x1FF;
- int r= p>>16;
-
- dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
- dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
- }
+ int i;
+ assert(src1 == src2);
+ for (i=0; i<width; i++)
+ {
+ int p= pal[src1[i]];
+
+ dstU[i]= p>>8;
+ dstV[i]= p>>16;
+ }
}
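
Note that the rewritten palToY/palToUV no longer do any RGB arithmetic: they expect pal[] to hold already-converted entries, with Y in the low byte, U in bits 8-15 and V in bits 16-23, and simply index into it (one palette index per chroma sample). How that palette gets filled is outside this template; a hypothetical per-entry setup using the same coefficients and offsets as the RGB paths above might look roughly like this:

    /* Hypothetical RGB -> packed YUV palette entry, as palTo* expects. */
    static uint32_t rgb_pal_entry_to_yuv(int r, int g, int b)
    {
        int y = ((RY*r + GY*g + BY*b) >> RGB2YUV_SHIFT) + 16;
        int u = ((RU*r + GU*g + BU*b) >> RGB2YUV_SHIFT) + 128;
        int v = ((RV*r + GV*g + BV*b) >> RGB2YUV_SHIFT) + 128;
        return y | (u << 8) | (v << 16);
    }
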
// Bilinear / Bicubic scaling
static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
- int16_t *filter, int16_t *filterPos, long filterSize)
+ int16_t *filter, int16_t *filterPos, long filterSize)
{
#ifdef HAVE_MMX
- assert(filterSize % 4 == 0 && filterSize>0);
- if(filterSize==4) // allways true for upscaling, sometimes for down too
- {
- long counter= -2*dstW;
- filter-= counter*2;
- filterPos-= counter/2;
- dst-= counter/2;
- asm volatile(
+ assert(filterSize % 4 == 0 && filterSize>0);
+ if (filterSize==4) // Always true for upscaling, sometimes for down, too.
+ {
+ long counter= -2*dstW;
+ filter-= counter*2;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
#if defined(PIC)
- "push %%"REG_b" \n\t"
-#endif
- "pxor %%mm7, %%mm7 \n\t"
- "movq "MANGLE(w02)", %%mm6 \n\t"
- "push %%"REG_BP" \n\t" // we use 7 regs here ...
- "mov %%"REG_a", %%"REG_BP" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- "movzwl (%2, %%"REG_BP"), %%eax \n\t"
- "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
- "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
- "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
- "movd (%3, %%"REG_a"), %%mm0 \n\t"
- "movd (%3, %%"REG_b"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "pmaddwd %%mm1, %%mm0 \n\t"
- "pmaddwd %%mm2, %%mm3 \n\t"
- "psrad $8, %%mm0 \n\t"
- "psrad $8, %%mm3 \n\t"
- "packssdw %%mm3, %%mm0 \n\t"
- "pmaddwd %%mm6, %%mm0 \n\t"
- "packssdw %%mm0, %%mm0 \n\t"
- "movd %%mm0, (%4, %%"REG_BP") \n\t"
- "add $4, %%"REG_BP" \n\t"
- " jnc 1b \n\t"
-
- "pop %%"REG_BP" \n\t"
+ "push %%"REG_b" \n\t"
+#endif
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ "push %%"REG_BP" \n\t" // we use 7 regs here ...
+ "mov %%"REG_a", %%"REG_BP" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movzwl (%2, %%"REG_BP"), %%eax \n\t"
+ "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
+ "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
+ "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
+ "movd (%3, %%"REG_a"), %%mm0 \n\t"
+ "movd (%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm3 \n\t"
+ "packssdw %%mm3, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "packssdw %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%4, %%"REG_BP") \n\t"
+ "add $4, %%"REG_BP" \n\t"
+ " jnc 1b \n\t"
+
+ "pop %%"REG_BP" \n\t"
#if defined(PIC)
- "pop %%"REG_b" \n\t"
+ "pop %%"REG_b" \n\t"
#endif
- : "+a" (counter)
- : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
+ : "+a" (counter)
+ : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
#if !defined(PIC)
- : "%"REG_b
+ : "%"REG_b
#endif
- );
- }
- else if(filterSize==8)
- {
- long counter= -2*dstW;
- filter-= counter*4;
- filterPos-= counter/2;
- dst-= counter/2;
- asm volatile(
+ );
+ }
+ else if (filterSize==8)
+ {
+ long counter= -2*dstW;
+ filter-= counter*4;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
#if defined(PIC)
- "push %%"REG_b" \n\t"
-#endif
- "pxor %%mm7, %%mm7 \n\t"
- "movq "MANGLE(w02)", %%mm6 \n\t"
- "push %%"REG_BP" \n\t" // we use 7 regs here ...
- "mov %%"REG_a", %%"REG_BP" \n\t"
- ASMALIGN(4)
- "1: \n\t"
- "movzwl (%2, %%"REG_BP"), %%eax \n\t"
- "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
- "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
- "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
- "movd (%3, %%"REG_a"), %%mm0 \n\t"
- "movd (%3, %%"REG_b"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "pmaddwd %%mm1, %%mm0 \n\t"
- "pmaddwd %%mm2, %%mm3 \n\t"
-
- "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
- "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
- "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
- "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
- "punpcklbw %%mm7, %%mm4 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "pmaddwd %%mm1, %%mm4 \n\t"
- "pmaddwd %%mm2, %%mm5 \n\t"
- "paddd %%mm4, %%mm0 \n\t"
- "paddd %%mm5, %%mm3 \n\t"
-
- "psrad $8, %%mm0 \n\t"
- "psrad $8, %%mm3 \n\t"
- "packssdw %%mm3, %%mm0 \n\t"
- "pmaddwd %%mm6, %%mm0 \n\t"
- "packssdw %%mm0, %%mm0 \n\t"
- "movd %%mm0, (%4, %%"REG_BP") \n\t"
- "add $4, %%"REG_BP" \n\t"
- " jnc 1b \n\t"
-
- "pop %%"REG_BP" \n\t"
+ "push %%"REG_b" \n\t"
+#endif
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ "push %%"REG_BP" \n\t" // we use 7 regs here ...
+ "mov %%"REG_a", %%"REG_BP" \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "movzwl (%2, %%"REG_BP"), %%eax \n\t"
+ "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
+ "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
+ "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
+ "movd (%3, %%"REG_a"), %%mm0 \n\t"
+ "movd (%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+
+ "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
+ "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
+ "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
+ "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm4 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm4 \n\t"
+ "pmaddwd %%mm2, %%mm5 \n\t"
+ "paddd %%mm4, %%mm0 \n\t"
+ "paddd %%mm5, %%mm3 \n\t"
+
+ "psrad $8, %%mm0 \n\t"
+ "psrad $8, %%mm3 \n\t"
+ "packssdw %%mm3, %%mm0 \n\t"
+ "pmaddwd %%mm6, %%mm0 \n\t"
+ "packssdw %%mm0, %%mm0 \n\t"
+ "movd %%mm0, (%4, %%"REG_BP") \n\t"
+ "add $4, %%"REG_BP" \n\t"
+ " jnc 1b \n\t"
+
+ "pop %%"REG_BP" \n\t"
#if defined(PIC)
- "pop %%"REG_b" \n\t"
+ "pop %%"REG_b" \n\t"
#endif
- : "+a" (counter)
- : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
+ : "+a" (counter)
+ : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
#if !defined(PIC)
- : "%"REG_b
+ : "%"REG_b
#endif
- );
- }
- else
- {
- uint8_t *offset = src+filterSize;
- long counter= -2*dstW;
-// filter-= counter*filterSize/2;
- filterPos-= counter/2;
- dst-= counter/2;
- asm volatile(
- "pxor %%mm7, %%mm7 \n\t"
- "movq "MANGLE(w02)", %%mm6 \n\t"
- ASMALIGN(4)
- "1: \n\t"
- "mov %2, %%"REG_c" \n\t"
- "movzwl (%%"REG_c", %0), %%eax \n\t"
- "movzwl 2(%%"REG_c", %0), %%edx \n\t"
- "mov %5, %%"REG_c" \n\t"
- "pxor %%mm4, %%mm4 \n\t"
- "pxor %%mm5, %%mm5 \n\t"
- "2: \n\t"
- "movq (%1), %%mm1 \n\t"
- "movq (%1, %6), %%mm3 \n\t"
- "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
- "movd (%%"REG_c", %%"REG_d"), %%mm2\n\t"
- "punpcklbw %%mm7, %%mm0 \n\t"
- "punpcklbw %%mm7, %%mm2 \n\t"
- "pmaddwd %%mm1, %%mm0 \n\t"
- "pmaddwd %%mm2, %%mm3 \n\t"
- "paddd %%mm3, %%mm5 \n\t"
- "paddd %%mm0, %%mm4 \n\t"
- "add $8, %1 \n\t"
- "add $4, %%"REG_c" \n\t"
- "cmp %4, %%"REG_c" \n\t"
- " jb 2b \n\t"
- "add %6, %1 \n\t"
- "psrad $8, %%mm4 \n\t"
- "psrad $8, %%mm5 \n\t"
- "packssdw %%mm5, %%mm4 \n\t"
- "pmaddwd %%mm6, %%mm4 \n\t"
- "packssdw %%mm4, %%mm4 \n\t"
- "mov %3, %%"REG_a" \n\t"
- "movd %%mm4, (%%"REG_a", %0) \n\t"
- "add $4, %0 \n\t"
- " jnc 1b \n\t"
-
- : "+r" (counter), "+r" (filter)
- : "m" (filterPos), "m" (dst), "m"(offset),
- "m" (src), "r" (filterSize*2)
- : "%"REG_a, "%"REG_c, "%"REG_d
- );
- }
+ );
+ }
+ else
+ {
+ uint8_t *offset = src+filterSize;
+ long counter= -2*dstW;
+ //filter-= counter*filterSize/2;
+ filterPos-= counter/2;
+ dst-= counter/2;
+ asm volatile(
+ "pxor %%mm7, %%mm7 \n\t"
+ "movq "MANGLE(w02)", %%mm6 \n\t"
+ ASMALIGN(4)
+ "1: \n\t"
+ "mov %2, %%"REG_c" \n\t"
+ "movzwl (%%"REG_c", %0), %%eax \n\t"
+ "movzwl 2(%%"REG_c", %0), %%edx \n\t"
+ "mov %5, %%"REG_c" \n\t"
+ "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm5, %%mm5 \n\t"
+ "2: \n\t"
+ "movq (%1), %%mm1 \n\t"
+ "movq (%1, %6), %%mm3 \n\t"
+ "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
+ "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
+ "punpcklbw %%mm7, %%mm0 \n\t"
+ "punpcklbw %%mm7, %%mm2 \n\t"
+ "pmaddwd %%mm1, %%mm0 \n\t"
+ "pmaddwd %%mm2, %%mm3 \n\t"
+ "paddd %%mm3, %%mm5 \n\t"
+ "paddd %%mm0, %%mm4 \n\t"
+ "add $8, %1 \n\t"
+ "add $4, %%"REG_c" \n\t"
+ "cmp %4, %%"REG_c" \n\t"
+ " jb 2b \n\t"
+ "add %6, %1 \n\t"
+ "psrad $8, %%mm4 \n\t"
+ "psrad $8, %%mm5 \n\t"
+ "packssdw %%mm5, %%mm4 \n\t"
+ "pmaddwd %%mm6, %%mm4 \n\t"
+ "packssdw %%mm4, %%mm4 \n\t"
+ "mov %3, %%"REG_a" \n\t"
+ "movd %%mm4, (%%"REG_a", %0) \n\t"
+ "add $4, %0 \n\t"
+ " jnc 1b \n\t"
+
+ : "+r" (counter), "+r" (filter)
+ : "m" (filterPos), "m" (dst), "m"(offset),
+ "m" (src), "r" (filterSize*2)
+ : "%"REG_a, "%"REG_c, "%"REG_d
+ );
+ }
#else
#ifdef HAVE_ALTIVEC
- hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
+ hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
#else
- int i;
- for(i=0; i<dstW; i++)
- {
- int j;
- int srcPos= filterPos[i];
- int val=0;
-// printf("filterPos: %d\n", filterPos[i]);
- for(j=0; j<filterSize; j++)
- {
-// printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
- val += ((int)src[srcPos + j])*filter[filterSize*i + j];
- }
-// filter += hFilterSize;
- dst[i] = av_clip(val>>7, 0, (1<<15)-1); // the cubic equation does overflow ...
-// dst[i] = val>>7;
- }
-#endif
-#endif
+ int i;
+ for (i=0; i<dstW; i++)
+ {
+ int j;
+ int srcPos= filterPos[i];
+ int val=0;
+ //printf("filterPos: %d\n", filterPos[i]);
+ for (j=0; j<filterSize; j++)
+ {
+ //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
+ val += ((int)src[srcPos + j])*filter[filterSize*i + j];
+ }
+ //filter += hFilterSize;
+ dst[i] = av_clip(val>>7, 0, (1<<15)-1); // the cubic equation does overflow ...
+ //dst[i] = val>>7;
+ }
+#endif /* HAVE_ALTIVEC */
+#endif /* HAVE_MMX */
}
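
All of the hScale variants compute what the generic C loop at the end spells out: for each output sample, a filterSize-tap dot product of 8-bit source samples with 16-bit coefficients, shifted down by 7 and kept within the 15-bit intermediate range the vertical scaler expects. The MMX filterSize==4 and filterSize==8 branches produce two outputs per iteration and rely on saturating packs instead of an explicit clip. A scalar sketch of the 4-tap case:

    /* Scalar equivalent of the filterSize==4 fast path (sketch). */
    static void hscale4(int16_t *dst, int dstW, const uint8_t *src,
                        const int16_t *filter, const int16_t *filterPos)
    {
        int i, j;
        for (i = 0; i < dstW; i++) {
            int val = 0;
            for (j = 0; j < 4; j++)
                val += src[filterPos[i] + j] * filter[4*i + j];
            dst[i] = av_clip(val >> 7, 0, (1 << 15) - 1);
        }
    }
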
// *** horizontal scale Y line to temp buffer
static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
- int flags, int canMMX2BeUsed, int16_t *hLumFilter,
- int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
- int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
- int32_t *mmx2FilterPos, uint8_t *pal)
+ int flags, int canMMX2BeUsed, int16_t *hLumFilter,
+ int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
+ int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
+ int32_t *mmx2FilterPos, uint8_t *pal)
{
- if(srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
+ if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
{
- RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
+ else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
{
- RENAME(uyvyToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(uyvyToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_RGB32)
+ else if (srcFormat==PIX_FMT_RGB32)
{
- RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_BGR24)
+ else if (srcFormat==PIX_FMT_BGR24)
{
- RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_BGR565)
+ else if (srcFormat==PIX_FMT_BGR565)
{
- RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_BGR555)
+ else if (srcFormat==PIX_FMT_BGR555)
{
- RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_BGR32)
+ else if (srcFormat==PIX_FMT_BGR32)
{
- RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_RGB24)
+ else if (srcFormat==PIX_FMT_RGB24)
{
- RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_RGB565)
+ else if (srcFormat==PIX_FMT_RGB565)
{
- RENAME(rgb16ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(rgb16ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_RGB555)
+ else if (srcFormat==PIX_FMT_RGB555)
{
- RENAME(rgb15ToY)(formatConvBuffer, src, srcW);
- src= formatConvBuffer;
+ RENAME(rgb15ToY)(formatConvBuffer, src, srcW);
+ src= formatConvBuffer;
}
- else if(srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
+ else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
{
- RENAME(palToY)(formatConvBuffer, src, srcW, pal);
- src= formatConvBuffer;
+ RENAME(palToY)(formatConvBuffer, src, srcW, pal);
+ src= formatConvBuffer;
}
#ifdef HAVE_MMX
- // use the new MMX scaler if the mmx2 can't be used (its faster than the x86asm one)
- if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
+ // use the new MMX scaler if the mmx2 can't be used (it is faster than the x86 ASM one)
+ if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
- if(!(flags&SWS_FAST_BILINEAR))
+ if (!(flags&SWS_FAST_BILINEAR))
#endif
{
- RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
+ RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
}
else // Fast Bilinear upscale / crap downscale
{
#if defined(ARCH_X86)
#ifdef HAVE_MMX2
- int i;
+ int i;
#if defined(PIC)
- uint64_t ebxsave __attribute__((aligned(8)));
+ uint64_t ebxsave __attribute__((aligned(8)));
#endif
- if(canMMX2BeUsed)
- {
- asm volatile(
+ if (canMMX2BeUsed)
+ {
+ asm volatile(
#if defined(PIC)
- "mov %%"REG_b", %5 \n\t"
-#endif
- "pxor %%mm7, %%mm7 \n\t"
- "mov %0, %%"REG_c" \n\t"
- "mov %1, %%"REG_D" \n\t"
- "mov %2, %%"REG_d" \n\t"
- "mov %3, %%"REG_b" \n\t"
- "xor %%"REG_a", %%"REG_a" \n\t" // i
- PREFETCH" (%%"REG_c") \n\t"
- PREFETCH" 32(%%"REG_c") \n\t"
- PREFETCH" 64(%%"REG_c") \n\t"
+ "mov %%"REG_b", %5 \n\t"
+#endif
+ "pxor %%mm7, %%mm7 \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "mov %1, %%"REG_D" \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "mov %3, %%"REG_b" \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
#ifdef ARCH_X86_64
#define FUNNY_Y_CODE \
- "movl (%%"REG_b"), %%esi \n\t"\
- "call *%4 \n\t"\
- "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
- "add %%"REG_S", %%"REG_c" \n\t"\
- "add %%"REG_a", %%"REG_D" \n\t"\
- "xor %%"REG_a", %%"REG_a" \n\t"\
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
+ "add %%"REG_S", %%"REG_c" \n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
#else
#define FUNNY_Y_CODE \
- "movl (%%"REG_b"), %%esi \n\t"\
- "call *%4 \n\t"\
- "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
- "add %%"REG_a", %%"REG_D" \n\t"\
- "xor %%"REG_a", %%"REG_a" \n\t"\
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
-#endif
+#endif /* ARCH_X86_64 */
FUNNY_Y_CODE
FUNNY_Y_CODE
@@ -2621,225 +2606,225 @@ FUNNY_Y_CODE
FUNNY_Y_CODE
#if defined(PIC)
- "mov %5, %%"REG_b" \n\t"
+ "mov %5, %%"REG_b" \n\t"
#endif
- :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
- "m" (funnyYCode)
+ :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
+ "m" (funnyYCode)
#if defined(PIC)
- ,"m" (ebxsave)
+ ,"m" (ebxsave)
#endif
- : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
#if !defined(PIC)
- ,"%"REG_b
-#endif
- );
- for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
- }
- else
- {
+ ,"%"REG_b
#endif
- long xInc_shr16 = xInc >> 16;
- uint16_t xInc_mask = xInc & 0xffff;
- //NO MMX just normal asm ...
- asm volatile(
- "xor %%"REG_a", %%"REG_a" \n\t" // i
- "xor %%"REG_d", %%"REG_d" \n\t" // xx
- "xorl %%ecx, %%ecx \n\t" // 2*xalpha
- ASMALIGN(4)
- "1: \n\t"
- "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
- "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
- "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
- "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
- "shll $16, %%edi \n\t"
- "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
- "mov %1, %%"REG_D" \n\t"
- "shrl $9, %%esi \n\t"
- "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
- "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
- "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
-
- "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
- "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
- "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
- "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
- "shll $16, %%edi \n\t"
- "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
- "mov %1, %%"REG_D" \n\t"
- "shrl $9, %%esi \n\t"
- "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
- "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
- "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
-
-
- "add $2, %%"REG_a" \n\t"
- "cmp %2, %%"REG_a" \n\t"
- " jb 1b \n\t"
-
-
- :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
- : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
- );
+ );
+ for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
+ }
+ else
+ {
+#endif /* HAVE_MMX2 */
+ long xInc_shr16 = xInc >> 16;
+ uint16_t xInc_mask = xInc & 0xffff;
+ //NO MMX just normal asm ...
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "xor %%"REG_d", %%"REG_d" \n\t" // xx
+ "xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ASMALIGN(4)
+ "1: \n\t"
+ "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
+ "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
+
+ "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
+ "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
+
+
+ "add $2, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
+ " jb 1b \n\t"
+
+
+ :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
+ : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
+ );
#ifdef HAVE_MMX2
- } //if MMX2 can't be used
+ } //if MMX2 can't be used
#endif
#else
- int i;
- unsigned int xpos=0;
- for(i=0;i<dstWidth;i++)
- {
- register unsigned int xx=xpos>>16;
- register unsigned int xalpha=(xpos&0xFFFF)>>9;
- dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
- xpos+=xInc;
- }
-#endif
+ int i;
+ unsigned int xpos=0;
+ for (i=0;i<dstWidth;i++)
+ {
+ register unsigned int xx=xpos>>16;
+ register unsigned int xalpha=(xpos&0xFFFF)>>9;
+ dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
+ xpos+=xInc;
+ }
+#endif /* defined(ARCH_X86) */
}
}
inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
- int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
- int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
- int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
- int32_t *mmx2FilterPos, uint8_t *pal)
+ int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
+ int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
+ int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
+ int32_t *mmx2FilterPos, uint8_t *pal)
{
- if(srcFormat==PIX_FMT_YUYV422)
+ if (srcFormat==PIX_FMT_YUYV422)
{
- RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_UYVY422)
+ else if (srcFormat==PIX_FMT_UYVY422)
{
- RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_RGB32)
+ else if (srcFormat==PIX_FMT_RGB32)
{
- RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_BGR24)
+ else if (srcFormat==PIX_FMT_BGR24)
{
- RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_BGR565)
+ else if (srcFormat==PIX_FMT_BGR565)
{
- RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_BGR555)
+ else if (srcFormat==PIX_FMT_BGR555)
{
- RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_BGR32)
+ else if (srcFormat==PIX_FMT_BGR32)
{
- RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_RGB24)
+ else if (srcFormat==PIX_FMT_RGB24)
{
- RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_RGB565)
+ else if (srcFormat==PIX_FMT_RGB565)
{
- RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(srcFormat==PIX_FMT_RGB555)
+ else if (srcFormat==PIX_FMT_RGB555)
{
- RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
- else if(isGray(srcFormat))
+ else if (isGray(srcFormat))
{
- return;
+ return;
}
- else if(srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
+ else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
{
- RENAME(palToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW, pal);
- src1= formatConvBuffer;
- src2= formatConvBuffer+2048;
+ RENAME(palToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW, pal);
+ src1= formatConvBuffer;
+ src2= formatConvBuffer+2048;
}
#ifdef HAVE_MMX
- // use the new MMX scaler if the mmx2 can't be used (its faster than the x86asm one)
- if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
+ // use the new MMX scaler if the mmx2 can't be used (it is faster than the x86 ASM one)
+ if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
- if(!(flags&SWS_FAST_BILINEAR))
+ if (!(flags&SWS_FAST_BILINEAR))
#endif
{
- RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
- RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
+ RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
+ RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
}
else // Fast Bilinear upscale / crap downscale
{
#if defined(ARCH_X86)
#ifdef HAVE_MMX2
- int i;
+ int i;
#if defined(PIC)
- uint64_t ebxsave __attribute__((aligned(8)));
+ uint64_t ebxsave __attribute__((aligned(8)));
#endif
- if(canMMX2BeUsed)
- {
- asm volatile(
+ if (canMMX2BeUsed)
+ {
+ asm volatile(
#if defined(PIC)
- "mov %%"REG_b", %6 \n\t"
-#endif
- "pxor %%mm7, %%mm7 \n\t"
- "mov %0, %%"REG_c" \n\t"
- "mov %1, %%"REG_D" \n\t"
- "mov %2, %%"REG_d" \n\t"
- "mov %3, %%"REG_b" \n\t"
- "xor %%"REG_a", %%"REG_a" \n\t" // i
- PREFETCH" (%%"REG_c") \n\t"
- PREFETCH" 32(%%"REG_c") \n\t"
- PREFETCH" 64(%%"REG_c") \n\t"
+ "mov %%"REG_b", %6 \n\t"
+#endif
+ "pxor %%mm7, %%mm7 \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "mov %1, %%"REG_D" \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "mov %3, %%"REG_b" \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
#ifdef ARCH_X86_64
#define FUNNY_UV_CODE \
- "movl (%%"REG_b"), %%esi \n\t"\
- "call *%4 \n\t"\
- "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
- "add %%"REG_S", %%"REG_c" \n\t"\
- "add %%"REG_a", %%"REG_D" \n\t"\
- "xor %%"REG_a", %%"REG_a" \n\t"\
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
+ "add %%"REG_S", %%"REG_c" \n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
#else
#define FUNNY_UV_CODE \
- "movl (%%"REG_b"), %%esi \n\t"\
- "call *%4 \n\t"\
- "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
- "add %%"REG_a", %%"REG_D" \n\t"\
- "xor %%"REG_a", %%"REG_a" \n\t"\
+ "movl (%%"REG_b"), %%esi \n\t"\
+ "call *%4 \n\t"\
+ "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
-#endif
+#endif /* ARCH_X86_64 */
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
- "xor %%"REG_a", %%"REG_a" \n\t" // i
- "mov %5, %%"REG_c" \n\t" // src
- "mov %1, %%"REG_D" \n\t" // buf1
- "add $4096, %%"REG_D" \n\t"
- PREFETCH" (%%"REG_c") \n\t"
- PREFETCH" 32(%%"REG_c") \n\t"
- PREFETCH" 64(%%"REG_c") \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "mov %5, %%"REG_c" \n\t" // src
+ "mov %1, %%"REG_D" \n\t" // buf1
+ "add $4096, %%"REG_D" \n\t"
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
FUNNY_UV_CODE
FUNNY_UV_CODE
@@ -2847,442 +2832,442 @@ FUNNY_UV_CODE
FUNNY_UV_CODE
#if defined(PIC)
- "mov %6, %%"REG_b" \n\t"
+ "mov %6, %%"REG_b" \n\t"
#endif
- :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
- "m" (funnyUVCode), "m" (src2)
+ :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
+ "m" (funnyUVCode), "m" (src2)
#if defined(PIC)
- ,"m" (ebxsave)
+ ,"m" (ebxsave)
#endif
- : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
+ : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
#if !defined(PIC)
- ,"%"REG_b
-#endif
- );
- for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
- {
-// printf("%d %d %d\n", dstWidth, i, srcW);
- dst[i] = src1[srcW-1]*128;
- dst[i+2048] = src2[srcW-1]*128;
- }
- }
- else
- {
-#endif
- long xInc_shr16 = (long) (xInc >> 16);
- uint16_t xInc_mask = xInc & 0xffff;
- asm volatile(
- "xor %%"REG_a", %%"REG_a" \n\t" // i
- "xor %%"REG_d", %%"REG_d" \n\t" // xx
- "xorl %%ecx, %%ecx \n\t" // 2*xalpha
- ASMALIGN(4)
- "1: \n\t"
- "mov %0, %%"REG_S" \n\t"
- "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
- "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
- "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
- "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
- "shll $16, %%edi \n\t"
- "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
- "mov %1, %%"REG_D" \n\t"
- "shrl $9, %%esi \n\t"
- "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
-
- "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
- "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
- "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
- "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
- "shll $16, %%edi \n\t"
- "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
- "mov %1, %%"REG_D" \n\t"
- "shrl $9, %%esi \n\t"
- "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
-
- "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
- "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
- "add $1, %%"REG_a" \n\t"
- "cmp %2, %%"REG_a" \n\t"
- " jb 1b \n\t"
+ ,"%"REG_b
+#endif
+ );
+ for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
+ {
+ //printf("%d %d %d\n", dstWidth, i, srcW);
+ dst[i] = src1[srcW-1]*128;
+ dst[i+2048] = src2[srcW-1]*128;
+ }
+ }
+ else
+ {
+#endif /* HAVE_MMX2 */
+ long xInc_shr16 = (long) (xInc >> 16);
+ uint16_t xInc_mask = xInc & 0xffff;
+ asm volatile(
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "xor %%"REG_d", %%"REG_d" \n\t" // xx
+ "xorl %%ecx, %%ecx \n\t" // 2*xalpha
+ ASMALIGN(4)
+ "1: \n\t"
+ "mov %0, %%"REG_S" \n\t"
+ "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
+ "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
+
+ "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
+ "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
+ "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
+ "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
+ "shll $16, %%edi \n\t"
+ "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
+ "mov %1, %%"REG_D" \n\t"
+ "shrl $9, %%esi \n\t"
+ "movw %%si, 4096(%%"REG_D", %%"REG_a", 2) \n\t"
+
+ "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
+ "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
+ "add $1, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
+ " jb 1b \n\t"
/* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
which is needed to support GCC-4.0 */
-#if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
- :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
+#if defined(ARCH_X86_64) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+ :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
#else
- :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
+ :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
#endif
- "r" (src2)
- : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
- );
+ "r" (src2)
+ : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
+ );
#ifdef HAVE_MMX2
- } //if MMX2 can't be used
+ } //if MMX2 can't be used
#endif
#else
- int i;
- unsigned int xpos=0;
- for(i=0;i<dstWidth;i++)
- {
- register unsigned int xx=xpos>>16;
- register unsigned int xalpha=(xpos&0xFFFF)>>9;
- dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
- dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
-/* slower
- dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
- dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
-*/
- xpos+=xInc;
- }
-#endif
- }
+ int i;
+ unsigned int xpos=0;
+ for (i=0;i<dstWidth;i++)
+ {
+ register unsigned int xx=xpos>>16;
+ register unsigned int xalpha=(xpos&0xFFFF)>>9;
+ dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
+ dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
+ /* slower
+ dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
+ dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
+ */
+ xpos+=xInc;
+ }
+#endif /* defined(ARCH_X86) */
+ }
}
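
One small difference between the two plain-C fast-bilinear fallbacks is the weighting: hyscale uses (src[xx]<<7) + (src[xx+1]-src[xx])*xalpha, i.e. weights 128-xalpha and xalpha, while hcscale uses xalpha^127 and xalpha, whose weights only sum to 127 (the commented-out "slower" variant is the 128-based form). The two differ by exactly the left-hand sample value, i.e. by less than 1/128 of full scale in the 15-bit intermediate. Side by side, as a sketch:

    /* The two bilinear blends used by the C fallbacks, for comparison. */
    static int blend128(int a, int b, int xalpha)  /* hyscale fallback  */
    {
        return (a << 7) + (b - a) * xalpha;        /* a*(128-xa) + b*xa */
    }
    static int blend127(int a, int b, int xalpha)  /* hcscale fallback  */
    {
        return a * (xalpha ^ 127) + b * xalpha;    /* a*(127-xa) + b*xa */
    }
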
static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
-
- /* load a few things into local vars to make the code more readable? and faster */
- const int srcW= c->srcW;
- const int dstW= c->dstW;
- const int dstH= c->dstH;
- const int chrDstW= c->chrDstW;
- const int chrSrcW= c->chrSrcW;
- const int lumXInc= c->lumXInc;
- const int chrXInc= c->chrXInc;
- const int dstFormat= c->dstFormat;
- const int srcFormat= c->srcFormat;
- const int flags= c->flags;
- const int canMMX2BeUsed= c->canMMX2BeUsed;
- int16_t *vLumFilterPos= c->vLumFilterPos;
- int16_t *vChrFilterPos= c->vChrFilterPos;
- int16_t *hLumFilterPos= c->hLumFilterPos;
- int16_t *hChrFilterPos= c->hChrFilterPos;
- int16_t *vLumFilter= c->vLumFilter;
- int16_t *vChrFilter= c->vChrFilter;
- int16_t *hLumFilter= c->hLumFilter;
- int16_t *hChrFilter= c->hChrFilter;
- int32_t *lumMmxFilter= c->lumMmxFilter;
- int32_t *chrMmxFilter= c->chrMmxFilter;
- const int vLumFilterSize= c->vLumFilterSize;
- const int vChrFilterSize= c->vChrFilterSize;
- const int hLumFilterSize= c->hLumFilterSize;
- const int hChrFilterSize= c->hChrFilterSize;
- int16_t **lumPixBuf= c->lumPixBuf;
- int16_t **chrPixBuf= c->chrPixBuf;
- const int vLumBufSize= c->vLumBufSize;
- const int vChrBufSize= c->vChrBufSize;
- uint8_t *funnyYCode= c->funnyYCode;
- uint8_t *funnyUVCode= c->funnyUVCode;
- uint8_t *formatConvBuffer= c->formatConvBuffer;
- const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
- const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
- int lastDstY;
- uint8_t *pal=NULL;
-
-    /* vars which will change and which we need to store back in the context */
- int dstY= c->dstY;
- int lumBufIndex= c->lumBufIndex;
- int chrBufIndex= c->chrBufIndex;
- int lastInLumBuf= c->lastInLumBuf;
- int lastInChrBuf= c->lastInChrBuf;
-
- if(isPacked(c->srcFormat)){
- pal= src[1];
- src[0]=
- src[1]=
- src[2]= src[0];
- srcStride[0]=
- srcStride[1]=
- srcStride[2]= srcStride[0];
- }
- srcStride[1]<<= c->vChrDrop;
- srcStride[2]<<= c->vChrDrop;
-
-// printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
-// (int)dst[0], (int)dst[1], (int)dst[2]);
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+
+    /* load a few things into local vars to make the code more readable and faster */
+ const int srcW= c->srcW;
+ const int dstW= c->dstW;
+ const int dstH= c->dstH;
+ const int chrDstW= c->chrDstW;
+ const int chrSrcW= c->chrSrcW;
+ const int lumXInc= c->lumXInc;
+ const int chrXInc= c->chrXInc;
+ const int dstFormat= c->dstFormat;
+ const int srcFormat= c->srcFormat;
+ const int flags= c->flags;
+ const int canMMX2BeUsed= c->canMMX2BeUsed;
+ int16_t *vLumFilterPos= c->vLumFilterPos;
+ int16_t *vChrFilterPos= c->vChrFilterPos;
+ int16_t *hLumFilterPos= c->hLumFilterPos;
+ int16_t *hChrFilterPos= c->hChrFilterPos;
+ int16_t *vLumFilter= c->vLumFilter;
+ int16_t *vChrFilter= c->vChrFilter;
+ int16_t *hLumFilter= c->hLumFilter;
+ int16_t *hChrFilter= c->hChrFilter;
+ int32_t *lumMmxFilter= c->lumMmxFilter;
+ int32_t *chrMmxFilter= c->chrMmxFilter;
+ const int vLumFilterSize= c->vLumFilterSize;
+ const int vChrFilterSize= c->vChrFilterSize;
+ const int hLumFilterSize= c->hLumFilterSize;
+ const int hChrFilterSize= c->hChrFilterSize;
+ int16_t **lumPixBuf= c->lumPixBuf;
+ int16_t **chrPixBuf= c->chrPixBuf;
+ const int vLumBufSize= c->vLumBufSize;
+ const int vChrBufSize= c->vChrBufSize;
+ uint8_t *funnyYCode= c->funnyYCode;
+ uint8_t *funnyUVCode= c->funnyUVCode;
+ uint8_t *formatConvBuffer= c->formatConvBuffer;
+ const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
+ const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
+ int lastDstY;
+ uint8_t *pal=NULL;
+
+    /* vars which will change and which we need to store back in the context */
+ int dstY= c->dstY;
+ int lumBufIndex= c->lumBufIndex;
+ int chrBufIndex= c->chrBufIndex;
+ int lastInLumBuf= c->lastInLumBuf;
+ int lastInChrBuf= c->lastInChrBuf;
+
+ if (isPacked(c->srcFormat)){
+ pal= src[1];
+ src[0]=
+ src[1]=
+ src[2]= src[0];
+ srcStride[0]=
+ srcStride[1]=
+ srcStride[2]= srcStride[0];
+ }
+ srcStride[1]<<= c->vChrDrop;
+ srcStride[2]<<= c->vChrDrop;
+
+ //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
+ // (int)dst[0], (int)dst[1], (int)dst[2]);
#if 0 //self test FIXME move to a vfilter or something
-{
-static volatile int i=0;
-i++;
-if(srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
- selfTest(src, srcStride, c->srcW, c->srcH);
-i--;
-}
+ {
+ static volatile int i=0;
+ i++;
+ if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
+ selfTest(src, srcStride, c->srcW, c->srcH);
+ i--;
+ }
#endif
-//printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
-//dstStride[0],dstStride[1],dstStride[2]);
-
- if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
- {
- static int firstTime=1; //FIXME move this into the context perhaps
- if(flags & SWS_PRINT_INFO && firstTime)
- {
- av_log(c, AV_LOG_WARNING, "SwScaler: Warning: dstStride is not aligned!\n"
- "SwScaler: ->cannot do aligned memory acesses anymore\n");
- firstTime=0;
- }
- }
-
- /* Note the user might start scaling the picture in the middle so this will not get executed
- this is not really intended but works currently, so ppl might do it */
- if(srcSliceY ==0){
- lumBufIndex=0;
- chrBufIndex=0;
- dstY=0;
- lastInLumBuf= -1;
- lastInChrBuf= -1;
- }
-
- lastDstY= dstY;
-
- for(;dstY < dstH; dstY++){
- unsigned char *dest =dst[0]+dstStride[0]*dstY;
- const int chrDstY= dstY>>c->chrDstVSubSample;
- unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
- unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
-
- const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
- const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
- const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
- const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
-
-//printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
-// dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
- //handle holes (FAST_BILINEAR & weird filters)
- if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
- if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
-//printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
- ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
- ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
-
- // Do we have enough lines in this slice to output the dstY line
- if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
- {
- //Do horizontal scaling
- while(lastInLumBuf < lastLumSrcY)
- {
- uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
- lumBufIndex++;
-// printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
- ASSERT(lumBufIndex < 2*vLumBufSize)
- ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
- ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
-// printf("%d %d\n", lumBufIndex, vLumBufSize);
- RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
- flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
- funnyYCode, c->srcFormat, formatConvBuffer,
- c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
- lastInLumBuf++;
- }
- while(lastInChrBuf < lastChrSrcY)
- {
- uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
- uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
- chrBufIndex++;
- ASSERT(chrBufIndex < 2*vChrBufSize)
- ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
- ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
- //FIXME replace parameters through context struct (some at least)
-
- if(!(isGray(srcFormat) || isGray(dstFormat)))
- RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
- flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
- funnyUVCode, c->srcFormat, formatConvBuffer,
- c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
- lastInChrBuf++;
- }
- //wrap buf index around to stay inside the ring buffer
- if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
- if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
- }
- else // not enough lines left in this slice -> load the rest in the buffer
- {
-/* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
- firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
- lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
- vChrBufSize, vLumBufSize);*/
-
- //Do horizontal scaling
- while(lastInLumBuf+1 < srcSliceY + srcSliceH)
- {
- uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
- lumBufIndex++;
- ASSERT(lumBufIndex < 2*vLumBufSize)
- ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
- ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
- RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
- flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
- funnyYCode, c->srcFormat, formatConvBuffer,
- c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
- lastInLumBuf++;
- }
- while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
- {
- uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
- uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
- chrBufIndex++;
- ASSERT(chrBufIndex < 2*vChrBufSize)
- ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
- ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
-
- if(!(isGray(srcFormat) || isGray(dstFormat)))
- RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
- flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
- funnyUVCode, c->srcFormat, formatConvBuffer,
- c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
- lastInChrBuf++;
- }
- //wrap buf index around to stay inside the ring buffer
- if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
- if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
- break; //we can't output a dstY line so let's try with the next slice
- }
+ //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
+ //dstStride[0],dstStride[1],dstStride[2]);
+
+ if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
+ {
+ static int firstTime=1; //FIXME move this into the context perhaps
+ if (flags & SWS_PRINT_INFO && firstTime)
+ {
+ av_log(c, AV_LOG_WARNING, "SwScaler: Warning: dstStride is not aligned!\n"
+                   "SwScaler: ->cannot do aligned memory accesses anymore\n");
+ firstTime=0;
+ }
+ }
+
+    /* Note: the user might start scaling the picture in the middle, so this will not get executed.
+       This is not really intended but works currently, so people might do it. */
+ if (srcSliceY ==0){
+ lumBufIndex=0;
+ chrBufIndex=0;
+ dstY=0;
+ lastInLumBuf= -1;
+ lastInChrBuf= -1;
+ }
+
+ lastDstY= dstY;
+
+ for (;dstY < dstH; dstY++){
+ unsigned char *dest =dst[0]+dstStride[0]*dstY;
+ const int chrDstY= dstY>>c->chrDstVSubSample;
+ unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
+ unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
+
+ const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
+ const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
+ const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
+ const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
+
+ //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
+ // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
+ //handle holes (FAST_BILINEAR & weird filters)
+ if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
+ if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
+ //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
+ ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
+ ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
+
+ // Do we have enough lines in this slice to output the dstY line
+ if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
+ {
+ //Do horizontal scaling
+ while(lastInLumBuf < lastLumSrcY)
+ {
+ uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
+ lumBufIndex++;
+ //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
+ ASSERT(lumBufIndex < 2*vLumBufSize)
+ ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
+ ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
+ //printf("%d %d\n", lumBufIndex, vLumBufSize);
+ RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
+ flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
+ funnyYCode, c->srcFormat, formatConvBuffer,
+ c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
+ lastInLumBuf++;
+ }
+ while(lastInChrBuf < lastChrSrcY)
+ {
+ uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
+ uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
+ chrBufIndex++;
+ ASSERT(chrBufIndex < 2*vChrBufSize)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
+ //FIXME replace parameters through context struct (some at least)
+
+ if (!(isGray(srcFormat) || isGray(dstFormat)))
+ RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
+ flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
+ funnyUVCode, c->srcFormat, formatConvBuffer,
+ c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
+ lastInChrBuf++;
+ }
+ //wrap buf index around to stay inside the ring buffer
+ if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
+ if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
+ }
+ else // not enough lines left in this slice -> load the rest in the buffer
+ {
+ /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
+ firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
+ lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
+ vChrBufSize, vLumBufSize);*/
+
+ //Do horizontal scaling
+ while(lastInLumBuf+1 < srcSliceY + srcSliceH)
+ {
+ uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
+ lumBufIndex++;
+ ASSERT(lumBufIndex < 2*vLumBufSize)
+ ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
+ ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
+ RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
+ flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
+ funnyYCode, c->srcFormat, formatConvBuffer,
+ c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
+ lastInLumBuf++;
+ }
+ while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
+ {
+ uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
+ uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
+ chrBufIndex++;
+ ASSERT(chrBufIndex < 2*vChrBufSize)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
+ ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
+
+ if (!(isGray(srcFormat) || isGray(dstFormat)))
+ RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
+ flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
+ funnyUVCode, c->srcFormat, formatConvBuffer,
+ c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
+ lastInChrBuf++;
+ }
+ //wrap buf index around to stay inside the ring buffer
+ if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
+ if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
+ break; //we can't output a dstY line so let's try with the next slice
+ }
#ifdef HAVE_MMX
- b5Dither= dither8[dstY&1];
- g6Dither= dither4[dstY&1];
- g5Dither= dither8[dstY&1];
- r5Dither= dither8[(dstY+1)&1];
-#endif
- if(dstY < dstH-2)
- {
- int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
- int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
+ b5Dither= ff_dither8[dstY&1];
+ g6Dither= ff_dither4[dstY&1];
+ g5Dither= ff_dither8[dstY&1];
+ r5Dither= ff_dither8[(dstY+1)&1];
+#endif
+ if (dstY < dstH-2)
+ {
+ int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
+ int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
#ifdef HAVE_MMX
- int i;
- if(flags & SWS_ACCURATE_RND){
- for(i=0; i<vLumFilterSize; i+=2){
- lumMmxFilter[2*i+0]= (int32_t)lumSrcPtr[i ];
- lumMmxFilter[2*i+1]= (int32_t)lumSrcPtr[i+(vLumFilterSize>1)];
- lumMmxFilter[2*i+2]=
- lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i ]
- + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
- }
- for(i=0; i<vChrFilterSize; i+=2){
- chrMmxFilter[2*i+0]= (int32_t)chrSrcPtr[i ];
- chrMmxFilter[2*i+1]= (int32_t)chrSrcPtr[i+(vChrFilterSize>1)];
- chrMmxFilter[2*i+2]=
- chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i ]
- + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
- }
- }else{
- for(i=0; i<vLumFilterSize; i++)
- {
- lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
- lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
- lumMmxFilter[4*i+2]=
- lumMmxFilter[4*i+3]=
- ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
- }
- for(i=0; i<vChrFilterSize; i++)
- {
- chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
- chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
- chrMmxFilter[4*i+2]=
- chrMmxFilter[4*i+3]=
- ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
- }
+ int i;
+ if (flags & SWS_ACCURATE_RND){
+ for (i=0; i<vLumFilterSize; i+=2){
+ lumMmxFilter[2*i+0]= (int32_t)lumSrcPtr[i ];
+ lumMmxFilter[2*i+1]= (int32_t)lumSrcPtr[i+(vLumFilterSize>1)];
+ lumMmxFilter[2*i+2]=
+ lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i ]
+ + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
+ }
+ for (i=0; i<vChrFilterSize; i+=2){
+ chrMmxFilter[2*i+0]= (int32_t)chrSrcPtr[i ];
+ chrMmxFilter[2*i+1]= (int32_t)chrSrcPtr[i+(vChrFilterSize>1)];
+ chrMmxFilter[2*i+2]=
+ chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i ]
+ + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
+ }
+ }else{
+ for (i=0; i<vLumFilterSize; i++)
+ {
+ lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
+ lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
+ lumMmxFilter[4*i+2]=
+ lumMmxFilter[4*i+3]=
+ ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
+ }
+ for (i=0; i<vChrFilterSize; i++)
+ {
+ chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
+ chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
+ chrMmxFilter[4*i+2]=
+ chrMmxFilter[4*i+3]=
+ ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
}
+ }
#endif
- if(dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
- const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
- if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
- RENAME(yuv2nv12X)(c,
- vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
- vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
- dest, uDest, dstW, chrDstW, dstFormat);
- }
- else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
- {
- const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
- if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
- if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
- {
- int16_t *lumBuf = lumPixBuf[0];
- int16_t *chrBuf= chrPixBuf[0];
- RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
- }
- else //General YV12
- {
- RENAME(yuv2yuvX)(c,
- vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
- vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
- dest, uDest, vDest, dstW, chrDstW);
- }
- }
- else
- {
- ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
- ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
- if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
- {
- int chrAlpha= vChrFilter[2*dstY+1];
- RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
- dest, dstW, chrAlpha, dstFormat, flags, dstY);
- }
- else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
- {
- int lumAlpha= vLumFilter[2*dstY+1];
- int chrAlpha= vChrFilter[2*dstY+1];
- lumMmxFilter[2]=
- lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
- chrMmxFilter[2]=
- chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
- RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
- dest, dstW, lumAlpha, chrAlpha, dstY);
- }
- else //General RGB
- {
- RENAME(yuv2packedX)(c,
- vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
- vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
- dest, dstW, dstY);
- }
- }
+ if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
+ RENAME(yuv2nv12X)(c,
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, dstW, chrDstW, dstFormat);
+ }
+ else if (isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
+ {
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
+ if (vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
+ {
+ int16_t *lumBuf = lumPixBuf[0];
+ int16_t *chrBuf= chrPixBuf[0];
+ RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
+ }
+ else //General YV12
+ {
+ RENAME(yuv2yuvX)(c,
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+ }
+ }
+ else
+ {
+ ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
+ ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
+ if (vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
+ {
+ int chrAlpha= vChrFilter[2*dstY+1];
+ RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
+ dest, dstW, chrAlpha, dstFormat, flags, dstY);
+ }
+ else if (vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
+ {
+ int lumAlpha= vLumFilter[2*dstY+1];
+ int chrAlpha= vChrFilter[2*dstY+1];
+ lumMmxFilter[2]=
+ lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
+ chrMmxFilter[2]=
+ chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
+ RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
+ dest, dstW, lumAlpha, chrAlpha, dstY);
+ }
+ else //General RGB
+ {
+ RENAME(yuv2packedX)(c,
+ vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+ vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, dstW, dstY);
+ }
}
- else // hmm looks like we can't use MMX here without overwriting this array's tail
- {
- int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
- int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
- if(dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
- const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
- if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
- yuv2nv12XinC(
- vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
- vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
- dest, uDest, dstW, chrDstW, dstFormat);
- }
- else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
- {
- const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
- if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
- yuv2yuvXinC(
- vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
- vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
- dest, uDest, vDest, dstW, chrDstW);
- }
- else
- {
- ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
- ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
- yuv2packedXinC(c,
- vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
- vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
- dest, dstW, dstY);
- }
- }
- }
+ }
+ else // hmm looks like we can't use MMX here without overwriting this array's tail
+ {
+ int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
+ int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
+ if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
+ yuv2nv12XinC(
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, dstW, chrDstW, dstFormat);
+ }
+ else if (isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
+ {
+ const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
+ if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
+ yuv2yuvXinC(
+ vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
+ vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, uDest, vDest, dstW, chrDstW);
+ }
+ else
+ {
+ ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
+ ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
+ yuv2packedXinC(c,
+ vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
+ vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
+ dest, dstW, dstY);
+ }
+ }
+ }
#ifdef HAVE_MMX
- __asm __volatile(SFENCE:::"memory");
- __asm __volatile(EMMS:::"memory");
+ asm volatile(SFENCE:::"memory");
+ asm volatile(EMMS:::"memory");
#endif
- /* store changed local vars back in the context */
- c->dstY= dstY;
- c->lumBufIndex= lumBufIndex;
- c->chrBufIndex= chrBufIndex;
- c->lastInLumBuf= lastInLumBuf;
- c->lastInChrBuf= lastInChrBuf;
-
- return dstY - lastDstY;
+ /* store changed local vars back in the context */
+ c->dstY= dstY;
+ c->lumBufIndex= lumBufIndex;
+ c->chrBufIndex= chrBufIndex;
+ c->lastInLumBuf= lastInLumBuf;
+ c->lastInChrBuf= lastInChrBuf;
+
+ return dstY - lastDstY;
}
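
The plain-C fallback reindented in the hunk above does the horizontal bilinear chroma scale in 16.16 fixed point. A minimal standalone sketch of that path follows; the function name, the int16_t output type and the 2048-sample U/V spacing mirror the template but are restated here purely for illustration, not taken verbatim from the patch:

    #include <stdint.h>

    /* Walk the source in 16.16 fixed point and blend each pair of neighbouring
     * chroma samples; the V results are stored 2048 entries after the U results,
     * exactly as in the C loop shown in the hunk above. */
    static void hcscale_c_sketch(int16_t *dst, int dstWidth,
                                 const uint8_t *src1, const uint8_t *src2,
                                 unsigned int xInc)
    {
        unsigned int xpos = 0;
        int i;
        for (i = 0; i < dstWidth; i++) {
            unsigned int xx     = xpos >> 16;           /* integer source index   */
            unsigned int xalpha = (xpos & 0xFFFF) >> 9; /* 7-bit blend weight     */
            dst[i]        = src1[xx] * (xalpha ^ 127) + src1[xx + 1] * xalpha;
            dst[i + 2048] = src2[xx] * (xalpha ^ 127) + src2[xx + 1] * xalpha;
            xpos += xInc;                               /* 16.16 fixed-point step */
        }
    }
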
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb.c b/contrib/ffmpeg/libswscale/yuv2rgb.c
index af7f86f40..8b4622311 100644
--- a/contrib/ffmpeg/libswscale/yuv2rgb.c
+++ b/contrib/ffmpeg/libswscale/yuv2rgb.c
@@ -1,14 +1,17 @@
/*
- * yuv2rgb.c, Software YUV to RGB coverter
+ * yuv2rgb.c, Software YUV to RGB converter
*
* Copyright (C) 1999, Aaron Holtzman <aholtzma@ess.engr.uvic.ca>
- * All Rights Reserved.
*
* Functions broken out from display_x11.c and several new modes
- * added by Håkan Hjort <d95hjort@dtek.chalmers.se>
+ * added by Håkan Hjort <d95hjort@dtek.chalmers.se>
*
* 15 & 16 bpp support by Franck Sicard <Franck.Sicard@solsoft.fr>
*
+ * MMX/MMX2 template stuff (needed for fast movntq support),
+ * 1,4,8bpp support and context / deglobalize stuff
+ * by Michael Niedermayer (michaelni@gmx.at)
+ *
* This file is part of mpeg2dec, a free MPEG-2 video decoder
*
* mpeg2dec is free software; you can redistribute it and/or modify
@@ -24,10 +27,6 @@
* You should have received a copy of the GNU General Public License
* along with mpeg2dec; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * MMX/MMX2 Template stuff from Michael Niedermayer (michaelni@gmx.at) (needed for fast movntq support)
- * 1,4,8bpp support by Michael Niedermayer (michaelni@gmx.at)
- * context / deglobalize stuff by Michael Niedermayer
*/
#include <stdio.h>
@@ -40,6 +39,10 @@
#include "swscale.h"
#include "swscale_internal.h"
+#ifdef HAVE_VIS
+#include "yuv2rgb_vis.c"
+#endif
+
#ifdef HAVE_MLIB
#include "yuv2rgb_mlib.c"
#endif
@@ -156,13 +159,9 @@ const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
#ifdef HAVE_MMX
/* hope these constant values are cache line aligned */
-static uint64_t attribute_used __attribute__((aligned(8))) mmx_00ffw = 0x00ff00ff00ff00ffULL;
-static uint64_t attribute_used __attribute__((aligned(8))) mmx_redmask = 0xf8f8f8f8f8f8f8f8ULL;
-static uint64_t attribute_used __attribute__((aligned(8))) mmx_grnmask = 0xfcfcfcfcfcfcfcfcULL;
-
-static uint64_t attribute_used __attribute__((aligned(8))) M24A= 0x00FF0000FF0000FFULL;
-static uint64_t attribute_used __attribute__((aligned(8))) M24B= 0xFF0000FF0000FF00ULL;
-static uint64_t attribute_used __attribute__((aligned(8))) M24C= 0x0000FF0000FF0000ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_00ffw) = 0x00ff00ff00ff00ffULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_redmask) = 0xf8f8f8f8f8f8f8f8ULL;
+DECLARE_ASM_CONST(8, uint64_t, mmx_grnmask) = 0xfcfcfcfcfcfcfcfcULL;
// the volatile is required because gcc otherwise optimizes some writes away not knowing that these
// are read in the asm block
@@ -171,14 +170,6 @@ static volatile uint64_t attribute_used __attribute__((aligned(8))) g5Dither;
static volatile uint64_t attribute_used __attribute__((aligned(8))) g6Dither;
static volatile uint64_t attribute_used __attribute__((aligned(8))) r5Dither;
-static uint64_t __attribute__((aligned(8))) dither4[2]={
- 0x0103010301030103LL,
- 0x0200020002000200LL,};
-
-static uint64_t __attribute__((aligned(8))) dither8[2]={
- 0x0602060206020602LL,
- 0x0004000400040004LL,};
-
#undef HAVE_MMX
//MMX versions
@@ -210,404 +201,452 @@ const int32_t Inverse_Table_6_9[8][4] = {
{117579, 136230, 16907, 35559} /* SMPTE 240M (1987) */
};
-#define RGB(i) \
- U = pu[i]; \
- V = pv[i]; \
- r = (void *)c->table_rV[V]; \
- g = (void *)(c->table_gU[U] + c->table_gV[V]); \
- b = (void *)c->table_bU[U];
-
-#define DST1(i) \
- Y = py_1[2*i]; \
- dst_1[2*i] = r[Y] + g[Y] + b[Y]; \
- Y = py_1[2*i+1]; \
- dst_1[2*i+1] = r[Y] + g[Y] + b[Y];
-
-#define DST2(i) \
- Y = py_2[2*i]; \
- dst_2[2*i] = r[Y] + g[Y] + b[Y]; \
- Y = py_2[2*i+1]; \
- dst_2[2*i+1] = r[Y] + g[Y] + b[Y];
-
-#define DST1RGB(i) \
- Y = py_1[2*i]; \
- dst_1[6*i] = r[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = b[Y]; \
- Y = py_1[2*i+1]; \
- dst_1[6*i+3] = r[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = b[Y];
-
-#define DST2RGB(i) \
- Y = py_2[2*i]; \
- dst_2[6*i] = r[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = b[Y]; \
- Y = py_2[2*i+1]; \
- dst_2[6*i+3] = r[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = b[Y];
-
-#define DST1BGR(i) \
- Y = py_1[2*i]; \
- dst_1[6*i] = b[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = r[Y]; \
- Y = py_1[2*i+1]; \
- dst_1[6*i+3] = b[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = r[Y];
-
-#define DST2BGR(i) \
- Y = py_2[2*i]; \
- dst_2[6*i] = b[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = r[Y]; \
- Y = py_2[2*i+1]; \
- dst_2[6*i+3] = b[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = r[Y];
+#define RGB(i) \
+ U = pu[i]; \
+ V = pv[i]; \
+ r = (void *)c->table_rV[V]; \
+ g = (void *)(c->table_gU[U] + c->table_gV[V]); \
+ b = (void *)c->table_bU[U];
+
+#define DST1(i) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y] + g[Y] + b[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y] + g[Y] + b[Y];
+
+#define DST2(i) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y] + g[Y] + b[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y] + g[Y] + b[Y];
+
+#define DST1RGB(i) \
+ Y = py_1[2*i]; \
+ dst_1[6*i] = r[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = b[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[6*i+3] = r[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = b[Y];
+
+#define DST2RGB(i) \
+ Y = py_2[2*i]; \
+ dst_2[6*i] = r[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = b[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[6*i+3] = r[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = b[Y];
+
+#define DST1BGR(i) \
+ Y = py_1[2*i]; \
+ dst_1[6*i] = b[Y]; dst_1[6*i+1] = g[Y]; dst_1[6*i+2] = r[Y]; \
+ Y = py_1[2*i+1]; \
+ dst_1[6*i+3] = b[Y]; dst_1[6*i+4] = g[Y]; dst_1[6*i+5] = r[Y];
+
+#define DST2BGR(i) \
+ Y = py_2[2*i]; \
+ dst_2[6*i] = b[Y]; dst_2[6*i+1] = g[Y]; dst_2[6*i+2] = r[Y]; \
+ Y = py_2[2*i+1]; \
+ dst_2[6*i+3] = b[Y]; dst_2[6*i+4] = g[Y]; dst_2[6*i+5] = r[Y];
#define PROLOG(func_name, dst_type) \
static int func_name(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY, \
- int srcSliceH, uint8_t* dst[], int dstStride[]){\
+ int srcSliceH, uint8_t* dst[], int dstStride[]){\
int y;\
\
- if(c->srcFormat == PIX_FMT_YUV422P){\
- srcStride[1] *= 2;\
- srcStride[2] *= 2;\
+ if (c->srcFormat == PIX_FMT_YUV422P){\
+ srcStride[1] *= 2;\
+ srcStride[2] *= 2;\
}\
- for(y=0; y<srcSliceH; y+=2){\
- dst_type *dst_1= (dst_type*)(dst[0] + (y+srcSliceY )*dstStride[0]);\
- dst_type *dst_2= (dst_type*)(dst[0] + (y+srcSliceY+1)*dstStride[0]);\
- dst_type attribute_unused *r, *b;\
- dst_type *g;\
- uint8_t *py_1= src[0] + y*srcStride[0];\
- uint8_t *py_2= py_1 + srcStride[0];\
- uint8_t *pu= src[1] + (y>>1)*srcStride[1];\
- uint8_t *pv= src[2] + (y>>1)*srcStride[2];\
- unsigned int h_size= c->dstW>>3;\
- while (h_size--) {\
- int attribute_unused U, V;\
- int Y;\
-
-#define EPILOG(dst_delta)\
- pu += 4;\
- pv += 4;\
- py_1 += 8;\
- py_2 += 8;\
- dst_1 += dst_delta;\
- dst_2 += dst_delta;\
- }\
+ for (y=0; y<srcSliceH; y+=2){\
+ dst_type *dst_1= (dst_type*)(dst[0] + (y+srcSliceY )*dstStride[0]);\
+ dst_type *dst_2= (dst_type*)(dst[0] + (y+srcSliceY+1)*dstStride[0]);\
+ dst_type av_unused *r, *b;\
+ dst_type *g;\
+ uint8_t *py_1= src[0] + y*srcStride[0];\
+ uint8_t *py_2= py_1 + srcStride[0];\
+ uint8_t *pu= src[1] + (y>>1)*srcStride[1];\
+ uint8_t *pv= src[2] + (y>>1)*srcStride[2];\
+ unsigned int h_size= c->dstW>>3;\
+ while (h_size--) {\
+ int av_unused U, V;\
+ int Y;\
+
+#define EPILOG1(dst_delta)\
+ pu += 4;\
+ pv += 4;\
+ py_1 += 8;\
+ py_2 += 8;\
+ dst_1 += dst_delta;\
+ dst_2 += dst_delta;\
+ }\
+ if (c->dstW & 4) {\
+ int av_unused U, V;\
+ int Y;\
+
+#define EPILOG2()\
+ }\
}\
return srcSliceH;\
}
-PROLOG(yuv2rgb_c_32, uint32_t)
- RGB(0);
- DST1(0);
- DST2(0);
-
- RGB(1);
- DST2(1);
- DST1(1);
-
- RGB(2);
- DST1(2);
- DST2(2);
+#define EPILOG(dst_delta)\
+ EPILOG1(dst_delta)\
+ EPILOG2()
- RGB(3);
- DST2(3);
- DST1(3);
-EPILOG(8)
+PROLOG(yuv2rgb_c_32, uint32_t)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+
+ RGB(2);
+ DST1(2);
+ DST2(2);
+
+ RGB(3);
+ DST2(3);
+ DST1(3);
+EPILOG1(8)
+ RGB(0);
+ DST1(0);
+ DST2(0);
+
+ RGB(1);
+ DST2(1);
+ DST1(1);
+EPILOG2()
PROLOG(yuv2rgb_c_24_rgb, uint8_t)
- RGB(0);
- DST1RGB(0);
- DST2RGB(0);
-
- RGB(1);
- DST2RGB(1);
- DST1RGB(1);
-
- RGB(2);
- DST1RGB(2);
- DST2RGB(2);
-
- RGB(3);
- DST2RGB(3);
- DST1RGB(3);
-EPILOG(24)
+ RGB(0);
+ DST1RGB(0);
+ DST2RGB(0);
+
+ RGB(1);
+ DST2RGB(1);
+ DST1RGB(1);
+
+ RGB(2);
+ DST1RGB(2);
+ DST2RGB(2);
+
+ RGB(3);
+ DST2RGB(3);
+ DST1RGB(3);
+EPILOG1(24)
+ RGB(0);
+ DST1RGB(0);
+ DST2RGB(0);
+
+ RGB(1);
+ DST2RGB(1);
+ DST1RGB(1);
+EPILOG2()
// only trivial mods from yuv2rgb_c_24_rgb
PROLOG(yuv2rgb_c_24_bgr, uint8_t)
- RGB(0);
- DST1BGR(0);
- DST2BGR(0);
-
- RGB(1);
- DST2BGR(1);
- DST1BGR(1);
-
- RGB(2);
- DST1BGR(2);
- DST2BGR(2);
-
- RGB(3);
- DST2BGR(3);
- DST1BGR(3);
-EPILOG(24)
+ RGB(0);
+ DST1BGR(0);
+ DST2BGR(0);
+
+ RGB(1);
+ DST2BGR(1);
+ DST1BGR(1);
+
+ RGB(2);
+ DST1BGR(2);
+ DST2BGR(2);
+
+ RGB(3);
+ DST2BGR(3);
+ DST1BGR(3);
+EPILOG1(24)
+ RGB(0);
+ DST1BGR(0);
+ DST2BGR(0);
+
+ RGB(1);
+ DST2BGR(1);
+ DST1BGR(1);
+EPILOG2()
// This is exactly the same code as yuv2rgb_c_32 except for the types of
// r, g, b, dst_1, dst_2
PROLOG(yuv2rgb_c_16, uint16_t)
- RGB(0);
- DST1(0);
- DST2(0);
+ RGB(0);
+ DST1(0);
+ DST2(0);
- RGB(1);
- DST2(1);
- DST1(1);
+ RGB(1);
+ DST2(1);
+ DST1(1);
- RGB(2);
- DST1(2);
- DST2(2);
+ RGB(2);
+ DST1(2);
+ DST2(2);
- RGB(3);
- DST2(3);
- DST1(3);
+ RGB(3);
+ DST2(3);
+ DST1(3);
EPILOG(8)
// This is exactly the same code as yuv2rgb_c_32 except for the types of
// r, g, b, dst_1, dst_2
PROLOG(yuv2rgb_c_8, uint8_t)
- RGB(0);
- DST1(0);
- DST2(0);
+ RGB(0);
+ DST1(0);
+ DST2(0);
- RGB(1);
- DST2(1);
- DST1(1);
+ RGB(1);
+ DST2(1);
+ DST1(1);
- RGB(2);
- DST1(2);
- DST2(2);
+ RGB(2);
+ DST1(2);
+ DST2(2);
- RGB(3);
- DST2(3);
- DST1(3);
+ RGB(3);
+ DST2(3);
+ DST1(3);
EPILOG(8)
// r, g, b, dst_1, dst_2
PROLOG(yuv2rgb_c_8_ordered_dither, uint8_t)
- const uint8_t *d32= dither_8x8_32[y&7];
- const uint8_t *d64= dither_8x8_73[y&7];
-#define DST1bpp8(i,o) \
- Y = py_1[2*i]; \
- dst_1[2*i] = r[Y+d32[0+o]] + g[Y+d32[0+o]] + b[Y+d64[0+o]]; \
- Y = py_1[2*i+1]; \
- dst_1[2*i+1] = r[Y+d32[1+o]] + g[Y+d32[1+o]] + b[Y+d64[1+o]];
-
-#define DST2bpp8(i,o) \
- Y = py_2[2*i]; \
- dst_2[2*i] = r[Y+d32[8+o]] + g[Y+d32[8+o]] + b[Y+d64[8+o]]; \
- Y = py_2[2*i+1]; \
- dst_2[2*i+1] = r[Y+d32[9+o]] + g[Y+d32[9+o]] + b[Y+d64[9+o]];
-
-
- RGB(0);
- DST1bpp8(0,0);
- DST2bpp8(0,0);
-
- RGB(1);
- DST2bpp8(1,2);
- DST1bpp8(1,2);
-
- RGB(2);
- DST1bpp8(2,4);
- DST2bpp8(2,4);
-
- RGB(3);
- DST2bpp8(3,6);
- DST1bpp8(3,6);
+ const uint8_t *d32= dither_8x8_32[y&7];
+ const uint8_t *d64= dither_8x8_73[y&7];
+#define DST1bpp8(i,o) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y+d32[0+o]] + g[Y+d32[0+o]] + b[Y+d64[0+o]]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y+d32[1+o]] + g[Y+d32[1+o]] + b[Y+d64[1+o]];
+
+#define DST2bpp8(i,o) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y+d32[8+o]] + g[Y+d32[8+o]] + b[Y+d64[8+o]]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y+d32[9+o]] + g[Y+d32[9+o]] + b[Y+d64[9+o]];
+
+
+ RGB(0);
+ DST1bpp8(0,0);
+ DST2bpp8(0,0);
+
+ RGB(1);
+ DST2bpp8(1,2);
+ DST1bpp8(1,2);
+
+ RGB(2);
+ DST1bpp8(2,4);
+ DST2bpp8(2,4);
+
+ RGB(3);
+ DST2bpp8(3,6);
+ DST1bpp8(3,6);
EPILOG(8)
// This is exactly the same code as yuv2rgb_c_32 except for the types of
// r, g, b, dst_1, dst_2
PROLOG(yuv2rgb_c_4, uint8_t)
- int acc;
-#define DST1_4(i) \
- Y = py_1[2*i]; \
- acc = r[Y] + g[Y] + b[Y]; \
- Y = py_1[2*i+1]; \
- acc |= (r[Y] + g[Y] + b[Y])<<4;\
- dst_1[i] = acc;
-
-#define DST2_4(i) \
- Y = py_2[2*i]; \
- acc = r[Y] + g[Y] + b[Y]; \
- Y = py_2[2*i+1]; \
- acc |= (r[Y] + g[Y] + b[Y])<<4;\
- dst_2[i] = acc;
-
- RGB(0);
- DST1_4(0);
- DST2_4(0);
-
- RGB(1);
- DST2_4(1);
- DST1_4(1);
-
- RGB(2);
- DST1_4(2);
- DST2_4(2);
-
- RGB(3);
- DST2_4(3);
- DST1_4(3);
+ int acc;
+#define DST1_4(i) \
+ Y = py_1[2*i]; \
+ acc = r[Y] + g[Y] + b[Y]; \
+ Y = py_1[2*i+1]; \
+ acc |= (r[Y] + g[Y] + b[Y])<<4; \
+ dst_1[i] = acc;
+
+#define DST2_4(i) \
+ Y = py_2[2*i]; \
+ acc = r[Y] + g[Y] + b[Y]; \
+ Y = py_2[2*i+1]; \
+ acc |= (r[Y] + g[Y] + b[Y])<<4; \
+ dst_2[i] = acc;
+
+ RGB(0);
+ DST1_4(0);
+ DST2_4(0);
+
+ RGB(1);
+ DST2_4(1);
+ DST1_4(1);
+
+ RGB(2);
+ DST1_4(2);
+ DST2_4(2);
+
+ RGB(3);
+ DST2_4(3);
+ DST1_4(3);
EPILOG(4)
PROLOG(yuv2rgb_c_4_ordered_dither, uint8_t)
- const uint8_t *d64= dither_8x8_73[y&7];
- const uint8_t *d128=dither_8x8_220[y&7];
- int acc;
-
-#define DST1bpp4(i,o) \
- Y = py_1[2*i]; \
- acc = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
- Y = py_1[2*i+1]; \
- acc |= (r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]])<<4;\
- dst_1[i]= acc;
-
-#define DST2bpp4(i,o) \
- Y = py_2[2*i]; \
- acc = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
- Y = py_2[2*i+1]; \
- acc |= (r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]])<<4;\
- dst_2[i]= acc;
-
-
- RGB(0);
- DST1bpp4(0,0);
- DST2bpp4(0,0);
-
- RGB(1);
- DST2bpp4(1,2);
- DST1bpp4(1,2);
-
- RGB(2);
- DST1bpp4(2,4);
- DST2bpp4(2,4);
-
- RGB(3);
- DST2bpp4(3,6);
- DST1bpp4(3,6);
+ const uint8_t *d64= dither_8x8_73[y&7];
+ const uint8_t *d128=dither_8x8_220[y&7];
+ int acc;
+
+#define DST1bpp4(i,o) \
+ Y = py_1[2*i]; \
+ acc = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ acc |= (r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]])<<4; \
+ dst_1[i]= acc;
+
+#define DST2bpp4(i,o) \
+ Y = py_2[2*i]; \
+ acc = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ acc |= (r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]])<<4; \
+ dst_2[i]= acc;
+
+
+ RGB(0);
+ DST1bpp4(0,0);
+ DST2bpp4(0,0);
+
+ RGB(1);
+ DST2bpp4(1,2);
+ DST1bpp4(1,2);
+
+ RGB(2);
+ DST1bpp4(2,4);
+ DST2bpp4(2,4);
+
+ RGB(3);
+ DST2bpp4(3,6);
+ DST1bpp4(3,6);
EPILOG(4)
// This is exactly the same code as yuv2rgb_c_32 except for the types of
// r, g, b, dst_1, dst_2
PROLOG(yuv2rgb_c_4b, uint8_t)
- RGB(0);
- DST1(0);
- DST2(0);
+ RGB(0);
+ DST1(0);
+ DST2(0);
- RGB(1);
- DST2(1);
- DST1(1);
+ RGB(1);
+ DST2(1);
+ DST1(1);
- RGB(2);
- DST1(2);
- DST2(2);
+ RGB(2);
+ DST1(2);
+ DST2(2);
- RGB(3);
- DST2(3);
- DST1(3);
+ RGB(3);
+ DST2(3);
+ DST1(3);
EPILOG(8)
PROLOG(yuv2rgb_c_4b_ordered_dither, uint8_t)
- const uint8_t *d64= dither_8x8_73[y&7];
- const uint8_t *d128=dither_8x8_220[y&7];
+ const uint8_t *d64= dither_8x8_73[y&7];
+ const uint8_t *d128=dither_8x8_220[y&7];
-#define DST1bpp4b(i,o) \
- Y = py_1[2*i]; \
- dst_1[2*i] = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
- Y = py_1[2*i+1]; \
- dst_1[2*i+1] = r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]];
+#define DST1bpp4b(i,o) \
+ Y = py_1[2*i]; \
+ dst_1[2*i] = r[Y+d128[0+o]] + g[Y+d64[0+o]] + b[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ dst_1[2*i+1] = r[Y+d128[1+o]] + g[Y+d64[1+o]] + b[Y+d128[1+o]];
-#define DST2bpp4b(i,o) \
- Y = py_2[2*i]; \
- dst_2[2*i] = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
- Y = py_2[2*i+1]; \
- dst_2[2*i+1] = r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]];
+#define DST2bpp4b(i,o) \
+ Y = py_2[2*i]; \
+ dst_2[2*i] = r[Y+d128[8+o]] + g[Y+d64[8+o]] + b[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ dst_2[2*i+1] = r[Y+d128[9+o]] + g[Y+d64[9+o]] + b[Y+d128[9+o]];
- RGB(0);
- DST1bpp4b(0,0);
- DST2bpp4b(0,0);
+ RGB(0);
+ DST1bpp4b(0,0);
+ DST2bpp4b(0,0);
- RGB(1);
- DST2bpp4b(1,2);
- DST1bpp4b(1,2);
+ RGB(1);
+ DST2bpp4b(1,2);
+ DST1bpp4b(1,2);
- RGB(2);
- DST1bpp4b(2,4);
- DST2bpp4b(2,4);
+ RGB(2);
+ DST1bpp4b(2,4);
+ DST2bpp4b(2,4);
- RGB(3);
- DST2bpp4b(3,6);
- DST1bpp4b(3,6);
+ RGB(3);
+ DST2bpp4b(3,6);
+ DST1bpp4b(3,6);
EPILOG(8)
PROLOG(yuv2rgb_c_1_ordered_dither, uint8_t)
- const uint8_t *d128=dither_8x8_220[y&7];
- char out_1=0, out_2=0;
- g= c->table_gU[128] + c->table_gV[128];
-
-#define DST1bpp1(i,o) \
- Y = py_1[2*i]; \
- out_1+= out_1 + g[Y+d128[0+o]]; \
- Y = py_1[2*i+1]; \
- out_1+= out_1 + g[Y+d128[1+o]];
-
-#define DST2bpp1(i,o) \
- Y = py_2[2*i]; \
- out_2+= out_2 + g[Y+d128[8+o]]; \
- Y = py_2[2*i+1]; \
- out_2+= out_2 + g[Y+d128[9+o]];
-
- DST1bpp1(0,0);
- DST2bpp1(0,0);
-
- DST2bpp1(1,2);
- DST1bpp1(1,2);
-
- DST1bpp1(2,4);
- DST2bpp1(2,4);
-
- DST2bpp1(3,6);
- DST1bpp1(3,6);
-
- dst_1[0]= out_1;
- dst_2[0]= out_2;
+ const uint8_t *d128=dither_8x8_220[y&7];
+ char out_1=0, out_2=0;
+ g= c->table_gU[128] + c->table_gV[128];
+
+#define DST1bpp1(i,o) \
+ Y = py_1[2*i]; \
+ out_1+= out_1 + g[Y+d128[0+o]]; \
+ Y = py_1[2*i+1]; \
+ out_1+= out_1 + g[Y+d128[1+o]];
+
+#define DST2bpp1(i,o) \
+ Y = py_2[2*i]; \
+ out_2+= out_2 + g[Y+d128[8+o]]; \
+ Y = py_2[2*i+1]; \
+ out_2+= out_2 + g[Y+d128[9+o]];
+
+ DST1bpp1(0,0);
+ DST2bpp1(0,0);
+
+ DST2bpp1(1,2);
+ DST1bpp1(1,2);
+
+ DST1bpp1(2,4);
+ DST2bpp1(2,4);
+
+ DST2bpp1(3,6);
+ DST1bpp1(3,6);
+
+ dst_1[0]= out_1;
+ dst_2[0]= out_2;
EPILOG(1)
SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
{
#if defined(HAVE_MMX2) || defined(HAVE_MMX)
- if(c->flags & SWS_CPU_CAPS_MMX2){
- switch(c->dstFormat){
- case PIX_FMT_RGB32: return yuv420_rgb32_MMX2;
- case PIX_FMT_BGR24: return yuv420_rgb24_MMX2;
- case PIX_FMT_BGR565: return yuv420_rgb16_MMX2;
- case PIX_FMT_BGR555: return yuv420_rgb15_MMX2;
- }
+ if (c->flags & SWS_CPU_CAPS_MMX2){
+ switch(c->dstFormat){
+ case PIX_FMT_RGB32: return yuv420_rgb32_MMX2;
+ case PIX_FMT_BGR24: return yuv420_rgb24_MMX2;
+ case PIX_FMT_BGR565: return yuv420_rgb16_MMX2;
+ case PIX_FMT_BGR555: return yuv420_rgb15_MMX2;
+ }
}
- if(c->flags & SWS_CPU_CAPS_MMX){
- switch(c->dstFormat){
- case PIX_FMT_RGB32: return yuv420_rgb32_MMX;
- case PIX_FMT_BGR24: return yuv420_rgb24_MMX;
- case PIX_FMT_BGR565: return yuv420_rgb16_MMX;
- case PIX_FMT_BGR555: return yuv420_rgb15_MMX;
- }
+ if (c->flags & SWS_CPU_CAPS_MMX){
+ switch(c->dstFormat){
+ case PIX_FMT_RGB32: return yuv420_rgb32_MMX;
+ case PIX_FMT_BGR24: return yuv420_rgb24_MMX;
+ case PIX_FMT_BGR565: return yuv420_rgb16_MMX;
+ case PIX_FMT_BGR555: return yuv420_rgb15_MMX;
+ }
+ }
+#endif
+#ifdef HAVE_VIS
+ {
+ SwsFunc t= yuv2rgb_init_vis(c);
+ if (t) return t;
}
#endif
#ifdef HAVE_MLIB
{
- SwsFunc t= yuv2rgb_init_mlib(c);
- if(t) return t;
+ SwsFunc t= yuv2rgb_init_mlib(c);
+ if (t) return t;
}
#endif
#ifdef HAVE_ALTIVEC
if (c->flags & SWS_CPU_CAPS_ALTIVEC)
{
- SwsFunc t = yuv2rgb_init_altivec(c);
- if(t) return t;
+ SwsFunc t = yuv2rgb_init_altivec(c);
+ if (t) return t;
+ }
+#endif
+
+#ifdef ARCH_BFIN
+ if (c->flags & SWS_CPU_CAPS_BFIN)
+ {
+ SwsFunc t = ff_bfin_yuv2rgb_get_func_ptr (c);
+ if (t) return t;
}
#endif
@@ -630,7 +669,7 @@ SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
case PIX_FMT_BGR4_BYTE: return yuv2rgb_c_4b_ordered_dither;
case PIX_FMT_MONOBLACK: return yuv2rgb_c_1_ordered_dither;
default:
- assert(0);
+ assert(0);
}
return NULL;
}
@@ -638,13 +677,13 @@ SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
static int div_round (int dividend, int divisor)
{
if (dividend > 0)
- return (dividend + (divisor>>1)) / divisor;
+ return (dividend + (divisor>>1)) / divisor;
else
- return -((-dividend + (divisor>>1)) / divisor);
+ return -((-dividend + (divisor>>1)) / divisor);
}
int yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[4], int fullRange, int brightness, int contrast, int saturation)
-{
+{
const int isRgb = isBGR(c->dstFormat);
const int bpp = fmt_depth(c->dstFormat);
int i;
@@ -667,16 +706,16 @@ int yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[4], int fullRange,
int64_t oy = 0;
//printf("%lld %lld %lld %lld %lld\n", cy, crv, cbu, cgu, cgv);
- if(!fullRange){
- cy= (cy*255) / 219;
- oy= 16<<16;
+ if (!fullRange){
+ cy= (cy*255) / 219;
+ oy= 16<<16;
}else{
crv= (crv*224) / 255;
cbu= (cbu*224) / 255;
cgu= (cgu*224) / 255;
cgv= (cgv*224) / 255;
}
-
+
cy = (cy *contrast )>>16;
crv= (crv*contrast * saturation)>>32;
cbu= (cbu*contrast * saturation)>>32;
@@ -686,163 +725,163 @@ int yuv2rgb_c_init_tables (SwsContext *c, const int inv_table[4], int fullRange,
oy -= 256*brightness;
for (i = 0; i < 1024; i++) {
- int j;
+ int j;
- j= (cy*(((i - 384)<<16) - oy) + (1<<31))>>32;
- j = (j < 0) ? 0 : ((j > 255) ? 255 : j);
- table_Y[i] = j;
+ j= (cy*(((i - 384)<<16) - oy) + (1<<31))>>32;
+ j = (j < 0) ? 0 : ((j > 255) ? 255 : j);
+ table_Y[i] = j;
}
switch (bpp) {
case 32:
- table_start= table_32 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint32_t));
+ table_start= table_32 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint32_t));
- entry_size = sizeof (uint32_t);
- table_r = table_32 + 197;
- table_b = table_32 + 197 + 685;
- table_g = table_32 + 197 + 2*682;
+ entry_size = sizeof (uint32_t);
+ table_r = table_32 + 197;
+ table_b = table_32 + 197 + 685;
+ table_g = table_32 + 197 + 2*682;
- for (i = -197; i < 256+197; i++)
- ((uint32_t *)table_r)[i] = table_Y[i+384] << (isRgb ? 16 : 0);
- for (i = -132; i < 256+132; i++)
- ((uint32_t *)table_g)[i] = table_Y[i+384] << 8;
- for (i = -232; i < 256+232; i++)
- ((uint32_t *)table_b)[i] = table_Y[i+384] << (isRgb ? 0 : 16);
- break;
+ for (i = -197; i < 256+197; i++)
+ ((uint32_t *)table_r)[i] = table_Y[i+384] << (isRgb ? 16 : 0);
+ for (i = -132; i < 256+132; i++)
+ ((uint32_t *)table_g)[i] = table_Y[i+384] << 8;
+ for (i = -232; i < 256+232; i++)
+ ((uint32_t *)table_b)[i] = table_Y[i+384] << (isRgb ? 0 : 16);
+ break;
case 24:
- table_start= table_8 = av_malloc ((256 + 2*232) * sizeof (uint8_t));
+ table_start= table_8 = av_malloc ((256 + 2*232) * sizeof (uint8_t));
- entry_size = sizeof (uint8_t);
- table_r = table_g = table_b = table_8 + 232;
+ entry_size = sizeof (uint8_t);
+ table_r = table_g = table_b = table_8 + 232;
- for (i = -232; i < 256+232; i++)
- ((uint8_t * )table_b)[i] = table_Y[i+384];
- break;
+ for (i = -232; i < 256+232; i++)
+ ((uint8_t * )table_b)[i] = table_Y[i+384];
+ break;
case 15:
case 16:
- table_start= table_16 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint16_t));
+ table_start= table_16 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint16_t));
- entry_size = sizeof (uint16_t);
- table_r = table_16 + 197;
- table_b = table_16 + 197 + 685;
- table_g = table_16 + 197 + 2*682;
+ entry_size = sizeof (uint16_t);
+ table_r = table_16 + 197;
+ table_b = table_16 + 197 + 685;
+ table_g = table_16 + 197 + 2*682;
- for (i = -197; i < 256+197; i++) {
- int j = table_Y[i+384] >> 3;
+ for (i = -197; i < 256+197; i++) {
+ int j = table_Y[i+384] >> 3;
- if (isRgb)
- j <<= ((bpp==16) ? 11 : 10);
+ if (isRgb)
+ j <<= ((bpp==16) ? 11 : 10);
- ((uint16_t *)table_r)[i] = j;
- }
- for (i = -132; i < 256+132; i++) {
- int j = table_Y[i+384] >> ((bpp==16) ? 2 : 3);
+ ((uint16_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = table_Y[i+384] >> ((bpp==16) ? 2 : 3);
- ((uint16_t *)table_g)[i] = j << 5;
- }
- for (i = -232; i < 256+232; i++) {
- int j = table_Y[i+384] >> 3;
+ ((uint16_t *)table_g)[i] = j << 5;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j = table_Y[i+384] >> 3;
- if (!isRgb)
- j <<= ((bpp==16) ? 11 : 10);
+ if (!isRgb)
+ j <<= ((bpp==16) ? 11 : 10);
- ((uint16_t *)table_b)[i] = j;
- }
- break;
+ ((uint16_t *)table_b)[i] = j;
+ }
+ break;
case 8:
- table_start= table_332 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
+ table_start= table_332 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
- entry_size = sizeof (uint8_t);
- table_r = table_332 + 197;
- table_b = table_332 + 197 + 685;
- table_g = table_332 + 197 + 2*682;
+ entry_size = sizeof (uint8_t);
+ table_r = table_332 + 197;
+ table_b = table_332 + 197 + 685;
+ table_g = table_332 + 197 + 2*682;
- for (i = -197; i < 256+197; i++) {
- int j = (table_Y[i+384 - 16] + 18)/36;
+ for (i = -197; i < 256+197; i++) {
+ int j = (table_Y[i+384 - 16] + 18)/36;
- if (isRgb)
- j <<= 5;
+ if (isRgb)
+ j <<= 5;
- ((uint8_t *)table_r)[i] = j;
- }
- for (i = -132; i < 256+132; i++) {
- int j = (table_Y[i+384 - 16] + 18)/36;
+ ((uint8_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = (table_Y[i+384 - 16] + 18)/36;
- if (!isRgb)
- j <<= 1;
+ if (!isRgb)
+ j <<= 1;
- ((uint8_t *)table_g)[i] = j << 2;
- }
- for (i = -232; i < 256+232; i++) {
- int j = (table_Y[i+384 - 37] + 43)/85;
+ ((uint8_t *)table_g)[i] = j << 2;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j = (table_Y[i+384 - 37] + 43)/85;
- if (!isRgb)
- j <<= 6;
+ if (!isRgb)
+ j <<= 6;
- ((uint8_t *)table_b)[i] = j;
- }
- break;
+ ((uint8_t *)table_b)[i] = j;
+ }
+ break;
case 4:
case 4|128:
- table_start= table_121 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
+ table_start= table_121 = av_malloc ((197 + 2*682 + 256 + 132) * sizeof (uint8_t));
- entry_size = sizeof (uint8_t);
- table_r = table_121 + 197;
- table_b = table_121 + 197 + 685;
- table_g = table_121 + 197 + 2*682;
+ entry_size = sizeof (uint8_t);
+ table_r = table_121 + 197;
+ table_b = table_121 + 197 + 685;
+ table_g = table_121 + 197 + 2*682;
- for (i = -197; i < 256+197; i++) {
- int j = table_Y[i+384 - 110] >> 7;
+ for (i = -197; i < 256+197; i++) {
+ int j = table_Y[i+384 - 110] >> 7;
- if (isRgb)
- j <<= 3;
+ if (isRgb)
+ j <<= 3;
- ((uint8_t *)table_r)[i] = j;
- }
- for (i = -132; i < 256+132; i++) {
- int j = (table_Y[i+384 - 37]+ 43)/85;
+ ((uint8_t *)table_r)[i] = j;
+ }
+ for (i = -132; i < 256+132; i++) {
+ int j = (table_Y[i+384 - 37]+ 43)/85;
- ((uint8_t *)table_g)[i] = j << 1;
- }
- for (i = -232; i < 256+232; i++) {
- int j =table_Y[i+384 - 110] >> 7;
+ ((uint8_t *)table_g)[i] = j << 1;
+ }
+ for (i = -232; i < 256+232; i++) {
+ int j =table_Y[i+384 - 110] >> 7;
- if (!isRgb)
- j <<= 3;
+ if (!isRgb)
+ j <<= 3;
- ((uint8_t *)table_b)[i] = j;
- }
- break;
+ ((uint8_t *)table_b)[i] = j;
+ }
+ break;
case 1:
- table_start= table_1 = av_malloc (256*2 * sizeof (uint8_t));
+ table_start= table_1 = av_malloc (256*2 * sizeof (uint8_t));
- entry_size = sizeof (uint8_t);
- table_g = table_1;
- table_r = table_b = NULL;
+ entry_size = sizeof (uint8_t);
+ table_g = table_1;
+ table_r = table_b = NULL;
- for (i = 0; i < 256+256; i++) {
- int j = table_Y[i + 384 - 110]>>7;
+ for (i = 0; i < 256+256; i++) {
+ int j = table_Y[i + 384 - 110]>>7;
- ((uint8_t *)table_g)[i] = j;
- }
- break;
+ ((uint8_t *)table_g)[i] = j;
+ }
+ break;
default:
- table_start= NULL;
- av_log(c, AV_LOG_ERROR, "%ibpp not supported by yuv2rgb\n", bpp);
- //free mem?
- return -1;
+ table_start= NULL;
+ av_log(c, AV_LOG_ERROR, "%ibpp not supported by yuv2rgb\n", bpp);
+ //free mem?
+ return -1;
}
for (i = 0; i < 256; i++) {
- c->table_rV[i] = (uint8_t *)table_r + entry_size * div_round (crv * (i-128), 76309);
- c->table_gU[i] = (uint8_t *)table_g + entry_size * div_round (cgu * (i-128), 76309);
- c->table_gV[i] = entry_size * div_round (cgv * (i-128), 76309);
- c->table_bU[i] = (uint8_t *)table_b + entry_size * div_round (cbu * (i-128), 76309);
+ c->table_rV[i] = (uint8_t *)table_r + entry_size * div_round (crv * (i-128), 76309);
+ c->table_gU[i] = (uint8_t *)table_g + entry_size * div_round (cgu * (i-128), 76309);
+ c->table_gV[i] = entry_size * div_round (cgv * (i-128), 76309);
+ c->table_bU[i] = (uint8_t *)table_b + entry_size * div_round (cbu * (i-128), 76309);
}
av_free(c->yuvTable);
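
    The tables built above feed the per-pixel lookup used by the RGB()/DST1() macros earlier in this file: the chroma pair selects three per-channel tables and the luma value indexes into them. A minimal sketch for the 32 bpp case (the helper name is illustrative; the table fields are the SwsContext members initialised above):

    #include <stdint.h>
    #include "swscale_internal.h"   /* SwsContext with the table_* members */

    /* Table-driven YUV -> packed RGB32 lookup: table_rV/table_bU are selected
     * by V/U, table_gU[U] is shifted by the byte offset table_gV[V], and the
     * three per-channel values already sit in disjoint bit ranges, so a plain
     * sum assembles the pixel, as in the DST1()/DST2() macros. */
    static uint32_t yuv2rgb32_pixel_sketch(SwsContext *c, int Y, int U, int V)
    {
        const uint32_t *r = (const uint32_t *) c->table_rV[V];
        const uint32_t *g = (const uint32_t *)(c->table_gU[U] + c->table_gV[V]);
        const uint32_t *b = (const uint32_t *) c->table_bU[U];
        return r[Y] + g[Y] + b[Y];
    }
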
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_altivec.c b/contrib/ffmpeg/libswscale/yuv2rgb_altivec.c
index 72e418e8d..ebc9a6497 100644
--- a/contrib/ffmpeg/libswscale/yuv2rgb_altivec.c
+++ b/contrib/ffmpeg/libswscale/yuv2rgb_altivec.c
@@ -1,7 +1,7 @@
/*
marc.hoffman@analog.com March 8, 2004
- Altivec Acceleration for Color Space Conversion revision 0.2
+ AltiVec acceleration for colorspace conversion revision 0.2
convert I420 YV12 to RGB in various formats,
it rejects images that are not in 420 formats
@@ -59,7 +59,7 @@
NOTE quartz vo driver ARGB32_to_RGB24 consumes 30% of the processor
- Integrated luma prescaling adjustment for saturation/contrast/brightness adjustment.
+ Integrated luma prescaling adjustment for saturation/contrast/brightness adjustment.
*/
/*
@@ -77,7 +77,7 @@
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
@@ -139,70 +139,70 @@ typedef signed char sbyte;
static
const vector unsigned char
perm_rgb_0 = (const vector unsigned char)AVV(0x00,0x01,0x10,0x02,0x03,0x11,0x04,0x05,
- 0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a),
+ 0x12,0x06,0x07,0x13,0x08,0x09,0x14,0x0a),
perm_rgb_1 = (const vector unsigned char)AVV(0x0b,0x15,0x0c,0x0d,0x16,0x0e,0x0f,0x17,
- 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f),
+ 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f),
perm_rgb_2 = (const vector unsigned char)AVV(0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,
- 0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05),
+ 0x00,0x01,0x18,0x02,0x03,0x19,0x04,0x05),
perm_rgb_3 = (const vector unsigned char)AVV(0x1a,0x06,0x07,0x1b,0x08,0x09,0x1c,0x0a,
- 0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f);
-
-#define vec_merge3(x2,x1,x0,y0,y1,y2) \
-do { \
- typeof(x0) o0,o2,o3; \
- o0 = vec_mergeh (x0,x1); \
- y0 = vec_perm (o0, x2, perm_rgb_0);\
- o2 = vec_perm (o0, x2, perm_rgb_1);\
- o3 = vec_mergel (x0,x1); \
- y1 = vec_perm (o3,o2,perm_rgb_2); \
- y2 = vec_perm (o3,o2,perm_rgb_3); \
+ 0x0b,0x1d,0x0c,0x0d,0x1e,0x0e,0x0f,0x1f);
+
+#define vec_merge3(x2,x1,x0,y0,y1,y2) \
+do { \
+ typeof(x0) o0,o2,o3; \
+ o0 = vec_mergeh (x0,x1); \
+ y0 = vec_perm (o0, x2, perm_rgb_0); \
+ o2 = vec_perm (o0, x2, perm_rgb_1); \
+ o3 = vec_mergel (x0,x1); \
+ y1 = vec_perm (o3,o2,perm_rgb_2); \
+ y2 = vec_perm (o3,o2,perm_rgb_3); \
} while(0)
-#define vec_mstbgr24(x0,x1,x2,ptr) \
-do { \
- typeof(x0) _0,_1,_2; \
- vec_merge3 (x0,x1,x2,_0,_1,_2); \
- vec_st (_0, 0, ptr++); \
- vec_st (_1, 0, ptr++); \
- vec_st (_2, 0, ptr++); \
+#define vec_mstbgr24(x0,x1,x2,ptr) \
+do { \
+ typeof(x0) _0,_1,_2; \
+ vec_merge3 (x0,x1,x2,_0,_1,_2); \
+ vec_st (_0, 0, ptr++); \
+ vec_st (_1, 0, ptr++); \
+ vec_st (_2, 0, ptr++); \
} while (0);
-#define vec_mstrgb24(x0,x1,x2,ptr) \
-do { \
- typeof(x0) _0,_1,_2; \
- vec_merge3 (x2,x1,x0,_0,_1,_2); \
- vec_st (_0, 0, ptr++); \
- vec_st (_1, 0, ptr++); \
- vec_st (_2, 0, ptr++); \
+#define vec_mstrgb24(x0,x1,x2,ptr) \
+do { \
+ typeof(x0) _0,_1,_2; \
+ vec_merge3 (x2,x1,x0,_0,_1,_2); \
+ vec_st (_0, 0, ptr++); \
+ vec_st (_1, 0, ptr++); \
+ vec_st (_2, 0, ptr++); \
} while (0);
/* pack the pixels in rgb0 format
msb R
lsb 0
*/
-#define vec_mstrgb32(T,x0,x1,x2,x3,ptr) \
-do { \
- T _0,_1,_2,_3; \
- _0 = vec_mergeh (x0,x1); \
- _1 = vec_mergeh (x2,x3); \
- _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
- _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
- vec_st (_2, 0*16, (T *)ptr); \
- vec_st (_3, 1*16, (T *)ptr); \
- _0 = vec_mergel (x0,x1); \
- _1 = vec_mergel (x2,x3); \
- _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
- _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
- vec_st (_2, 2*16, (T *)ptr); \
- vec_st (_3, 3*16, (T *)ptr); \
- ptr += 4; \
+#define vec_mstrgb32(T,x0,x1,x2,x3,ptr) \
+do { \
+ T _0,_1,_2,_3; \
+ _0 = vec_mergeh (x0,x1); \
+ _1 = vec_mergeh (x2,x3); \
+ _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
+ _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
+ vec_st (_2, 0*16, (T *)ptr); \
+ vec_st (_3, 1*16, (T *)ptr); \
+ _0 = vec_mergel (x0,x1); \
+ _1 = vec_mergel (x2,x3); \
+ _2 = (T)vec_mergeh ((vector unsigned short)_0,(vector unsigned short)_1); \
+ _3 = (T)vec_mergel ((vector unsigned short)_0,(vector unsigned short)_1); \
+ vec_st (_2, 2*16, (T *)ptr); \
+ vec_st (_3, 3*16, (T *)ptr); \
+ ptr += 4; \
} while (0);
/*
| 1 0 1.4021 | | Y |
| 1 -0.3441 -0.7142 |x| Cb|
- | 1 1.7718 0 | | Cr|
+ | 1 1.7718 0 | | Cr|
Y: [-128 127]
@@ -216,51 +216,51 @@ do { \
#define vec_unh(x) \
- (vector signed short) \
- vec_perm(x,(typeof(x))AVV(0),\
- (vector unsigned char)AVV(0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\
- 0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07))
+ (vector signed short) \
+ vec_perm(x,(typeof(x))AVV(0),\
+ (vector unsigned char)AVV(0x10,0x00,0x10,0x01,0x10,0x02,0x10,0x03,\
+ 0x10,0x04,0x10,0x05,0x10,0x06,0x10,0x07))
#define vec_unl(x) \
- (vector signed short) \
- vec_perm(x,(typeof(x))AVV(0),\
- (vector unsigned char)AVV(0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\
- 0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F))
+ (vector signed short) \
+ vec_perm(x,(typeof(x))AVV(0),\
+ (vector unsigned char)AVV(0x10,0x08,0x10,0x09,0x10,0x0A,0x10,0x0B,\
+ 0x10,0x0C,0x10,0x0D,0x10,0x0E,0x10,0x0F))
#define vec_clip_s16(x) \
- vec_max (vec_min (x, (vector signed short)AVV(235,235,235,235,235,235,235,235)),\
- (vector signed short)AVV(16, 16, 16, 16, 16, 16, 16, 16 ))
+ vec_max (vec_min (x, (vector signed short)AVV(235,235,235,235,235,235,235,235)),\
+ (vector signed short)AVV( 16, 16, 16, 16, 16, 16, 16, 16))
#define vec_packclp(x,y) \
- (vector unsigned char)vec_packs \
- ((vector unsigned short)vec_max (x,(vector signed short) AVV(0)), \
- (vector unsigned short)vec_max (y,(vector signed short) AVV(0)))
+ (vector unsigned char)vec_packs \
+ ((vector unsigned short)vec_max (x,(vector signed short) AVV(0)), \
+ (vector unsigned short)vec_max (y,(vector signed short) AVV(0)))
//#define out_pixels(a,b,c,ptr) vec_mstrgb32(typeof(a),((typeof (a))AVV(0)),a,a,a,ptr)
static inline void cvtyuvtoRGB (SwsContext *c,
- vector signed short Y, vector signed short U, vector signed short V,
- vector signed short *R, vector signed short *G, vector signed short *B)
+ vector signed short Y, vector signed short U, vector signed short V,
+ vector signed short *R, vector signed short *G, vector signed short *B)
{
- vector signed short vx,ux,uvx;
+ vector signed short vx,ux,uvx;
- Y = vec_mradds (Y, c->CY, c->OY);
- U = vec_sub (U,(vector signed short)
- vec_splat((vector signed short)AVV(128),0));
- V = vec_sub (V,(vector signed short)
- vec_splat((vector signed short)AVV(128),0));
+ Y = vec_mradds (Y, c->CY, c->OY);
+ U = vec_sub (U,(vector signed short)
+ vec_splat((vector signed short)AVV(128),0));
+ V = vec_sub (V,(vector signed short)
+ vec_splat((vector signed short)AVV(128),0));
- // ux = (CBU*(u<<c->CSHIFT)+0x4000)>>15;
- ux = vec_sl (U, c->CSHIFT);
- *B = vec_mradds (ux, c->CBU, Y);
+ // ux = (CBU*(u<<c->CSHIFT)+0x4000)>>15;
+ ux = vec_sl (U, c->CSHIFT);
+ *B = vec_mradds (ux, c->CBU, Y);
- // vx = (CRV*(v<<c->CSHIFT)+0x4000)>>15;
- vx = vec_sl (V, c->CSHIFT);
- *R = vec_mradds (vx, c->CRV, Y);
+ // vx = (CRV*(v<<c->CSHIFT)+0x4000)>>15;
+ vx = vec_sl (V, c->CSHIFT);
+ *R = vec_mradds (vx, c->CRV, Y);
- // uvx = ((CGU*u) + (CGV*v))>>15;
- uvx = vec_mradds (U, c->CGU, Y);
- *G = vec_mradds (V, c->CGV, uvx);
+ // uvx = ((CGU*u) + (CGV*v))>>15;
+ uvx = vec_mradds (U, c->CGU, Y);
+ *G = vec_mradds (V, c->CGV, uvx);
}
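
For reference, the fixed-point arithmetic above can be sketched in scalar C. This is only an illustration, assuming vec_mradds(a, b, c) behaves like the vmhraddshs instruction, i.e. a saturating ((a*b + 0x4000) >> 15) + c per 16-bit lane; none of the names below exist in this file.

#include <stdint.h>

/* saturating ((a*b + 0x4000) >> 15) + c for one 16-bit lane (assumption, see above) */
static int16_t mradds_scalar(int16_t a, int16_t b, int16_t c)
{
    int32_t t = ((a * b + 0x4000) >> 15) + c;
    if (t >  32767) t =  32767;
    if (t < -32768) t = -32768;
    return (int16_t)t;
}

/* one pixel of cvtyuvtoRGB: u and v already have 128 subtracted,
   cy/oy/crv/cbu/cgu/cgv are the splatted per-context constants,
   and the shift of 2 corresponds to CSHIFT set in yuv2rgb_altivec_init_tables() */
static void cvt_scalar(int16_t y, int16_t u, int16_t v,
                       int16_t cy, int16_t oy, int16_t crv, int16_t cbu,
                       int16_t cgu, int16_t cgv,
                       int16_t *r, int16_t *g, int16_t *b)
{
    int16_t y1 = mradds_scalar(y, cy, oy);
    *b = mradds_scalar(u << 2, cbu, y1);
    *r = mradds_scalar(v << 2, crv, y1);
    *g = mradds_scalar(v, cgv, mradds_scalar(u, cgu, y1));
}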
@@ -271,164 +271,168 @@ static inline void cvtyuvtoRGB (SwsContext *c,
*/
-#define DEFCSP420_CVT(name,out_pixels) \
-static int altivec_##name (SwsContext *c, \
- unsigned char **in, int *instrides, \
- int srcSliceY, int srcSliceH, \
- unsigned char **oplanes, int *outstrides) \
-{ \
- int w = c->srcW; \
- int h = srcSliceH; \
- int i,j; \
- int instrides_scl[3]; \
- vector unsigned char y0,y1; \
- \
- vector signed char u,v; \
- \
- vector signed short Y0,Y1,Y2,Y3; \
- vector signed short U,V; \
- vector signed short vx,ux,uvx; \
- vector signed short vx0,ux0,uvx0; \
- vector signed short vx1,ux1,uvx1; \
- vector signed short R0,G0,B0; \
- vector signed short R1,G1,B1; \
- vector unsigned char R,G,B; \
- \
- vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \
- vector unsigned char align_perm; \
- \
- vector signed short \
- lCY = c->CY, \
- lOY = c->OY, \
- lCRV = c->CRV, \
- lCBU = c->CBU, \
- lCGU = c->CGU, \
- lCGV = c->CGV; \
- \
- vector unsigned short lCSHIFT = c->CSHIFT; \
- \
- ubyte *y1i = in[0]; \
- ubyte *y2i = in[0]+instrides[0]; \
- ubyte *ui = in[1]; \
- ubyte *vi = in[2]; \
- \
- vector unsigned char *oute \
- = (vector unsigned char *) \
- (oplanes[0]+srcSliceY*outstrides[0]); \
- vector unsigned char *outo \
- = (vector unsigned char *) \
- (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]); \
- \
- \
- instrides_scl[0] = instrides[0]*2-w; /* the loop moves y{1,2}i by w */ \
- instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */ \
- instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */ \
- \
- \
- for (i=0;i<h/2;i++) { \
- vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0); \
- vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1); \
- \
- for (j=0;j<w/16;j++) { \
- \
- y1ivP = (vector unsigned char *)y1i; \
- y2ivP = (vector unsigned char *)y2i; \
- uivP = (vector unsigned char *)ui; \
- vivP = (vector unsigned char *)vi; \
- \
- align_perm = vec_lvsl (0, y1i); \
- y0 = (vector unsigned char)vec_perm (y1ivP[0], y1ivP[1], align_perm);\
- \
- align_perm = vec_lvsl (0, y2i); \
- y1 = (vector unsigned char)vec_perm (y2ivP[0], y2ivP[1], align_perm);\
- \
- align_perm = vec_lvsl (0, ui); \
- u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm); \
- \
- align_perm = vec_lvsl (0, vi); \
- v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm); \
- \
- u = (vector signed char) \
- vec_sub (u,(vector signed char) \
- vec_splat((vector signed char)AVV(128),0));\
- v = (vector signed char) \
- vec_sub (v,(vector signed char) \
- vec_splat((vector signed char)AVV(128),0));\
- \
- U = vec_unpackh (u); \
- V = vec_unpackh (v); \
- \
- \
- Y0 = vec_unh (y0); \
- Y1 = vec_unl (y0); \
- Y2 = vec_unh (y1); \
- Y3 = vec_unl (y1); \
- \
- Y0 = vec_mradds (Y0, lCY, lOY); \
- Y1 = vec_mradds (Y1, lCY, lOY); \
- Y2 = vec_mradds (Y2, lCY, lOY); \
- Y3 = vec_mradds (Y3, lCY, lOY); \
- \
- /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */ \
- ux = vec_sl (U, lCSHIFT); \
- ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0)); \
- ux0 = vec_mergeh (ux,ux); \
- ux1 = vec_mergel (ux,ux); \
- \
- /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */ \
- vx = vec_sl (V, lCSHIFT); \
- vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0)); \
- vx0 = vec_mergeh (vx,vx); \
- vx1 = vec_mergel (vx,vx); \
- \
- /* uvx = ((CGU*u) + (CGV*v))>>15 */ \
- uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0)); \
- uvx = vec_mradds (V, lCGV, uvx); \
- uvx0 = vec_mergeh (uvx,uvx); \
- uvx1 = vec_mergel (uvx,uvx); \
- \
- R0 = vec_add (Y0,vx0); \
- G0 = vec_add (Y0,uvx0); \
- B0 = vec_add (Y0,ux0); \
- R1 = vec_add (Y1,vx1); \
- G1 = vec_add (Y1,uvx1); \
- B1 = vec_add (Y1,ux1); \
- \
- R = vec_packclp (R0,R1); \
- G = vec_packclp (G0,G1); \
- B = vec_packclp (B0,B1); \
- \
- out_pixels(R,G,B,oute); \
- \
- R0 = vec_add (Y2,vx0); \
- G0 = vec_add (Y2,uvx0); \
- B0 = vec_add (Y2,ux0); \
- R1 = vec_add (Y3,vx1); \
- G1 = vec_add (Y3,uvx1); \
- B1 = vec_add (Y3,ux1); \
- R = vec_packclp (R0,R1); \
- G = vec_packclp (G0,G1); \
- B = vec_packclp (B0,B1); \
- \
- \
- out_pixels(R,G,B,outo); \
- \
- y1i += 16; \
- y2i += 16; \
- ui += 8; \
- vi += 8; \
- \
- } \
- \
- outo += (outstrides[0])>>4; \
- oute += (outstrides[0])>>4; \
- \
- ui += instrides_scl[1]; \
- vi += instrides_scl[2]; \
- y1i += instrides_scl[0]; \
- y2i += instrides_scl[0]; \
- } \
- return srcSliceH; \
+#define DEFCSP420_CVT(name,out_pixels) \
+static int altivec_##name (SwsContext *c, \
+ unsigned char **in, int *instrides, \
+ int srcSliceY, int srcSliceH, \
+ unsigned char **oplanes, int *outstrides) \
+{ \
+ int w = c->srcW; \
+ int h = srcSliceH; \
+ int i,j; \
+ int instrides_scl[3]; \
+ vector unsigned char y0,y1; \
+ \
+ vector signed char u,v; \
+ \
+ vector signed short Y0,Y1,Y2,Y3; \
+ vector signed short U,V; \
+ vector signed short vx,ux,uvx; \
+ vector signed short vx0,ux0,uvx0; \
+ vector signed short vx1,ux1,uvx1; \
+ vector signed short R0,G0,B0; \
+ vector signed short R1,G1,B1; \
+ vector unsigned char R,G,B; \
+ \
+ vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \
+ vector unsigned char align_perm; \
+ \
+ vector signed short \
+ lCY = c->CY, \
+ lOY = c->OY, \
+ lCRV = c->CRV, \
+ lCBU = c->CBU, \
+ lCGU = c->CGU, \
+ lCGV = c->CGV; \
+ \
+ vector unsigned short lCSHIFT = c->CSHIFT; \
+ \
+ ubyte *y1i = in[0]; \
+ ubyte *y2i = in[0]+instrides[0]; \
+ ubyte *ui = in[1]; \
+ ubyte *vi = in[2]; \
+ \
+ vector unsigned char *oute \
+ = (vector unsigned char *) \
+ (oplanes[0]+srcSliceY*outstrides[0]); \
+ vector unsigned char *outo \
+ = (vector unsigned char *) \
+ (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]); \
+ \
+ \
+ instrides_scl[0] = instrides[0]*2-w; /* the loop moves y{1,2}i by w */ \
+ instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */ \
+ instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */ \
+ \
+ \
+ for (i=0;i<h/2;i++) { \
+ vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0); \
+ vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1); \
+ \
+ for (j=0;j<w/16;j++) { \
+ \
+ y1ivP = (vector unsigned char *)y1i; \
+ y2ivP = (vector unsigned char *)y2i; \
+ uivP = (vector unsigned char *)ui; \
+ vivP = (vector unsigned char *)vi; \
+ \
+ align_perm = vec_lvsl (0, y1i); \
+ y0 = (vector unsigned char) \
+ vec_perm (y1ivP[0], y1ivP[1], align_perm); \
+ \
+ align_perm = vec_lvsl (0, y2i); \
+ y1 = (vector unsigned char) \
+ vec_perm (y2ivP[0], y2ivP[1], align_perm); \
+ \
+ align_perm = vec_lvsl (0, ui); \
+ u = (vector signed char) \
+ vec_perm (uivP[0], uivP[1], align_perm); \
+ \
+ align_perm = vec_lvsl (0, vi); \
+ v = (vector signed char) \
+ vec_perm (vivP[0], vivP[1], align_perm); \
+ \
+ u = (vector signed char) \
+ vec_sub (u,(vector signed char) \
+ vec_splat((vector signed char)AVV(128),0)); \
+ v = (vector signed char) \
+ vec_sub (v,(vector signed char) \
+ vec_splat((vector signed char)AVV(128),0)); \
+ \
+ U = vec_unpackh (u); \
+ V = vec_unpackh (v); \
+ \
+ \
+ Y0 = vec_unh (y0); \
+ Y1 = vec_unl (y0); \
+ Y2 = vec_unh (y1); \
+ Y3 = vec_unl (y1); \
+ \
+ Y0 = vec_mradds (Y0, lCY, lOY); \
+ Y1 = vec_mradds (Y1, lCY, lOY); \
+ Y2 = vec_mradds (Y2, lCY, lOY); \
+ Y3 = vec_mradds (Y3, lCY, lOY); \
+ \
+ /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */ \
+ ux = vec_sl (U, lCSHIFT); \
+ ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0)); \
+ ux0 = vec_mergeh (ux,ux); \
+ ux1 = vec_mergel (ux,ux); \
+ \
+ /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */ \
+ vx = vec_sl (V, lCSHIFT); \
+ vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0)); \
+ vx0 = vec_mergeh (vx,vx); \
+ vx1 = vec_mergel (vx,vx); \
+ \
+ /* uvx = ((CGU*u) + (CGV*v))>>15 */ \
+ uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0)); \
+ uvx = vec_mradds (V, lCGV, uvx); \
+ uvx0 = vec_mergeh (uvx,uvx); \
+ uvx1 = vec_mergel (uvx,uvx); \
+ \
+ R0 = vec_add (Y0,vx0); \
+ G0 = vec_add (Y0,uvx0); \
+ B0 = vec_add (Y0,ux0); \
+ R1 = vec_add (Y1,vx1); \
+ G1 = vec_add (Y1,uvx1); \
+ B1 = vec_add (Y1,ux1); \
+ \
+ R = vec_packclp (R0,R1); \
+ G = vec_packclp (G0,G1); \
+ B = vec_packclp (B0,B1); \
+ \
+ out_pixels(R,G,B,oute); \
+ \
+ R0 = vec_add (Y2,vx0); \
+ G0 = vec_add (Y2,uvx0); \
+ B0 = vec_add (Y2,ux0); \
+ R1 = vec_add (Y3,vx1); \
+ G1 = vec_add (Y3,uvx1); \
+ B1 = vec_add (Y3,ux1); \
+ R = vec_packclp (R0,R1); \
+ G = vec_packclp (G0,G1); \
+ B = vec_packclp (B0,B1); \
+ \
+ \
+ out_pixels(R,G,B,outo); \
+ \
+ y1i += 16; \
+ y2i += 16; \
+ ui += 8; \
+ vi += 8; \
+ \
+ } \
+ \
+ outo += (outstrides[0])>>4; \
+ oute += (outstrides[0])>>4; \
+ \
+ ui += instrides_scl[1]; \
+ vi += instrides_scl[2]; \
+ y1i += instrides_scl[0]; \
+ y2i += instrides_scl[0]; \
+ } \
+ return srcSliceH; \
}
@@ -443,151 +447,151 @@ DEFCSP420_CVT (yuv2_abgr, out_abgr)
#if 1
DEFCSP420_CVT (yuv2_bgra, out_bgra)
#else
-static int altivec_yuv2_bgra32 (SwsContext *c,
- unsigned char **in, int *instrides,
- int srcSliceY, int srcSliceH,
- unsigned char **oplanes, int *outstrides)
-{
- int w = c->srcW;
- int h = srcSliceH;
- int i,j;
- int instrides_scl[3];
- vector unsigned char y0,y1;
-
- vector signed char u,v;
-
- vector signed short Y0,Y1,Y2,Y3;
- vector signed short U,V;
- vector signed short vx,ux,uvx;
- vector signed short vx0,ux0,uvx0;
- vector signed short vx1,ux1,uvx1;
- vector signed short R0,G0,B0;
- vector signed short R1,G1,B1;
- vector unsigned char R,G,B;
-
- vector unsigned char *uivP, *vivP;
- vector unsigned char align_perm;
-
- vector signed short
- lCY = c->CY,
- lOY = c->OY,
- lCRV = c->CRV,
- lCBU = c->CBU,
- lCGU = c->CGU,
- lCGV = c->CGV;
-
- vector unsigned short lCSHIFT = c->CSHIFT;
-
- ubyte *y1i = in[0];
- ubyte *y2i = in[0]+w;
- ubyte *ui = in[1];
- ubyte *vi = in[2];
-
- vector unsigned char *oute
- = (vector unsigned char *)
- (oplanes[0]+srcSliceY*outstrides[0]);
- vector unsigned char *outo
- = (vector unsigned char *)
- (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);
-
-
- instrides_scl[0] = instrides[0];
- instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */
- instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */
-
-
- for (i=0;i<h/2;i++) {
- vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);
- vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);
-
- for (j=0;j<w/16;j++) {
-
- y0 = vec_ldl (0,y1i);
- y1 = vec_ldl (0,y2i);
- uivP = (vector unsigned char *)ui;
- vivP = (vector unsigned char *)vi;
-
- align_perm = vec_lvsl (0, ui);
- u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);
-
- align_perm = vec_lvsl (0, vi);
- v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);
- u = (vector signed char)
- vec_sub (u,(vector signed char)
- vec_splat((vector signed char)AVV(128),0));
-
- v = (vector signed char)
- vec_sub (v, (vector signed char)
- vec_splat((vector signed char)AVV(128),0));
-
- U = vec_unpackh (u);
- V = vec_unpackh (v);
-
-
- Y0 = vec_unh (y0);
- Y1 = vec_unl (y0);
- Y2 = vec_unh (y1);
- Y3 = vec_unl (y1);
-
- Y0 = vec_mradds (Y0, lCY, lOY);
- Y1 = vec_mradds (Y1, lCY, lOY);
- Y2 = vec_mradds (Y2, lCY, lOY);
- Y3 = vec_mradds (Y3, lCY, lOY);
-
- /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */
- ux = vec_sl (U, lCSHIFT);
- ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0));
- ux0 = vec_mergeh (ux,ux);
- ux1 = vec_mergel (ux,ux);
-
- /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */
- vx = vec_sl (V, lCSHIFT);
- vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0));
- vx0 = vec_mergeh (vx,vx);
- vx1 = vec_mergel (vx,vx);
- /* uvx = ((CGU*u) + (CGV*v))>>15 */
- uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0));
- uvx = vec_mradds (V, lCGV, uvx);
- uvx0 = vec_mergeh (uvx,uvx);
- uvx1 = vec_mergel (uvx,uvx);
- R0 = vec_add (Y0,vx0);
- G0 = vec_add (Y0,uvx0);
- B0 = vec_add (Y0,ux0);
- R1 = vec_add (Y1,vx1);
- G1 = vec_add (Y1,uvx1);
- B1 = vec_add (Y1,ux1);
- R = vec_packclp (R0,R1);
- G = vec_packclp (G0,G1);
- B = vec_packclp (B0,B1);
-
- out_argb(R,G,B,oute);
- R0 = vec_add (Y2,vx0);
- G0 = vec_add (Y2,uvx0);
- B0 = vec_add (Y2,ux0);
- R1 = vec_add (Y3,vx1);
- G1 = vec_add (Y3,uvx1);
- B1 = vec_add (Y3,ux1);
- R = vec_packclp (R0,R1);
- G = vec_packclp (G0,G1);
- B = vec_packclp (B0,B1);
-
- out_argb(R,G,B,outo);
- y1i += 16;
- y2i += 16;
- ui += 8;
- vi += 8;
-
- }
-
- outo += (outstrides[0])>>4;
- oute += (outstrides[0])>>4;
-
- ui += instrides_scl[1];
- vi += instrides_scl[2];
- y1i += instrides_scl[0];
- y2i += instrides_scl[0];
- }
- return srcSliceH;
+static int altivec_yuv2_bgra32 (SwsContext *c,
+ unsigned char **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ unsigned char **oplanes, int *outstrides)
+{
+ int w = c->srcW;
+ int h = srcSliceH;
+ int i,j;
+ int instrides_scl[3];
+ vector unsigned char y0,y1;
+
+ vector signed char u,v;
+
+ vector signed short Y0,Y1,Y2,Y3;
+ vector signed short U,V;
+ vector signed short vx,ux,uvx;
+ vector signed short vx0,ux0,uvx0;
+ vector signed short vx1,ux1,uvx1;
+ vector signed short R0,G0,B0;
+ vector signed short R1,G1,B1;
+ vector unsigned char R,G,B;
+
+ vector unsigned char *uivP, *vivP;
+ vector unsigned char align_perm;
+
+ vector signed short
+ lCY = c->CY,
+ lOY = c->OY,
+ lCRV = c->CRV,
+ lCBU = c->CBU,
+ lCGU = c->CGU,
+ lCGV = c->CGV;
+
+ vector unsigned short lCSHIFT = c->CSHIFT;
+
+ ubyte *y1i = in[0];
+ ubyte *y2i = in[0]+w;
+ ubyte *ui = in[1];
+ ubyte *vi = in[2];
+
+ vector unsigned char *oute
+ = (vector unsigned char *)
+ (oplanes[0]+srcSliceY*outstrides[0]);
+ vector unsigned char *outo
+ = (vector unsigned char *)
+ (oplanes[0]+srcSliceY*outstrides[0]+outstrides[0]);
+
+
+ instrides_scl[0] = instrides[0];
+ instrides_scl[1] = instrides[1]-w/2; /* the loop moves ui by w/2 */
+ instrides_scl[2] = instrides[2]-w/2; /* the loop moves vi by w/2 */
+
+
+ for (i=0;i<h/2;i++) {
+ vec_dstst (outo, (0x02000002|(((w*3+32)/32)<<16)), 0);
+ vec_dstst (oute, (0x02000002|(((w*3+32)/32)<<16)), 1);
+
+ for (j=0;j<w/16;j++) {
+
+ y0 = vec_ldl (0,y1i);
+ y1 = vec_ldl (0,y2i);
+ uivP = (vector unsigned char *)ui;
+ vivP = (vector unsigned char *)vi;
+
+ align_perm = vec_lvsl (0, ui);
+ u = (vector signed char)vec_perm (uivP[0], uivP[1], align_perm);
+
+ align_perm = vec_lvsl (0, vi);
+ v = (vector signed char)vec_perm (vivP[0], vivP[1], align_perm);
+ u = (vector signed char)
+ vec_sub (u,(vector signed char)
+ vec_splat((vector signed char)AVV(128),0));
+
+ v = (vector signed char)
+ vec_sub (v, (vector signed char)
+ vec_splat((vector signed char)AVV(128),0));
+
+ U = vec_unpackh (u);
+ V = vec_unpackh (v);
+
+
+ Y0 = vec_unh (y0);
+ Y1 = vec_unl (y0);
+ Y2 = vec_unh (y1);
+ Y3 = vec_unl (y1);
+
+ Y0 = vec_mradds (Y0, lCY, lOY);
+ Y1 = vec_mradds (Y1, lCY, lOY);
+ Y2 = vec_mradds (Y2, lCY, lOY);
+ Y3 = vec_mradds (Y3, lCY, lOY);
+
+ /* ux = (CBU*(u<<CSHIFT)+0x4000)>>15 */
+ ux = vec_sl (U, lCSHIFT);
+ ux = vec_mradds (ux, lCBU, (vector signed short)AVV(0));
+ ux0 = vec_mergeh (ux,ux);
+ ux1 = vec_mergel (ux,ux);
+
+ /* vx = (CRV*(v<<CSHIFT)+0x4000)>>15; */
+ vx = vec_sl (V, lCSHIFT);
+ vx = vec_mradds (vx, lCRV, (vector signed short)AVV(0));
+ vx0 = vec_mergeh (vx,vx);
+ vx1 = vec_mergel (vx,vx);
+ /* uvx = ((CGU*u) + (CGV*v))>>15 */
+ uvx = vec_mradds (U, lCGU, (vector signed short)AVV(0));
+ uvx = vec_mradds (V, lCGV, uvx);
+ uvx0 = vec_mergeh (uvx,uvx);
+ uvx1 = vec_mergel (uvx,uvx);
+ R0 = vec_add (Y0,vx0);
+ G0 = vec_add (Y0,uvx0);
+ B0 = vec_add (Y0,ux0);
+ R1 = vec_add (Y1,vx1);
+ G1 = vec_add (Y1,uvx1);
+ B1 = vec_add (Y1,ux1);
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ out_argb(R,G,B,oute);
+ R0 = vec_add (Y2,vx0);
+ G0 = vec_add (Y2,uvx0);
+ B0 = vec_add (Y2,ux0);
+ R1 = vec_add (Y3,vx1);
+ G1 = vec_add (Y3,uvx1);
+ B1 = vec_add (Y3,ux1);
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ out_argb(R,G,B,outo);
+ y1i += 16;
+ y2i += 16;
+ ui += 8;
+ vi += 8;
+
+ }
+
+ outo += (outstrides[0])>>4;
+ oute += (outstrides[0])>>4;
+
+ ui += instrides_scl[1];
+ vi += instrides_scl[2];
+ y1i += instrides_scl[0];
+ y2i += instrides_scl[0];
+ }
+ return srcSliceH;
}
#endif
@@ -603,77 +607,77 @@ DEFCSP420_CVT (yuv2_bgr24, out_bgr24)
// 0123 4567 89ab cdef
static
const vector unsigned char
- demux_u = (const vector unsigned char)AVV(0x10,0x00,0x10,0x00,
- 0x10,0x04,0x10,0x04,
- 0x10,0x08,0x10,0x08,
- 0x10,0x0c,0x10,0x0c),
- demux_v = (const vector unsigned char)AVV(0x10,0x02,0x10,0x02,
- 0x10,0x06,0x10,0x06,
- 0x10,0x0A,0x10,0x0A,
- 0x10,0x0E,0x10,0x0E),
- demux_y = (const vector unsigned char)AVV(0x10,0x01,0x10,0x03,
- 0x10,0x05,0x10,0x07,
- 0x10,0x09,0x10,0x0B,
- 0x10,0x0D,0x10,0x0F);
+ demux_u = (const vector unsigned char)AVV(0x10,0x00,0x10,0x00,
+ 0x10,0x04,0x10,0x04,
+ 0x10,0x08,0x10,0x08,
+ 0x10,0x0c,0x10,0x0c),
+ demux_v = (const vector unsigned char)AVV(0x10,0x02,0x10,0x02,
+ 0x10,0x06,0x10,0x06,
+ 0x10,0x0A,0x10,0x0A,
+ 0x10,0x0E,0x10,0x0E),
+ demux_y = (const vector unsigned char)AVV(0x10,0x01,0x10,0x03,
+ 0x10,0x05,0x10,0x07,
+ 0x10,0x09,0x10,0x0B,
+ 0x10,0x0D,0x10,0x0F);
/*
this is so I can play live CCIR raw video
*/
static int altivec_uyvy_rgb32 (SwsContext *c,
- unsigned char **in, int *instrides,
- int srcSliceY, int srcSliceH,
- unsigned char **oplanes, int *outstrides)
+ unsigned char **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ unsigned char **oplanes, int *outstrides)
{
- int w = c->srcW;
- int h = srcSliceH;
- int i,j;
- vector unsigned char uyvy;
- vector signed short Y,U,V;
- vector signed short R0,G0,B0,R1,G1,B1;
- vector unsigned char R,G,B;
- vector unsigned char *out;
- ubyte *img;
+ int w = c->srcW;
+ int h = srcSliceH;
+ int i,j;
+ vector unsigned char uyvy;
+ vector signed short Y,U,V;
+ vector signed short R0,G0,B0,R1,G1,B1;
+ vector unsigned char R,G,B;
+ vector unsigned char *out;
+ ubyte *img;
- img = in[0];
- out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);
+ img = in[0];
+ out = (vector unsigned char *)(oplanes[0]+srcSliceY*outstrides[0]);
- for (i=0;i<h;i++) {
- for (j=0;j<w/16;j++) {
- uyvy = vec_ld (0, img);
- U = (vector signed short)
- vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
+ for (i=0;i<h;i++) {
+ for (j=0;j<w/16;j++) {
+ uyvy = vec_ld (0, img);
+ U = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
- V = (vector signed short)
- vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
+ V = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
- Y = (vector signed short)
- vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);
+ Y = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);
- cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);
+ cvtyuvtoRGB (c, Y,U,V,&R0,&G0,&B0);
- uyvy = vec_ld (16, img);
- U = (vector signed short)
- vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
+ uyvy = vec_ld (16, img);
+ U = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_u);
- V = (vector signed short)
- vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
+ V = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_v);
- Y = (vector signed short)
- vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);
+ Y = (vector signed short)
+ vec_perm (uyvy, (vector unsigned char)AVV(0), demux_y);
- cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);
+ cvtyuvtoRGB (c, Y,U,V,&R1,&G1,&B1);
- R = vec_packclp (R0,R1);
- G = vec_packclp (G0,G1);
- B = vec_packclp (B0,B1);
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
- // vec_mstbgr24 (R,G,B, out);
- out_rgba (R,G,B,out);
+ // vec_mstbgr24 (R,G,B, out);
+ out_rgba (R,G,B,out);
- img += 32;
+ img += 32;
+ }
}
- }
- return srcSliceH;
+ return srcSliceH;
}
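
For readers unfamiliar with UYVY, a sketch of what the demux_* permute vectors above select. This assumes the usual vec_perm convention where a control byte of 0x10 picks byte 0 of the second operand, here the AVV(0) zero vector, so each sample is zero-extended into a 16-bit lane.

/* UYVY memory layout, 16 bytes = 8 pixels:
 *   U0 Y0 V0 Y1  U1 Y2 V1 Y3  U2 Y4 V2 Y5  U3 Y6 V3 Y7
 * demux_y picks bytes 1,3,5,...,15        -> Y0 Y1 ... Y7, one lane each
 * demux_u picks bytes 0,4,8,12, doubled   -> U0 U0 U1 U1 U2 U2 U3 U3
 * demux_v picks bytes 2,6,10,14, doubled  -> V0 V0 V1 V1 V2 V2 V3 V3
 * so each chroma sample is shared by the two pixels it belongs to. */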
@@ -686,278 +690,278 @@ static int altivec_uyvy_rgb32 (SwsContext *c,
*/
SwsFunc yuv2rgb_init_altivec (SwsContext *c)
{
- if (!(c->flags & SWS_CPU_CAPS_ALTIVEC))
- return NULL;
+ if (!(c->flags & SWS_CPU_CAPS_ALTIVEC))
+ return NULL;
- /*
- and this seems not to matter too much I tried a bunch of
- videos with abnormal widths and mplayer crashes else where.
- mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
- boom with X11 bad match.
-
- */
- if ((c->srcW & 0xf) != 0) return NULL;
-
- switch (c->srcFormat) {
- case PIX_FMT_YUV410P:
- case PIX_FMT_YUV420P:
- /*case IMGFMT_CLPL: ??? */
- case PIX_FMT_GRAY8:
- case PIX_FMT_NV12:
- case PIX_FMT_NV21:
- if ((c->srcH & 0x1) != 0)
- return NULL;
-
- switch(c->dstFormat){
- case PIX_FMT_RGB24:
- av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n");
- return altivec_yuv2_rgb24;
- case PIX_FMT_BGR24:
- av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n");
- return altivec_yuv2_bgr24;
- case PIX_FMT_ARGB:
- av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n");
- return altivec_yuv2_argb;
- case PIX_FMT_ABGR:
- av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n");
- return altivec_yuv2_abgr;
- case PIX_FMT_RGBA:
- av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n");
- return altivec_yuv2_rgba;
- case PIX_FMT_BGRA:
- av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n");
- return altivec_yuv2_bgra;
- default: return NULL;
- }
- break;
-
- case PIX_FMT_UYVY422:
- switch(c->dstFormat){
- case PIX_FMT_BGR32:
- av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n");
- return altivec_uyvy_rgb32;
- default: return NULL;
- }
- break;
+ /*
+    and this does not seem to matter too much; I tried a bunch of
+    videos with abnormal widths and mplayer crashes elsewhere.
+    mplayer -vo x11 -rawvideo on:w=350:h=240 raw-350x240.eyuv
+    crashes with an X11 BadMatch error.
- }
- return NULL;
+ */
+ if ((c->srcW & 0xf) != 0) return NULL;
+
+ switch (c->srcFormat) {
+ case PIX_FMT_YUV410P:
+ case PIX_FMT_YUV420P:
+ /*case IMGFMT_CLPL: ??? */
+ case PIX_FMT_GRAY8:
+ case PIX_FMT_NV12:
+ case PIX_FMT_NV21:
+ if ((c->srcH & 0x1) != 0)
+ return NULL;
+
+ switch(c->dstFormat){
+ case PIX_FMT_RGB24:
+ av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGB24\n");
+ return altivec_yuv2_rgb24;
+ case PIX_FMT_BGR24:
+ av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGR24\n");
+ return altivec_yuv2_bgr24;
+ case PIX_FMT_ARGB:
+ av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ARGB\n");
+ return altivec_yuv2_argb;
+ case PIX_FMT_ABGR:
+ av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space ABGR\n");
+ return altivec_yuv2_abgr;
+ case PIX_FMT_RGBA:
+ av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space RGBA\n");
+ return altivec_yuv2_rgba;
+ case PIX_FMT_BGRA:
+ av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space BGRA\n");
+ return altivec_yuv2_bgra;
+ default: return NULL;
+ }
+ break;
+
+ case PIX_FMT_UYVY422:
+ switch(c->dstFormat){
+ case PIX_FMT_BGR32:
+ av_log(c, AV_LOG_WARNING, "ALTIVEC: Color Space UYVY -> RGB32\n");
+ return altivec_uyvy_rgb32;
+ default: return NULL;
+ }
+ break;
+
+ }
+ return NULL;
}
static uint16_t roundToInt16(int64_t f){
- int r= (f + (1<<15))>>16;
- if(r<-0x7FFF) return 0x8000;
- else if(r> 0x7FFF) return 0x7FFF;
- else return r;
+ int r= (f + (1<<15))>>16;
+ if (r<-0x7FFF) return 0x8000;
+ else if (r> 0x7FFF) return 0x7FFF;
+ else return r;
}
void yuv2rgb_altivec_init_tables (SwsContext *c, const int inv_table[4],int brightness,int contrast, int saturation)
{
- union {
- signed short tmp[8] __attribute__ ((aligned(16)));
- vector signed short vec;
- } buf;
-
- buf.tmp[0] = ( (0xffffLL) * contrast>>8 )>>9; //cy
- buf.tmp[1] = -256*brightness; //oy
- buf.tmp[2] = (inv_table[0]>>3) *(contrast>>16)*(saturation>>16); //crv
- buf.tmp[3] = (inv_table[1]>>3) *(contrast>>16)*(saturation>>16); //cbu
- buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16)); //cgu
- buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16)); //cgv
-
-
- c->CSHIFT = (vector unsigned short)vec_splat_u16(2);
- c->CY = vec_splat ((vector signed short)buf.vec, 0);
- c->OY = vec_splat ((vector signed short)buf.vec, 1);
- c->CRV = vec_splat ((vector signed short)buf.vec, 2);
- c->CBU = vec_splat ((vector signed short)buf.vec, 3);
- c->CGU = vec_splat ((vector signed short)buf.vec, 4);
- c->CGV = vec_splat ((vector signed short)buf.vec, 5);
+ union {
+ signed short tmp[8] __attribute__ ((aligned(16)));
+ vector signed short vec;
+ } buf;
+
+ buf.tmp[0] = ((0xffffLL) * contrast>>8)>>9; //cy
+ buf.tmp[1] = -256*brightness; //oy
+ buf.tmp[2] = (inv_table[0]>>3) *(contrast>>16)*(saturation>>16); //crv
+ buf.tmp[3] = (inv_table[1]>>3) *(contrast>>16)*(saturation>>16); //cbu
+ buf.tmp[4] = -((inv_table[2]>>1)*(contrast>>16)*(saturation>>16)); //cgu
+ buf.tmp[5] = -((inv_table[3]>>1)*(contrast>>16)*(saturation>>16)); //cgv
+
+
+ c->CSHIFT = (vector unsigned short)vec_splat_u16(2);
+ c->CY = vec_splat ((vector signed short)buf.vec, 0);
+ c->OY = vec_splat ((vector signed short)buf.vec, 1);
+ c->CRV = vec_splat ((vector signed short)buf.vec, 2);
+ c->CBU = vec_splat ((vector signed short)buf.vec, 3);
+ c->CGU = vec_splat ((vector signed short)buf.vec, 4);
+ c->CGV = vec_splat ((vector signed short)buf.vec, 5);
#if 0
-{
-int i;
-char *v[6]={"cy","oy","crv","cbu","cgu","cgv"};
-for (i=0; i<6;i++)
- printf("%s %d ", v[i],buf.tmp[i] );
- printf("\n");
-}
+ {
+ int i;
+ char *v[6]={"cy","oy","crv","cbu","cgu","cgv"};
+ for (i=0; i<6; i++)
+ printf("%s %d ", v[i],buf.tmp[i] );
+ printf("\n");
+ }
#endif
- return;
+ return;
}
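
A rough sanity check of the scaling, under the assumption (not stated in this file) that contrast and saturation arrive as 16.16 fixed-point values with 1<<16 meaning neutral, and brightness 0.

/* Hedged sanity check (assumptions: contrast == saturation == 1<<16,
 * brightness == 0, inv_table[0] ~ 104570 as in Inverse_Table_6_9):
 *   CRV            = inv_table[0] >> 3              ~ 13071
 *   effective gain = CRV * (1 << CSHIFT) / 32768
 *                  = inv_table[0] / 65536           ~ 1.596
 * i.e. the 1.4021 Cr->R term of the matrix further up, rescaled by
 * 255/224 to stretch 16..235 luma onto the full output range. */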
void
altivec_yuv2packedX (SwsContext *c,
- int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
- int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
- uint8_t *dest, int dstW, int dstY)
+ int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
+ int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
+ uint8_t *dest, int dstW, int dstY)
{
- int i,j;
- vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
- vector signed short R0,G0,B0,R1,G1,B1;
+ int i,j;
+ vector signed short X,X0,X1,Y0,U0,V0,Y1,U1,V1,U,V;
+ vector signed short R0,G0,B0,R1,G1,B1;
- vector unsigned char R,G,B;
- vector unsigned char *out,*nout;
+ vector unsigned char R,G,B;
+ vector unsigned char *out,*nout;
- vector signed short RND = vec_splat_s16(1<<3);
- vector unsigned short SCL = vec_splat_u16(4);
- unsigned long scratch[16] __attribute__ ((aligned (16)));
+ vector signed short RND = vec_splat_s16(1<<3);
+ vector unsigned short SCL = vec_splat_u16(4);
+ unsigned long scratch[16] __attribute__ ((aligned (16)));
- vector signed short *YCoeffs, *CCoeffs;
+ vector signed short *YCoeffs, *CCoeffs;
- YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
- CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;
+ YCoeffs = c->vYCoeffsBank+dstY*lumFilterSize;
+ CCoeffs = c->vCCoeffsBank+dstY*chrFilterSize;
- out = (vector unsigned char *)dest;
+ out = (vector unsigned char *)dest;
- for(i=0; i<dstW; i+=16){
- Y0 = RND;
- Y1 = RND;
- /* extract 16 coeffs from lumSrc */
- for(j=0; j<lumFilterSize; j++) {
- X0 = vec_ld (0, &lumSrc[j][i]);
- X1 = vec_ld (16, &lumSrc[j][i]);
- Y0 = vec_mradds (X0, YCoeffs[j], Y0);
- Y1 = vec_mradds (X1, YCoeffs[j], Y1);
- }
-
- U = RND;
- V = RND;
- /* extract 8 coeffs from U,V */
- for(j=0; j<chrFilterSize; j++) {
- X = vec_ld (0, &chrSrc[j][i/2]);
- U = vec_mradds (X, CCoeffs[j], U);
- X = vec_ld (0, &chrSrc[j][i/2+2048]);
- V = vec_mradds (X, CCoeffs[j], V);
- }
-
- /* scale and clip signals */
- Y0 = vec_sra (Y0, SCL);
- Y1 = vec_sra (Y1, SCL);
- U = vec_sra (U, SCL);
- V = vec_sra (V, SCL);
-
- Y0 = vec_clip_s16 (Y0);
- Y1 = vec_clip_s16 (Y1);
- U = vec_clip_s16 (U);
- V = vec_clip_s16 (V);
-
- /* now we have
- Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
- U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
-
- Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
- U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
- V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
- */
+ for (i=0; i<dstW; i+=16){
+ Y0 = RND;
+ Y1 = RND;
+ /* extract 16 coeffs from lumSrc */
+ for (j=0; j<lumFilterSize; j++) {
+ X0 = vec_ld (0, &lumSrc[j][i]);
+ X1 = vec_ld (16, &lumSrc[j][i]);
+ Y0 = vec_mradds (X0, YCoeffs[j], Y0);
+ Y1 = vec_mradds (X1, YCoeffs[j], Y1);
+ }
- U0 = vec_mergeh (U,U);
- V0 = vec_mergeh (V,V);
-
- U1 = vec_mergel (U,U);
- V1 = vec_mergel (V,V);
-
- cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
- cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
-
- R = vec_packclp (R0,R1);
- G = vec_packclp (G0,G1);
- B = vec_packclp (B0,B1);
-
- switch(c->dstFormat) {
- case PIX_FMT_ABGR: out_abgr (R,G,B,out); break;
- case PIX_FMT_BGRA: out_bgra (R,G,B,out); break;
- case PIX_FMT_RGBA: out_rgba (R,G,B,out); break;
- case PIX_FMT_ARGB: out_argb (R,G,B,out); break;
- case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
- case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
- default:
- {
- /* If this is reached, the caller should have called yuv2packedXinC
- instead. */
- static int printed_error_message;
- if(!printed_error_message) {
- av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
- sws_format_name(c->dstFormat));
- printed_error_message=1;
- }
- return;
+ U = RND;
+ V = RND;
+ /* extract 8 coeffs from U,V */
+ for (j=0; j<chrFilterSize; j++) {
+ X = vec_ld (0, &chrSrc[j][i/2]);
+ U = vec_mradds (X, CCoeffs[j], U);
+ X = vec_ld (0, &chrSrc[j][i/2+2048]);
+ V = vec_mradds (X, CCoeffs[j], V);
}
- }
- }
-
- if (i < dstW) {
- i -= 16;
-
- Y0 = RND;
- Y1 = RND;
- /* extract 16 coeffs from lumSrc */
- for(j=0; j<lumFilterSize; j++) {
- X0 = vec_ld (0, &lumSrc[j][i]);
- X1 = vec_ld (16, &lumSrc[j][i]);
- Y0 = vec_mradds (X0, YCoeffs[j], Y0);
- Y1 = vec_mradds (X1, YCoeffs[j], Y1);
- }
- U = RND;
- V = RND;
- /* extract 8 coeffs from U,V */
- for(j=0; j<chrFilterSize; j++) {
- X = vec_ld (0, &chrSrc[j][i/2]);
- U = vec_mradds (X, CCoeffs[j], U);
- X = vec_ld (0, &chrSrc[j][i/2+2048]);
- V = vec_mradds (X, CCoeffs[j], V);
+ /* scale and clip signals */
+ Y0 = vec_sra (Y0, SCL);
+ Y1 = vec_sra (Y1, SCL);
+ U = vec_sra (U, SCL);
+ V = vec_sra (V, SCL);
+
+ Y0 = vec_clip_s16 (Y0);
+ Y1 = vec_clip_s16 (Y1);
+ U = vec_clip_s16 (U);
+ V = vec_clip_s16 (V);
+
+ /* now we have
+ Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
+ U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
+
+ Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
+ U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
+ V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
+ */
+
+ U0 = vec_mergeh (U,U);
+ V0 = vec_mergeh (V,V);
+
+ U1 = vec_mergel (U,U);
+ V1 = vec_mergel (V,V);
+
+ cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
+ cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
+
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ switch(c->dstFormat) {
+ case PIX_FMT_ABGR: out_abgr (R,G,B,out); break;
+ case PIX_FMT_BGRA: out_bgra (R,G,B,out); break;
+ case PIX_FMT_RGBA: out_rgba (R,G,B,out); break;
+ case PIX_FMT_ARGB: out_argb (R,G,B,out); break;
+ case PIX_FMT_RGB24: out_rgb24 (R,G,B,out); break;
+ case PIX_FMT_BGR24: out_bgr24 (R,G,B,out); break;
+ default:
+ {
+ /* If this is reached, the caller should have called yuv2packedXinC
+ instead. */
+ static int printed_error_message;
+ if (!printed_error_message) {
+ av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
+ sws_format_name(c->dstFormat));
+ printed_error_message=1;
+ }
+ return;
+ }
+ }
}
- /* scale and clip signals */
- Y0 = vec_sra (Y0, SCL);
- Y1 = vec_sra (Y1, SCL);
- U = vec_sra (U, SCL);
- V = vec_sra (V, SCL);
-
- Y0 = vec_clip_s16 (Y0);
- Y1 = vec_clip_s16 (Y1);
- U = vec_clip_s16 (U);
- V = vec_clip_s16 (V);
+ if (i < dstW) {
+ i -= 16;
+
+ Y0 = RND;
+ Y1 = RND;
+ /* extract 16 coeffs from lumSrc */
+ for (j=0; j<lumFilterSize; j++) {
+ X0 = vec_ld (0, &lumSrc[j][i]);
+ X1 = vec_ld (16, &lumSrc[j][i]);
+ Y0 = vec_mradds (X0, YCoeffs[j], Y0);
+ Y1 = vec_mradds (X1, YCoeffs[j], Y1);
+ }
- /* now we have
- Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
- U= u0 u1 u2 u3 u4 u5 u6 u7 V= v0 v1 v2 v3 v4 v5 v6 v7
+ U = RND;
+ V = RND;
+ /* extract 8 coeffs from U,V */
+ for (j=0; j<chrFilterSize; j++) {
+ X = vec_ld (0, &chrSrc[j][i/2]);
+ U = vec_mradds (X, CCoeffs[j], U);
+ X = vec_ld (0, &chrSrc[j][i/2+2048]);
+ V = vec_mradds (X, CCoeffs[j], V);
+ }
- Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
- U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
- V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
- */
+ /* scale and clip signals */
+ Y0 = vec_sra (Y0, SCL);
+ Y1 = vec_sra (Y1, SCL);
+ U = vec_sra (U, SCL);
+ V = vec_sra (V, SCL);
+
+ Y0 = vec_clip_s16 (Y0);
+ Y1 = vec_clip_s16 (Y1);
+ U = vec_clip_s16 (U);
+ V = vec_clip_s16 (V);
+
+ /* now we have
+ Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
+ U = u0 u1 u2 u3 u4 u5 u6 u7 V = v0 v1 v2 v3 v4 v5 v6 v7
+
+ Y0= y0 y1 y2 y3 y4 y5 y6 y7 Y1= y8 y9 y10 y11 y12 y13 y14 y15
+ U0= u0 u0 u1 u1 u2 u2 u3 u3 U1= u4 u4 u5 u5 u6 u6 u7 u7
+ V0= v0 v0 v1 v1 v2 v2 v3 v3 V1= v4 v4 v5 v5 v6 v6 v7 v7
+ */
+
+ U0 = vec_mergeh (U,U);
+ V0 = vec_mergeh (V,V);
+
+ U1 = vec_mergel (U,U);
+ V1 = vec_mergel (V,V);
+
+ cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
+ cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
+
+ R = vec_packclp (R0,R1);
+ G = vec_packclp (G0,G1);
+ B = vec_packclp (B0,B1);
+
+ nout = (vector unsigned char *)scratch;
+ switch(c->dstFormat) {
+ case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break;
+ case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break;
+ case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break;
+ case PIX_FMT_ARGB: out_argb (R,G,B,nout); break;
+ case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
+ case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
+ default:
+ /* Unreachable, I think. */
+ av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
+ sws_format_name(c->dstFormat));
+ return;
+ }
- U0 = vec_mergeh (U,U);
- V0 = vec_mergeh (V,V);
-
- U1 = vec_mergel (U,U);
- V1 = vec_mergel (V,V);
-
- cvtyuvtoRGB (c, Y0,U0,V0,&R0,&G0,&B0);
- cvtyuvtoRGB (c, Y1,U1,V1,&R1,&G1,&B1);
-
- R = vec_packclp (R0,R1);
- G = vec_packclp (G0,G1);
- B = vec_packclp (B0,B1);
-
- nout = (vector unsigned char *)scratch;
- switch(c->dstFormat) {
- case PIX_FMT_ABGR: out_abgr (R,G,B,nout); break;
- case PIX_FMT_BGRA: out_bgra (R,G,B,nout); break;
- case PIX_FMT_RGBA: out_rgba (R,G,B,nout); break;
- case PIX_FMT_ARGB: out_argb (R,G,B,nout); break;
- case PIX_FMT_RGB24: out_rgb24 (R,G,B,nout); break;
- case PIX_FMT_BGR24: out_bgr24 (R,G,B,nout); break;
- default:
- /* Unreachable, I think. */
- av_log(c, AV_LOG_ERROR, "altivec_yuv2packedX doesn't support %s output\n",
- sws_format_name(c->dstFormat));
- return;
+ memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
}
- memcpy (&((uint32_t*)dest)[i], scratch, (dstW-i)/4);
- }
-
}
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_bfin.c b/contrib/ffmpeg/libswscale/yuv2rgb_bfin.c
new file mode 100644
index 000000000..d2770793b
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/yuv2rgb_bfin.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2007 Marc Hoffman <marc.hoffman@analog.com>
+ * April 20, 2007
+ *
+ * Blackfin video color space converter operations:
+ * converts I420/YV12 to RGB in various formats.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+#include <assert.h>
+#include "config.h"
+#ifdef HAVE_MALLOC_H
+#include <malloc.h>
+#endif
+#include <unistd.h>
+#include "rgb2rgb.h"
+#include "swscale.h"
+#include "swscale_internal.h"
+
+#ifdef __FDPIC__
+#define L1CODE __attribute__ ((l1_text))
+#else
+#define L1CODE
+#endif
+
+extern void ff_bfin_yuv2rgb555_line (uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out,
+ int w, uint32_t *coeffs) L1CODE;
+
+extern void ff_bfin_yuv2rgb565_line (uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out,
+ int w, uint32_t *coeffs) L1CODE;
+
+extern void ff_bfin_yuv2rgb24_line (uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out,
+ int w, uint32_t *coeffs) L1CODE;
+
+typedef void (* ltransform_t)(uint8_t *Y, uint8_t *U, uint8_t *V, uint8_t *out,
+ int w, uint32_t *coeffs);
+
+
+static void bfin_prepare_coefficients (SwsContext *c, int rgb, int masks)
+{
+ int oy;
+ oy = c->yOffset&0xffff;
+ oy = oy >> 3; // keep everything U8.0 for offset calculation
+
+ c->oc = 128*0x01010101U;
+ c->oy = oy*0x01010101U;
+
+ /* copy 64bit vector coeffs down to 32bit vector coeffs */
+ c->cy = c->yCoeff;
+ c->zero = 0;
+
+ if (rgb) {
+ c->crv = c->vrCoeff;
+ c->cbu = c->ubCoeff;
+ c->cgu = c->ugCoeff;
+ c->cgv = c->vgCoeff;
+ } else {
+ c->crv = c->ubCoeff;
+ c->cbu = c->vrCoeff;
+ c->cgu = c->vgCoeff;
+ c->cgv = c->ugCoeff;
+ }
+
+
+ if (masks == 555) {
+ c->rmask = 0x001f * 0x00010001U;
+ c->gmask = 0x03e0 * 0x00010001U;
+ c->bmask = 0x7c00 * 0x00010001U;
+ } else if (masks == 565) {
+ c->rmask = 0x001f * 0x00010001U;
+ c->gmask = 0x07e0 * 0x00010001U;
+ c->bmask = 0xf800 * 0x00010001U;
+ }
+}
+
+static int core_yuv420_rgb (SwsContext *c,
+ uint8_t **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ uint8_t **oplanes, int *outstrides,
+ ltransform_t lcscf, int rgb, int masks)
+{
+ uint8_t *py,*pu,*pv,*op;
+ int w = instrides[0];
+ int h2 = srcSliceH>>1;
+ int i;
+
+ bfin_prepare_coefficients (c, rgb, masks);
+
+ py = in[0];
+ pu = in[1+(1^rgb)];
+ pv = in[1+(0^rgb)];
+
+ op = oplanes[0] + srcSliceY*outstrides[0];
+
+ for (i=0;i<h2;i++) {
+
+ lcscf (py, pu, pv, op, w, &c->oy);
+
+ py += instrides[0];
+ op += outstrides[0];
+
+ lcscf (py, pu, pv, op, w, &c->oy);
+
+ py += instrides[0];
+ pu += instrides[1];
+ pv += instrides[2];
+ op += outstrides[0];
+ }
+
+ return srcSliceH;
+}
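
A note on the loop above (illustrative, not part of the patch):

/* The source is 4:2:0, so each chroma row is shared by two luma rows:
 * the body calls the line transform twice per iteration, advancing the
 * luma and output pointers after each call but the chroma pointers only
 * once per iteration.  The rgb flag also swaps the U and V planes
 * (in[1+(1^rgb)] / in[1+(0^rgb)]) so the same line routines can emit
 * either RGB or BGR channel order. */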
+
+
+static int bfin_yuv420_rgb555 (SwsContext *c,
+ uint8_t **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ uint8_t **oplanes, int *outstrides)
+{
+ return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
+ ff_bfin_yuv2rgb555_line, 1, 555);
+}
+
+static int bfin_yuv420_bgr555 (SwsContext *c,
+ uint8_t **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ uint8_t **oplanes, int *outstrides)
+{
+ return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
+ ff_bfin_yuv2rgb555_line, 0, 555);
+}
+
+static int bfin_yuv420_rgb24 (SwsContext *c,
+ uint8_t **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ uint8_t **oplanes, int *outstrides)
+{
+ return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
+ ff_bfin_yuv2rgb24_line, 1, 888);
+}
+
+static int bfin_yuv420_bgr24 (SwsContext *c,
+ uint8_t **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ uint8_t **oplanes, int *outstrides)
+{
+ return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
+ ff_bfin_yuv2rgb24_line, 0, 888);
+}
+
+static int bfin_yuv420_rgb565 (SwsContext *c,
+ uint8_t **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ uint8_t **oplanes, int *outstrides)
+{
+ return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
+ ff_bfin_yuv2rgb565_line, 1, 565);
+}
+
+static int bfin_yuv420_bgr565 (SwsContext *c,
+ uint8_t **in, int *instrides,
+ int srcSliceY, int srcSliceH,
+ uint8_t **oplanes, int *outstrides)
+{
+ return core_yuv420_rgb (c, in, instrides, srcSliceY, srcSliceH, oplanes, outstrides,
+ ff_bfin_yuv2rgb565_line, 0, 565);
+}
+
+
+SwsFunc ff_bfin_yuv2rgb_get_func_ptr (SwsContext *c)
+{
+ SwsFunc f;
+
+ switch(c->dstFormat) {
+ case PIX_FMT_RGB555: f = bfin_yuv420_rgb555; break;
+ case PIX_FMT_BGR555: f = bfin_yuv420_bgr555; break;
+ case PIX_FMT_RGB565: f = bfin_yuv420_rgb565; break;
+ case PIX_FMT_BGR565: f = bfin_yuv420_bgr565; break;
+ case PIX_FMT_RGB24: f = bfin_yuv420_rgb24; break;
+ case PIX_FMT_BGR24: f = bfin_yuv420_bgr24; break;
+ default:
+ return 0;
+ }
+
+ av_log(c, AV_LOG_INFO, "BlackFin Accelerated Color Space Converter %s\n",
+ sws_format_name (c->dstFormat));
+
+ return f;
+}
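
A minimal usage sketch, assuming the SwsFunc calling convention used by the converters in this file; the plane, stride and size names below are hypothetical.

/* given an initialized SwsContext *c whose dstFormat is one of the
   formats handled above (all other names here are hypothetical) */
SwsFunc convert = ff_bfin_yuv2rgb_get_func_ptr(c);
if (convert) {
    uint8_t *src[3]       = { y_plane, u_plane, v_plane };
    int      srcStride[3] = { y_stride, c_stride, c_stride };
    uint8_t *dst[1]       = { rgb_buffer };
    int      dstStride[1] = { dst_linesize };
    convert(c, src, srcStride, 0, frame_height, dst, dstStride); /* one full-frame slice */
}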
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_init.c b/contrib/ffmpeg/libswscale/yuv2rgb_init.c
deleted file mode 100644
index 371dce5da..000000000
--- a/contrib/ffmpeg/libswscale/yuv2rgb_init.c
+++ /dev/null
@@ -1,412 +0,0 @@
-#include "avutil.h"
-#include "swscale.h"
-#include "swscale_internal.h"
-
-#define YTABLE_MIN 384
-
-/**
- * YUV -> RGB conversion matrixes (inverse of table 6.9 in MPEG2 standard)
- *
- * An YUV -> RGB conversion matrix is in the form
- * | 1 0 Rv |
- * | 1 Gu Gv |
- * | 1 Bu 0 |
- *
- * Inverse_Table_6_9 stores | Rv Bu Gv Gu | * 255/224*2^16.
- * \arg Maximum Rv value: 117570
- * \arg Maximum Bu value: 138420
- * \arg Maximum Gv + Gu value: 25642 + 53281 = 78923
- *
- * These values are needed to allocate table_{r, g, b}. If you modify
- * this table, please update allocate_tables() accordingly
- */
-const int32_t Inverse_Table_6_9[8][4] = {
- {0, 0, 0, 0}, /* no sequence_display_extension */
- {117500, 138420, -13985, -34933}, /* ITU-R Rec. 709 (1990) */
- {0, 0, 0, 0}, /* unspecified */
- {0, 0, 0, 0}, /* reserved */
- {104480, 132820, -24811, -53150}, /* FCC */
- {104570, 132210, -25642, -53281}, /* ITU-R Rec. 624-4 System B, G */
- {104570, 132210, -25642, -53281}, /* SMPTE 170M */
- {117570, 136230, -16892, -35552} /* SMPTE 240M (1987) */
-};
-
-
-/**
- * Dithering matrixes (these are bayer ordered dither matrixes
- * with some manual changes by Michael)
- */
-const uint8_t __attribute__((aligned(8))) dither_2x2_4[2][8]={
-{ 1, 3, 1, 3, 1, 3, 1, 3, },
-{ 2, 0, 2, 0, 2, 0, 2, 0, },
-};
-
-const uint8_t __attribute__((aligned(8))) dither_2x2_8[2][8]={
-{ 6, 2, 6, 2, 6, 2, 6, 2, },
-{ 0, 4, 0, 4, 0, 4, 0, 4, },
-};
-
-const uint8_t __attribute__((aligned(8))) dither_8x8_32[8][8]={
-{ 17, 9, 23, 15, 16, 8, 22, 14, },
-{ 5, 29, 3, 27, 4, 28, 2, 26, },
-{ 21, 13, 19, 11, 20, 12, 18, 10, },
-{ 0, 24, 6, 30, 1, 25, 7, 31, },
-{ 16, 8, 22, 14, 17, 9, 23, 15, },
-{ 4, 28, 2, 26, 5, 29, 3, 27, },
-{ 20, 12, 18, 10, 21, 13, 19, 11, },
-{ 1, 25, 7, 31, 0, 24, 6, 30, },
-};
-
-#if 0
-const uint8_t __attribute__((aligned(8))) dither_8x8_64[8][8]={
-{ 0, 48, 12, 60, 3, 51, 15, 63, },
-{ 32, 16, 44, 28, 35, 19, 47, 31, },
-{ 8, 56, 4, 52, 11, 59, 7, 55, },
-{ 40, 24, 36, 20, 43, 27, 39, 23, },
-{ 2, 50, 14, 62, 1, 49, 13, 61, },
-{ 34, 18, 46, 30, 33, 17, 45, 29, },
-{ 10, 58, 6, 54, 9, 57, 5, 53, },
-{ 42, 26, 38, 22, 41, 25, 37, 21, },
-};
-#endif
-
-const uint8_t __attribute__((aligned(8))) dither_8x8_73[8][8]={
-{ 0, 55, 14, 68, 3, 58, 17, 72, },
-{ 37, 18, 50, 32, 40, 22, 54, 35, },
-{ 9, 64, 5, 59, 13, 67, 8, 63, },
-{ 46, 27, 41, 23, 49, 31, 44, 26, },
-{ 2, 57, 16, 71, 1, 56, 15, 70, },
-{ 39, 21, 52, 34, 38, 19, 51, 33, },
-{ 11, 66, 7, 62, 10, 65, 6, 60, },
-{ 48, 30, 43, 25, 47, 29, 42, 24, },
-};
-
-#if 0
-const uint8_t __attribute__((aligned(8))) dither_8x8_128[8][8]={
-{ 68, 36, 92, 60, 66, 34, 90, 58, },
-{ 20, 116, 12, 108, 18, 114, 10, 106, },
-{ 84, 52, 76, 44, 82, 50, 74, 42, },
-{ 0, 96, 24, 120, 6, 102, 30, 126, },
-{ 64, 32, 88, 56, 70, 38, 94, 62, },
-{ 16, 112, 8, 104, 22, 118, 14, 110, },
-{ 80, 48, 72, 40, 86, 54, 78, 46, },
-{ 4, 100, 28, 124, 2, 98, 26, 122, },
-};
-#endif
-
-#if 1
-const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
-{117, 62, 158, 103, 113, 58, 155, 100, },
-{ 34, 199, 21, 186, 31, 196, 17, 182, },
-{144, 89, 131, 76, 141, 86, 127, 72, },
-{ 0, 165, 41, 206, 10, 175, 52, 217, },
-{110, 55, 151, 96, 120, 65, 162, 107, },
-{ 28, 193, 14, 179, 38, 203, 24, 189, },
-{138, 83, 124, 69, 148, 93, 134, 79, },
-{ 7, 172, 48, 213, 3, 168, 45, 210, },
-};
-#elif 1
-// tries to correct a gamma of 1.5
-const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
-{ 0, 143, 18, 200, 2, 156, 25, 215, },
-{ 78, 28, 125, 64, 89, 36, 138, 74, },
-{ 10, 180, 3, 161, 16, 195, 8, 175, },
-{109, 51, 93, 38, 121, 60, 105, 47, },
-{ 1, 152, 23, 210, 0, 147, 20, 205, },
-{ 85, 33, 134, 71, 81, 30, 130, 67, },
-{ 14, 190, 6, 171, 12, 185, 5, 166, },
-{117, 57, 101, 44, 113, 54, 97, 41, },
-};
-#elif 1
-// tries to correct a gamma of 2.0
-const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
-{ 0, 124, 8, 193, 0, 140, 12, 213, },
-{ 55, 14, 104, 42, 66, 19, 119, 52, },
-{ 3, 168, 1, 145, 6, 187, 3, 162, },
-{ 86, 31, 70, 21, 99, 39, 82, 28, },
-{ 0, 134, 11, 206, 0, 129, 9, 200, },
-{ 62, 17, 114, 48, 58, 16, 109, 45, },
-{ 5, 181, 2, 157, 4, 175, 1, 151, },
-{ 95, 36, 78, 26, 90, 34, 74, 24, },
-};
-#else
-// tries to correct a gamma of 2.5
-const uint8_t __attribute__((aligned(8))) dither_8x8_220[8][8]={
-{ 0, 107, 3, 187, 0, 125, 6, 212, },
-{ 39, 7, 86, 28, 49, 11, 102, 36, },
-{ 1, 158, 0, 131, 3, 180, 1, 151, },
-{ 68, 19, 52, 12, 81, 25, 64, 17, },
-{ 0, 119, 5, 203, 0, 113, 4, 195, },
-{ 45, 9, 96, 33, 42, 8, 91, 30, },
-{ 2, 172, 1, 144, 2, 165, 0, 137, },
-{ 77, 23, 60, 15, 72, 21, 56, 14, },
-};
-#endif
-
-static int get_entry_size(int bpp)
-{
- switch(bpp) {
- case 32:
- return 4;
- case 16:
- case 15:
- return 2;
- case 24:
- case 8:
- case 4:
- case 1:
- return 1;
- default:
- return -1;
- }
-}
-
-/**
- * Allocate table_r, table_g, and table_b
- *
- * For cache efficency reasons, these three tables are allocated
- * together, so that they are contiguous in memory
- *
- * table_r is indexed in the range
- * [-128 * 117570 / 76309, 255 + 127 * 117570 / 76309] =
- * [-197.21, 451.67] ---> [-198, 452]
- * table_b is indexed in the range
- * [-128 * 138420 / 76309, 255 + 127 * 138420 / 76309] =
- * [232.18, 485.37] ---> [-233, 486]
- * table_g is indexed in the range
- * [-128 * 78923 / 76309, 255 + 127 * 78923 / 76309] =
- * [-132.38, 386.35] ---> [-133, 387]
- *
- * Please look at the comments after Inverse_Table_6_9 to see where these
- * numbers are coming from.
- */
-static void *allocate_tables(uint8_t **table_r, uint8_t **table_g, uint8_t **table_b, int bpp)
-{
- uint8_t *table;
- int entry_size;
-
- entry_size = get_entry_size(bpp);
-
- /* First allocate the memory... */
- switch (bpp) {
- case 32:
- case 15:
- case 16:
- case 8:
- case 4:
- table = av_malloc((198 + 452 + 233 + 486 + 133 + 387) * entry_size);
- break;
- case 24:
- table = av_malloc(256 + 2 * 233);
- break;
- case 1:
- table = av_malloc (256 * 2);
- break;
- default:
- table = NULL;
- }
- if (table == NULL) {
- MSG_ERR("Cannot allocate memory for the YUV -> RGB tables!\n");
-
- return NULL;
- }
-
- /* ...and then, assign the table_* value */
- switch (bpp) {
- case 32:
- case 15:
- case 16:
- case 8:
- case 4:
- *table_r = table + 198 * entry_size;
- *table_b = table + (198 + 452 + 133 + 387 + 233) * entry_size;
- *table_g = table + (198 + 452 + 133) * entry_size;
- break;
- case 24:
- *table_r = *table_g = *table_b = table + 233;
- break;
- case 1:
- *table_g = table;
- *table_r = *table_b = NULL;
- break;
- }
-
- return table;
-}
-
-/**
- * Initialize the table_rV, table_gU[i], table_gV, and table_bU fields
- * in SwsContext
- *
- * @param inv_table the YUV -> RGB table (this is a line of Inverse_Table_6_9)
- * @param fullRange 0->MPEG YUV space 1->JPEG YUV space
-*/
-int yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4], int fullRange, int brightness, int contrast, int saturation)
-{
- int i;
- static uint8_t ytable[1024];
- int64_t cy, oy;
- int64_t crv, cbu, cgu, cgv;
- int entry_size = 0;
- uint8_t *table_r, *table_g, *table_b;
- int value;
-
- if ((inv_table[0] == 0) || (inv_table[1] == 0) || (inv_table[2] == 0) || (inv_table[3] == 0)) {
- MSG_ERR("Invalid YUV ---> RGB table!\n");
-
- return -1;
- }
- crv = inv_table[0];
- cbu = inv_table[1];
- cgu = inv_table[2];
- cgv = inv_table[3];
- if (fullRange) {
- cy = 1 << 16;
- oy = 0;
- crv= (crv*224) / 255;
- cbu= (cbu*224) / 255;
- cgu= (cgu*224) / 255;
- cgv= (cgv*224) / 255;
- //FIXME maybe its cleaner if the tables where based on full range (*244/255)
- } else {
- cy = ((1 << 16) * 255) / 219;
- oy= 16 << 16;
- }
-
- cy = (cy *contrast )>>16;
- crv= (crv*contrast * saturation)>>32;
- cbu= (cbu*contrast * saturation)>>32;
- cgu= (cgu*contrast * saturation)>>32;
- cgv= (cgv*contrast * saturation)>>32;
- oy -= 256*brightness;
-
- for (i = 0; i < 1024; i++) {
- value = (cy*(((i - YTABLE_MIN)<<16) - oy) + (1<<31))>>32;
- ytable[i] = av_clip_uint8(value);
- }
-
- entry_size = get_entry_size(fmt_depth(c->dstFormat));
- av_free(c->yuvTable);
- c->yuvTable = allocate_tables(&table_r, &table_g, &table_b, fmt_depth(c->dstFormat));
- if (c->yuvTable == NULL) {
- return -1;
- }
-
- switch (fmt_depth(c->dstFormat)) {
- case 32:
- for (i = -198; i < 256 + 197; i++) {
- value = ytable[i + YTABLE_MIN];
- if (isBGR(c->dstFormat)) {
- value <<= 16;
- }
- ((uint32_t *)table_r)[i] = value;
- }
- for (i = -133; i < 256 + 132; i++) {
- ((uint32_t *)table_g)[i] = ytable[i + YTABLE_MIN] << 8;
- }
- for (i = -233; i < 256 + 232; i++) {
- value = ytable[i + YTABLE_MIN];
- if (!isBGR(c->dstFormat)) {
- value <<= 16;
- }
- ((uint32_t *)table_b)[i] = value;
- }
- break;
-
- case 24:
- for (i = -233; i < 256 + 232; i++) {
- ((uint8_t * )table_b)[i] = ytable[i + YTABLE_MIN];
- }
- break;
-
- case 15:
- case 16:
- for (i = -198; i < 256 + 197; i++) {
- value = ytable[i + YTABLE_MIN] >> 3;
- if (isBGR(c->dstFormat)) {
- value <<= ((fmt_depth(c->dstFormat) == 16) ? 11 : 10);
- }
- ((uint16_t *)table_r)[i] = value;
- }
- for (i = -133; i < 256 + 132; i++) {
- value = ytable[i + YTABLE_MIN];
- value >>= ((fmt_depth(c->dstFormat) == 16) ? 2 : 3);
- ((uint16_t *)table_g)[i] = value << 5;
- }
- for (i = -233; i < 256 + 232; i++) {
- value = ytable[i + YTABLE_MIN] >> 3;
- if (!isBGR(c->dstFormat)) {
- value <<= ((fmt_depth(c->dstFormat) == 16) ? 11 : 10);
- }
- ((uint16_t *)table_b)[i] = value;
- }
- break;
- case 8:
- for (i = -198; i < 256 + 197; i++) {
- value = (ytable[i + YTABLE_MIN - 16] + 18) / 36;
- if (isBGR(c->dstFormat)) {
- value <<= 5;
- }
- ((uint8_t *)table_r)[i] = value;
- }
- for (i = -133; i < 256 + 132; i++) {
- value = (ytable[i + YTABLE_MIN - 16] + 18) / 36;
- if (!isBGR(c->dstFormat)) {
- value <<= 1;
- }
- ((uint8_t *)table_g)[i] = value << 2;
- }
- for (i = -233; i < 256 + 232; i++) {
- value = (ytable[i + YTABLE_MIN - 37] + 43) / 85;
- if (!isBGR(c->dstFormat)) {
- value <<= 6;
- }
- ((uint8_t *)table_b)[i] = value;
- }
- break;
- case 4:
- for (i = -198; i < 256 + 197; i++) {
- value = ytable[i + YTABLE_MIN - 110] >> 7;
- if (isBGR(c->dstFormat)) {
- value <<= 3;
- }
- ((uint8_t *)table_r)[i] = value;
- }
- for (i = -133; i < 256 + 132; i++) {
- value = (ytable[i + YTABLE_MIN - 37]+ 43) / 85;
- ((uint8_t *)table_g)[i] = value << 1;
- }
- for (i = -233; i < 256 + 232; i++) {
- value = ytable[i + YTABLE_MIN - 110] >> 7;
- if (!isBGR(c->dstFormat)) {
- value <<= 3;
- }
- ((uint8_t *)table_b)[i] = value;
- }
- break;
- case 1:
- for (i = 0; i < 256 + 256; i++) {
- value = ytable[i + YTABLE_MIN - 110] >> 7;
- ((uint8_t *)table_g)[i] = value;
- }
- break;
- default:
- MSG_ERR("%ibpp not supported by yuv2rgb\n", fmt_depth(c->dstFormat));
- av_free(c->yuvTable);
- c->yuvTable = NULL;
-
- return -1;
- }
-
- for (i = 0; i < 256; i++) {
- c->table_rV[i] = table_r +
- entry_size * ROUNDED_DIV(crv * (i - 128), 76309);
- c->table_gU[i] = table_g +
- entry_size * ROUNDED_DIV(cgu * (i - 128), 76309);
- c->table_gV[i] = entry_size * ROUNDED_DIV(cgv * (i - 128), 76309);
- c->table_bU[i] = table_b +
- entry_size * ROUNDED_DIV(cbu * (i - 128), 76309);
- }
-
- return 0;
-}
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_mlib.c b/contrib/ffmpeg/libswscale/yuv2rgb_mlib.c
index c99e019cd..ad271410f 100644
--- a/contrib/ffmpeg/libswscale/yuv2rgb_mlib.c
+++ b/contrib/ffmpeg/libswscale/yuv2rgb_mlib.c
@@ -1,24 +1,22 @@
-/*
- * yuv2rgb_mlib.c, Software YUV to RGB coverter using mediaLib
+/*
+ * yuv2rgb_mlib.c, Software YUV to RGB converter using mediaLib
+ * Copyright (C) 2003 Michael Niedermayer <michaelni@gmx.at>
*
- * Copyright (C) 2000, Håkan Hjort <d95hjort@dtek.chalmers.se>
- * All Rights Reserved.
+ * This file is part of FFmpeg.
*
- * This file is part of mpeg2dec, a free MPEG-2 video decoder
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
*
- * mpeg2dec is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
*
- * mpeg2dec is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with mpeg2dec; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <mlib_types.h>
@@ -31,56 +29,56 @@
#include "swscale.h"
-static int mlib_YUV2ARGB420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
+static int mlib_YUV2ARGB420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
if(c->srcFormat == PIX_FMT_YUV422P){
- srcStride[1] *= 2;
- srcStride[2] *= 2;
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
}
-
+
assert(srcStride[1] == srcStride[2]);
-
+
mlib_VideoColorYUV2ARGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
- srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+ srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
return srcSliceH;
}
-static int mlib_YUV2ABGR420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
+static int mlib_YUV2ABGR420_32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
if(c->srcFormat == PIX_FMT_YUV422P){
- srcStride[1] *= 2;
- srcStride[2] *= 2;
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
}
-
+
assert(srcStride[1] == srcStride[2]);
-
+
mlib_VideoColorYUV2ABGR420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
- srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+ srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
return srcSliceH;
}
-static int mlib_YUV2RGB420_24(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
+static int mlib_YUV2RGB420_24(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
if(c->srcFormat == PIX_FMT_YUV422P){
- srcStride[1] *= 2;
- srcStride[2] *= 2;
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
}
-
+
assert(srcStride[1] == srcStride[2]);
-
+
mlib_VideoColorYUV2RGB420(dst[0]+srcSliceY*dstStride[0], src[0], src[1], src[2], c->dstW,
- srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
+ srcSliceH, dstStride[0], srcStride[0], srcStride[1]);
return srcSliceH;
}
-SwsFunc yuv2rgb_init_mlib(SwsContext *c)
+SwsFunc yuv2rgb_init_mlib(SwsContext *c)
{
- switch(c->dstFormat){
- case PIX_FMT_RGB24: return mlib_YUV2RGB420_24;
- case PIX_FMT_BGR32: return mlib_YUV2ARGB420_32;
- case PIX_FMT_RGB32: return mlib_YUV2ABGR420_32;
- default: return NULL;
- }
+ switch(c->dstFormat){
+ case PIX_FMT_RGB24: return mlib_YUV2RGB420_24;
+ case PIX_FMT_BGR32: return mlib_YUV2ARGB420_32;
+ case PIX_FMT_RGB32: return mlib_YUV2ABGR420_32;
+ default: return NULL;
+ }
}
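
For readers without mediaLib: a minimal plain-C sketch of the planar YUV 4:2:0 to packed 32-bit RGB conversion that the mlib_VideoColorYUV2*420 calls above perform. It assumes BT.601 coefficients with studio-range input (luma 16..235); the function name, byte order and fixed-point constants are illustrative only, not taken from mediaLib or from this patch.

#include <stdint.h>

static uint8_t clamp8(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

/* Reference conversion: one 4:2:0 frame, planar Y/U/V in, packed 4 bytes/pixel out. */
static void yuv420p_to_rgb32_ref(const uint8_t *y, const uint8_t *u, const uint8_t *v,
                                 int yStride, int cStride,
                                 uint8_t *dst, int dstStride, int w, int h)
{
    for (int j = 0; j < h; j++) {
        const uint8_t *py = y + j * yStride;
        const uint8_t *pu = u + (j >> 1) * cStride;   /* chroma is subsampled 2x vertically */
        const uint8_t *pv = v + (j >> 1) * cStride;
        uint8_t *out = dst + j * dstStride;
        for (int i = 0; i < w; i++) {
            int C = (py[i]      -  16) * 298;         /* 1.164 in 8.8 fixed point */
            int D =  pu[i >> 1] - 128;                /* chroma is subsampled 2x horizontally */
            int E =  pv[i >> 1] - 128;
            out[4*i + 0] = clamp8((C + 516*D         + 128) >> 8);   /* B */
            out[4*i + 1] = clamp8((C - 100*D - 208*E + 128) >> 8);   /* G */
            out[4*i + 2] = clamp8((C         + 409*E + 128) >> 8);   /* R */
            out[4*i + 3] = 0;                                        /* padding/alpha byte */
        }
    }
}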
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_template.c b/contrib/ffmpeg/libswscale/yuv2rgb_template.c
index 28ee24add..d86bc4b94 100644
--- a/contrib/ffmpeg/libswscale/yuv2rgb_template.c
+++ b/contrib/ffmpeg/libswscale/yuv2rgb_template.c
@@ -1,12 +1,14 @@
-
/*
- * yuv2rgb_mmx.c, Software YUV to RGB coverter with Intel MMX "technology"
+ * yuv2rgb_mmx.c, Software YUV to RGB converter with Intel MMX "technology"
*
* Copyright (C) 2000, Silicon Integrated System Corp.
- * All Rights Reserved.
*
* Author: Olie Lho <ollie@sis.com.tw>
*
+ * 15,24 bpp and dithering from Michael Niedermayer (michaelni@gmx.at)
+ * MMX/MMX2 Template stuff from Michael Niedermayer (needed for fast movntq support)
+ * context / deglobalize stuff by Michael Niedermayer
+ *
* This file is part of mpeg2dec, a free MPEG-2 video decoder
*
* mpeg2dec is free software; you can redistribute it and/or modify
@@ -22,10 +24,6 @@
* You should have received a copy of the GNU General Public License
* along with mpeg2dec; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * 15,24 bpp and dithering from Michael Niedermayer (michaelni@gmx.at)
- * MMX/MMX2 Template stuff from Michael Niedermayer (needed for fast movntq support)
- * context / deglobalize stuff by Michael Niedermayer
*/
#undef MOVNTQ
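
This file is a template: it is not built on its own but included (typically from yuv2rgb.c) once per CPU flavour, with RENAME() giving each instantiation its own function names and the MOVNTQ/EMMS macros resolving to that flavour's store and cleanup instructions. A rough sketch of the instantiation pattern, illustrative rather than the exact surrounding code:

/* Sketch: a driver file can instantiate the template twice. */
#ifdef HAVE_MMX2
#undef RENAME
#define RENAME(a) a ## _MMX2      /* yuv420_rgb16_MMX2, yuv420_rgb32_MMX2, ... */
#include "yuv2rgb_template.c"
#endif

#ifdef HAVE_MMX
#undef RENAME
#undef HAVE_MMX2                  /* template then falls back to MOVNTQ = "movq" */
#define RENAME(a) a ## _MMX
#include "yuv2rgb_template.c"
#endif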
@@ -48,259 +46,259 @@
#endif
#define YUV2RGB \
- /* Do the multiply part of the conversion for even and odd pixels,
- register usage:
- mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
- mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
- mm6 -> Y even, mm7 -> Y odd */\
- /* convert the chroma part */\
- "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
- "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+ /* Do the multiply part of the conversion for even and odd pixels,
+ register usage:
+ mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+ mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+ mm6 -> Y even, mm7 -> Y odd */\
+ /* convert the chroma part */\
+ "punpcklbw %%mm4, %%mm0;" /* scatter 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+ "punpcklbw %%mm4, %%mm1;" /* scatter 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
\
- "psllw $3, %%mm0;" /* Promote precision */ \
- "psllw $3, %%mm1;" /* Promote precision */ \
+ "psllw $3, %%mm0;" /* Promote precision */ \
+ "psllw $3, %%mm1;" /* Promote precision */ \
\
- "psubsw "U_OFFSET"(%4), %%mm0;" /* Cb -= 128 */ \
- "psubsw "V_OFFSET"(%4), %%mm1;" /* Cr -= 128 */ \
+ "psubsw "U_OFFSET"(%4), %%mm0;" /* Cb -= 128 */ \
+ "psubsw "V_OFFSET"(%4), %%mm1;" /* Cr -= 128 */ \
\
- "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
- "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
+ "movq %%mm0, %%mm2;" /* Copy 4 Cb 00 u3 00 u2 00 u1 00 u0 */ \
+ "movq %%mm1, %%mm3;" /* Copy 4 Cr 00 v3 00 v2 00 v1 00 v0 */ \
\
- "pmulhw "UG_COEFF"(%4), %%mm2;" /* Mul Cb with green coeff -> Cb green */ \
- "pmulhw "VG_COEFF"(%4), %%mm3;" /* Mul Cr with green coeff -> Cr green */ \
+ "pmulhw "UG_COEFF"(%4), %%mm2;" /* Mul Cb with green coeff -> Cb green */ \
+ "pmulhw "VG_COEFF"(%4), %%mm3;" /* Mul Cr with green coeff -> Cr green */ \
\
- "pmulhw "UB_COEFF"(%4), %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\
- "pmulhw "VR_COEFF"(%4), %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\
+ "pmulhw "UB_COEFF"(%4), %%mm0;" /* Mul Cb -> Cblue 00 b3 00 b2 00 b1 00 b0 */\
+ "pmulhw "VR_COEFF"(%4), %%mm1;" /* Mul Cr -> Cred 00 r3 00 r2 00 r1 00 r0 */\
\
- "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */\
+ "paddsw %%mm3, %%mm2;" /* Cb green + Cr green -> Cgreen */\
\
- /* convert the luma part */\
- "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
- "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
+ /* convert the luma part */\
+ "movq %%mm6, %%mm7;" /* Copy 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */\
+ "pand "MANGLE(mmx_00ffw)", %%mm6;" /* get Y even 00 Y6 00 Y4 00 Y2 00 Y0 */\
\
- "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\
+ "psrlw $8, %%mm7;" /* get Y odd 00 Y7 00 Y5 00 Y3 00 Y1 */\
\
- "psllw $3, %%mm6;" /* Promote precision */\
- "psllw $3, %%mm7;" /* Promote precision */\
+ "psllw $3, %%mm6;" /* Promote precision */\
+ "psllw $3, %%mm7;" /* Promote precision */\
\
- "psubw "Y_OFFSET"(%4), %%mm6;" /* Y -= 16 */\
- "psubw "Y_OFFSET"(%4), %%mm7;" /* Y -= 16 */\
+ "psubw "Y_OFFSET"(%4), %%mm6;" /* Y -= 16 */\
+ "psubw "Y_OFFSET"(%4), %%mm7;" /* Y -= 16 */\
\
- "pmulhw "Y_COEFF"(%4), %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\
- "pmulhw "Y_COEFF"(%4), %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\
+ "pmulhw "Y_COEFF"(%4), %%mm6;" /* Mul 4 Y even 00 y6 00 y4 00 y2 00 y0 */\
+ "pmulhw "Y_COEFF"(%4), %%mm7;" /* Mul 4 Y odd 00 y7 00 y5 00 y3 00 y1 */\
\
- /* Do the addition part of the conversion for even and odd pixels,
- register usage:
- mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
- mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
- mm6 -> Y even, mm7 -> Y odd */\
- "movq %%mm0, %%mm3;" /* Copy Cblue */\
- "movq %%mm1, %%mm4;" /* Copy Cred */\
- "movq %%mm2, %%mm5;" /* Copy Cgreen */\
+ /* Do the addition part of the conversion for even and odd pixels,
+ register usage:
+ mm0 -> Cblue, mm1 -> Cred, mm2 -> Cgreen even pixels,
+ mm3 -> Cblue, mm4 -> Cred, mm5 -> Cgreen odd pixels,
+ mm6 -> Y even, mm7 -> Y odd */\
+ "movq %%mm0, %%mm3;" /* Copy Cblue */\
+ "movq %%mm1, %%mm4;" /* Copy Cred */\
+ "movq %%mm2, %%mm5;" /* Copy Cgreen */\
\
- "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */\
- "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */\
+ "paddsw %%mm6, %%mm0;" /* Y even + Cblue 00 B6 00 B4 00 B2 00 B0 */\
+ "paddsw %%mm7, %%mm3;" /* Y odd + Cblue 00 B7 00 B5 00 B3 00 B1 */\
\
- "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */\
- "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */\
+ "paddsw %%mm6, %%mm1;" /* Y even + Cred 00 R6 00 R4 00 R2 00 R0 */\
+ "paddsw %%mm7, %%mm4;" /* Y odd + Cred 00 R7 00 R5 00 R3 00 R1 */\
\
- "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */\
- "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */\
+ "paddsw %%mm6, %%mm2;" /* Y even + Cgreen 00 G6 00 G4 00 G2 00 G0 */\
+ "paddsw %%mm7, %%mm5;" /* Y odd + Cgreen 00 G7 00 G5 00 G3 00 G1 */\
\
- /* Limit RGB even to 0..255 */\
- "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */\
- "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */\
- "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */\
+ /* Limit RGB even to 0..255 */\
+ "packuswb %%mm0, %%mm0;" /* B6 B4 B2 B0 B6 B4 B2 B0 */\
+ "packuswb %%mm1, %%mm1;" /* R6 R4 R2 R0 R6 R4 R2 R0 */\
+ "packuswb %%mm2, %%mm2;" /* G6 G4 G2 G0 G6 G4 G2 G0 */\
\
- /* Limit RGB odd to 0..255 */\
- "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */\
- "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */\
- "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */\
+ /* Limit RGB odd to 0..255 */\
+ "packuswb %%mm3, %%mm3;" /* B7 B5 B3 B1 B7 B5 B3 B1 */\
+ "packuswb %%mm4, %%mm4;" /* R7 R5 R3 R1 R7 R5 R3 R1 */\
+ "packuswb %%mm5, %%mm5;" /* G7 G5 G3 G1 G7 G5 G3 G1 */\
\
- /* Interleave RGB even and odd */\
- "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */\
- "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */\
- "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */\
+ /* Interleave RGB even and odd */\
+ "punpcklbw %%mm3, %%mm0;" /* B7 B6 B5 B4 B3 B2 B1 B0 */\
+ "punpcklbw %%mm4, %%mm1;" /* R7 R6 R5 R4 R3 R2 R1 R0 */\
+ "punpcklbw %%mm5, %%mm2;" /* G7 G6 G5 G4 G3 G2 G1 G0 */\
static inline int RENAME(yuv420_rgb16)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
int y, h_size;
if(c->srcFormat == PIX_FMT_YUV422P){
- srcStride[1] *= 2;
- srcStride[2] *= 2;
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
}
h_size= (c->dstW+7)&~7;
if(h_size*2 > FFABS(dstStride[0])) h_size-=8;
-
+
__asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
-//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
-//srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
+ //printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
+ //srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
for (y= 0; y<srcSliceH; y++ ) {
- uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
- uint8_t *_py = src[0] + y*srcStride[0];
- uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
- uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
- long index= -h_size/2;
-
- b5Dither= dither8[y&1];
- g6Dither= dither4[y&1];
- g5Dither= dither8[y&1];
- r5Dither= dither8[(y+1)&1];
- /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
- pixels in each iteration */
- __asm__ __volatile__ (
- /* load data for start of next scan line */
- "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
- "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-// ".balign 16 \n\t"
- "1: \n\t"
-/* no speed diference on my p3@500 with prefetch,
- * if it is faster for anyone with -benchmark then tell me
- PREFETCH" 64(%0) \n\t"
- PREFETCH" 64(%1) \n\t"
- PREFETCH" 64(%2) \n\t"
-*/
+ uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *py = src[0] + y*srcStride[0];
+ uint8_t *pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *pv = src[2] + (y>>1)*srcStride[2];
+ long index= -h_size/2;
+
+ b5Dither= ff_dither8[y&1];
+ g6Dither= ff_dither4[y&1];
+ g5Dither= ff_dither8[y&1];
+ r5Dither= ff_dither8[(y+1)&1];
+        /* this mmx assembly code deals with a SINGLE scan line at a time; it converts 8
+           pixels in each iteration */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ //".balign 16 \n\t"
+ "1: \n\t"
+        /* no speed difference on my p3@500 with prefetch,
+ * if it is faster for anyone with -benchmark then tell me
+ PREFETCH" 64(%0) \n\t"
+ PREFETCH" 64(%1) \n\t"
+ PREFETCH" 64(%2) \n\t"
+ */
YUV2RGB
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm0;"
- "paddusb "MANGLE(g6Dither)", %%mm2;"
- "paddusb "MANGLE(r5Dither)", %%mm1;"
+ "paddusb "MANGLE(b5Dither)", %%mm0;"
+ "paddusb "MANGLE(g6Dither)", %%mm2;"
+ "paddusb "MANGLE(r5Dither)", %%mm1;"
#endif
- /* mask unneeded bits off */
- "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
- "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
- "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
-
- "psrlw $3,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
- "pxor %%mm4, %%mm4;" /* zero mm4 */
-
- "movq %%mm0, %%mm5;" /* Copy B7-B0 */
- "movq %%mm2, %%mm7;" /* Copy G7-G0 */
-
- /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
- "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
- "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
-
- "psllw $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
- "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
-
- "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
- MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
-
- /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
- "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
- "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
-
- "psllw $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
- "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
-
- "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
- "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
-
- MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
-
- "add $16, %1 \n\t"
- "add $4, %0 \n\t"
- " js 1b \n\t"
-
- : "+r" (index), "+r" (_image)
- : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
- );
+ /* mask unneeded bits off */
+ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand "MANGLE(mmx_grnmask)", %%mm2;" /* g7g6g5g4 g3g2_0_0 g7g6g5g4 g3g2_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+ "psrlw $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+
+ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $3, %%mm2;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
+
+        /* convert rgb24 plane to rgb16 pack for pixel 4-7 */
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3g2_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+
+ "psllw $3, %%mm7;" /* 0_0_0_0 0_g7g6g5 g4g3g2_0 0_0_0_0 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "por %%mm7, %%mm5;" /* r7r6r5r4 r3g7g6g5 g4g3g2b7 b6b5b4b3 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+
+ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
+
+ "add $16, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r" (index), "+r" (image)
+ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index)
+ );
}
__asm__ __volatile__ (EMMS);
-
+
return srcSliceH;
}
static inline int RENAME(yuv420_rgb15)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
int y, h_size;
if(c->srcFormat == PIX_FMT_YUV422P){
- srcStride[1] *= 2;
- srcStride[2] *= 2;
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
}
h_size= (c->dstW+7)&~7;
if(h_size*2 > FFABS(dstStride[0])) h_size-=8;
-
+
__asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
-//printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
-//srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
+ //printf("%X %X %X %X %X %X %X %X %X %X\n", (int)&c->redDither, (int)&b5Dither, (int)src[0], (int)src[1], (int)src[2], (int)dst[0],
+ //srcStride[0],srcStride[1],srcStride[2],dstStride[0]);
for (y= 0; y<srcSliceH; y++ ) {
- uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
- uint8_t *_py = src[0] + y*srcStride[0];
- uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
- uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
- long index= -h_size/2;
-
- b5Dither= dither8[y&1];
- g6Dither= dither4[y&1];
- g5Dither= dither8[y&1];
- r5Dither= dither8[(y+1)&1];
- /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
- pixels in each iteration */
- __asm__ __volatile__ (
- /* load data for start of next scan line */
- "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
- "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-// ".balign 16 \n\t"
- "1: \n\t"
+ uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *py = src[0] + y*srcStride[0];
+ uint8_t *pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *pv = src[2] + (y>>1)*srcStride[2];
+ long index= -h_size/2;
+
+ b5Dither= ff_dither8[y&1];
+ g6Dither= ff_dither4[y&1];
+ g5Dither= ff_dither8[y&1];
+ r5Dither= ff_dither8[(y+1)&1];
+        /* this mmx assembly code deals with a SINGLE scan line at a time; it converts 8
+           pixels in each iteration */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ //".balign 16 \n\t"
+ "1: \n\t"
YUV2RGB
#ifdef DITHER1XBPP
- "paddusb "MANGLE(b5Dither)", %%mm0 \n\t"
- "paddusb "MANGLE(g5Dither)", %%mm2 \n\t"
- "paddusb "MANGLE(r5Dither)", %%mm1 \n\t"
+ "paddusb "MANGLE(b5Dither)", %%mm0 \n\t"
+ "paddusb "MANGLE(g5Dither)", %%mm2 \n\t"
+ "paddusb "MANGLE(r5Dither)", %%mm1 \n\t"
#endif
- /* mask unneeded bits off */
- "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
- "pand "MANGLE(mmx_redmask)", %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */
- "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+ /* mask unneeded bits off */
+ "pand "MANGLE(mmx_redmask)", %%mm0;" /* b7b6b5b4 b3_0_0_0 b7b6b5b4 b3_0_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm2;" /* g7g6g5g4 g3_0_0_0 g7g6g5g4 g3_0_0_0 */
+ "pand "MANGLE(mmx_redmask)", %%mm1;" /* r7r6r5r4 r3_0_0_0 r7r6r5r4 r3_0_0_0 */
+
+ "psrlw $3, %%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
+ "psrlw $1, %%mm1;" /* 0_r7r6r5 r4r3_0_0 0_r7r6r5 r4r3_0_0 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
- "psrlw $3,%%mm0;" /* 0_0_0_b7 b6b5b4b3 0_0_0_b7 b6b5b4b3 */
- "psrlw $1,%%mm1;" /* 0_r7r6r5 r4r3_0_0 0_r7r6r5 r4r3_0_0 */
- "pxor %%mm4, %%mm4;" /* zero mm4 */
+ "movq %%mm0, %%mm5;" /* Copy B7-B0 */
+ "movq %%mm2, %%mm7;" /* Copy G7-G0 */
- "movq %%mm0, %%mm5;" /* Copy B7-B0 */
- "movq %%mm2, %%mm7;" /* Copy G7-G0 */
+ /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
+ "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3_0_0_0 */
+ "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
- /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
- "punpcklbw %%mm4, %%mm2;" /* 0_0_0_0 0_0_0_0 g7g6g5g4 g3_0_0_0 */
- "punpcklbw %%mm1, %%mm0;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+ "psllw $2, %%mm2;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+ "por %%mm2, %%mm0;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
- "psllw $2, %%mm2;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
- "por %%mm2, %%mm0;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
- "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
- MOVNTQ " %%mm0, (%1);" /* store pixel 0-3 */
+        /* convert rgb24 plane to rgb16 pack for pixel 4-7 */
+ "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 0_g7g6g5 g4g3_0_0 */
+ "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
- /* convert rgb24 plane to rgb16 pack for pixel 0-3 */
- "punpckhbw %%mm4, %%mm7;" /* 0_0_0_0 0_0_0_0 0_g7g6g5 g4g3_0_0 */
- "punpckhbw %%mm1, %%mm5;" /* r7r6r5r4 r3_0_0_0 0_0_0_b7 b6b5b4b3 */
+ "psllw $2, %%mm7;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
- "psllw $2, %%mm7;" /* 0_0_0_0 0_0_g7g6 g5g4g3_0 0_0_0_0 */
- "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "por %%mm7, %%mm5;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "por %%mm7, %%mm5;" /* 0_r7r6r5 r4r3g7g6 g5g4g3b7 b6b5b4b3 */
- "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
- MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
-
- "add $16, %1 \n\t"
- "add $4, %0 \n\t"
- " js 1b \n\t"
- : "+r" (index), "+r" (_image)
- : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
- );
+ "add $16, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+ : "+r" (index), "+r" (image)
+ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index)
+ );
}
__asm__ __volatile__ (EMMS);
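
The pand/psrlw/psllw/por sequences in the two functions above implement ordinary RGB565 and RGB555 packing (plus an ordered dither added before truncation when DITHER1XBPP is defined). A plain-C equivalent for one pixel, given 8-bit components, would be roughly:

#include <stdint.h>

static uint16_t pack_rgb565(uint8_t r, uint8_t g, uint8_t b)
{
    /* 5 bits red (15..11), 6 bits green (10..5), 5 bits blue (4..0) */
    return (uint16_t)(((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3));
}

static uint16_t pack_rgb555(uint8_t r, uint8_t g, uint8_t b)
{
    /* 5 bits per component, top bit unused */
    return (uint16_t)(((r & 0xF8) << 7) | ((g & 0xF8) << 2) | (b >> 3));
}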
@@ -308,145 +306,145 @@ YUV2RGB
}
static inline int RENAME(yuv420_rgb24)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
int y, h_size;
if(c->srcFormat == PIX_FMT_YUV422P){
- srcStride[1] *= 2;
- srcStride[2] *= 2;
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
}
h_size= (c->dstW+7)&~7;
if(h_size*3 > FFABS(dstStride[0])) h_size-=8;
-
+
__asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
for (y= 0; y<srcSliceH; y++ ) {
- uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
- uint8_t *_py = src[0] + y*srcStride[0];
- uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
- uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
- long index= -h_size/2;
-
- /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
- pixels in each iteration */
- __asm__ __volatile__ (
- /* load data for start of next scan line */
- "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
- "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-// ".balign 16 \n\t"
- "1: \n\t"
+ uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *py = src[0] + y*srcStride[0];
+ uint8_t *pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *pv = src[2] + (y>>1)*srcStride[2];
+ long index= -h_size/2;
+
+        /* this mmx assembly code deals with a SINGLE scan line at a time; it converts 8
+           pixels in each iteration */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ //".balign 16 \n\t"
+ "1: \n\t"
YUV2RGB
- /* mm0=B, %%mm2=G, %%mm1=R */
+ /* mm0=B, %%mm2=G, %%mm1=R */
#ifdef HAVE_MMX2
- "movq "MANGLE(M24A)", %%mm4 \n\t"
- "movq "MANGLE(M24C)", %%mm7 \n\t"
- "pshufw $0x50, %%mm0, %%mm5 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */
- "pshufw $0x50, %%mm2, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */
- "pshufw $0x00, %%mm1, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */
-
- "pand %%mm4, %%mm5 \n\t" /* B2 B1 B0 */
- "pand %%mm4, %%mm3 \n\t" /* G2 G1 G0 */
- "pand %%mm7, %%mm6 \n\t" /* R1 R0 */
-
- "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */
- "por %%mm5, %%mm6 \n\t"
- "por %%mm3, %%mm6 \n\t"
- MOVNTQ" %%mm6, (%1) \n\t"
-
- "psrlq $8, %%mm2 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */
- "pshufw $0xA5, %%mm0, %%mm5 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */
- "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */
- "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */
-
- "pand "MANGLE(M24B)", %%mm5 \n\t" /* B5 B4 B3 */
- "pand %%mm7, %%mm3 \n\t" /* G4 G3 */
- "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */
-
- "por %%mm5, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */
- "por %%mm3, %%mm6 \n\t"
- MOVNTQ" %%mm6, 8(%1) \n\t"
-
- "pshufw $0xFF, %%mm0, %%mm5 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */
- "pshufw $0xFA, %%mm2, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */
- "pshufw $0xFA, %%mm1, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */
- "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
-
- "pand %%mm7, %%mm5 \n\t" /* B7 B6 */
- "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */
- "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */
- "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq "MANGLE(ff_M24A)", %%mm4 \n\t"
+ "movq "MANGLE(ff_M24C)", %%mm7 \n\t"
+ "pshufw $0x50, %%mm0, %%mm5 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */
+ "pshufw $0x50, %%mm2, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */
+ "pshufw $0x00, %%mm1, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */
+
+ "pand %%mm4, %%mm5 \n\t" /* B2 B1 B0 */
+ "pand %%mm4, %%mm3 \n\t" /* G2 G1 G0 */
+ "pand %%mm7, %%mm6 \n\t" /* R1 R0 */
+
+ "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */
+ "por %%mm5, %%mm6 \n\t"
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, (%1) \n\t"
+
+ "psrlq $8, %%mm2 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */
+ "pshufw $0xA5, %%mm0, %%mm5 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */
+ "pshufw $0x55, %%mm2, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */
+ "pshufw $0xA5, %%mm1, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */
+
+ "pand "MANGLE(ff_M24B)", %%mm5 \n\t" /* B5 B4 B3 */
+ "pand %%mm7, %%mm3 \n\t" /* G4 G3 */
+ "pand %%mm4, %%mm6 \n\t" /* R4 R3 R2 */
+
+ "por %%mm5, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, 8(%1) \n\t"
+
+ "pshufw $0xFF, %%mm0, %%mm5 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */
+ "pshufw $0xFA, %%mm2, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */
+ "pshufw $0xFA, %%mm1, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "pand %%mm7, %%mm5 \n\t" /* B7 B6 */
+ "pand %%mm4, %%mm3 \n\t" /* G7 G6 G5 */
+ "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
\
- "por %%mm5, %%mm3 \n\t"
- "por %%mm3, %%mm6 \n\t"
- MOVNTQ" %%mm6, 16(%1) \n\t"
- "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
- "pxor %%mm4, %%mm4 \n\t"
+ "por %%mm5, %%mm3 \n\t"
+ "por %%mm3, %%mm6 \n\t"
+ MOVNTQ" %%mm6, 16(%1) \n\t"
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ "pxor %%mm4, %%mm4 \n\t"
#else
- "pxor %%mm4, %%mm4 \n\t"
- "movq %%mm0, %%mm5 \n\t" /* B */
- "movq %%mm1, %%mm6 \n\t" /* R */
- "punpcklbw %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */
- "punpcklbw %%mm4, %%mm1 \n\t" /* 0R0R0R0R 0 */
- "punpckhbw %%mm2, %%mm5 \n\t" /* GBGBGBGB 2 */
- "punpckhbw %%mm4, %%mm6 \n\t" /* 0R0R0R0R 2 */
- "movq %%mm0, %%mm7 \n\t" /* GBGBGBGB 0 */
- "movq %%mm5, %%mm3 \n\t" /* GBGBGBGB 2 */
- "punpcklwd %%mm1, %%mm7 \n\t" /* 0RGB0RGB 0 */
- "punpckhwd %%mm1, %%mm0 \n\t" /* 0RGB0RGB 1 */
- "punpcklwd %%mm6, %%mm5 \n\t" /* 0RGB0RGB 2 */
- "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */
-
- "movq %%mm7, %%mm2 \n\t" /* 0RGB0RGB 0 */
- "movq %%mm0, %%mm6 \n\t" /* 0RGB0RGB 1 */
- "movq %%mm5, %%mm1 \n\t" /* 0RGB0RGB 2 */
- "movq %%mm3, %%mm4 \n\t" /* 0RGB0RGB 3 */
-
- "psllq $40, %%mm7 \n\t" /* RGB00000 0 */
- "psllq $40, %%mm0 \n\t" /* RGB00000 1 */
- "psllq $40, %%mm5 \n\t" /* RGB00000 2 */
- "psllq $40, %%mm3 \n\t" /* RGB00000 3 */
-
- "punpckhdq %%mm2, %%mm7 \n\t" /* 0RGBRGB0 0 */
- "punpckhdq %%mm6, %%mm0 \n\t" /* 0RGBRGB0 1 */
- "punpckhdq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */
- "punpckhdq %%mm4, %%mm3 \n\t" /* 0RGBRGB0 3 */
-
- "psrlq $8, %%mm7 \n\t" /* 00RGBRGB 0 */
- "movq %%mm0, %%mm6 \n\t" /* 0RGBRGB0 1 */
- "psllq $40, %%mm0 \n\t" /* GB000000 1 */
- "por %%mm0, %%mm7 \n\t" /* GBRGBRGB 0 */
- MOVNTQ" %%mm7, (%1) \n\t"
-
- "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
-
- "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */
- "movq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */
- "psllq $24, %%mm5 \n\t" /* BRGB0000 2 */
- "por %%mm5, %%mm6 \n\t" /* BRGBRGBR 1 */
- MOVNTQ" %%mm6, 8(%1) \n\t"
-
- "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-
- "psrlq $40, %%mm1 \n\t" /* 000000RG 2 */
- "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */
- "por %%mm3, %%mm1 \n\t" /* RGBRGBRG 2 */
- MOVNTQ" %%mm1, 16(%1) \n\t"
-
- "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "pxor %%mm4, %%mm4 \n\t"
+ "pxor %%mm4, %%mm4 \n\t"
+ "movq %%mm0, %%mm5 \n\t" /* B */
+ "movq %%mm1, %%mm6 \n\t" /* R */
+ "punpcklbw %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */
+ "punpcklbw %%mm4, %%mm1 \n\t" /* 0R0R0R0R 0 */
+ "punpckhbw %%mm2, %%mm5 \n\t" /* GBGBGBGB 2 */
+ "punpckhbw %%mm4, %%mm6 \n\t" /* 0R0R0R0R 2 */
+ "movq %%mm0, %%mm7 \n\t" /* GBGBGBGB 0 */
+ "movq %%mm5, %%mm3 \n\t" /* GBGBGBGB 2 */
+ "punpcklwd %%mm1, %%mm7 \n\t" /* 0RGB0RGB 0 */
+ "punpckhwd %%mm1, %%mm0 \n\t" /* 0RGB0RGB 1 */
+ "punpcklwd %%mm6, %%mm5 \n\t" /* 0RGB0RGB 2 */
+ "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */
+
+ "movq %%mm7, %%mm2 \n\t" /* 0RGB0RGB 0 */
+ "movq %%mm0, %%mm6 \n\t" /* 0RGB0RGB 1 */
+ "movq %%mm5, %%mm1 \n\t" /* 0RGB0RGB 2 */
+ "movq %%mm3, %%mm4 \n\t" /* 0RGB0RGB 3 */
+
+ "psllq $40, %%mm7 \n\t" /* RGB00000 0 */
+ "psllq $40, %%mm0 \n\t" /* RGB00000 1 */
+ "psllq $40, %%mm5 \n\t" /* RGB00000 2 */
+ "psllq $40, %%mm3 \n\t" /* RGB00000 3 */
+
+ "punpckhdq %%mm2, %%mm7 \n\t" /* 0RGBRGB0 0 */
+ "punpckhdq %%mm6, %%mm0 \n\t" /* 0RGBRGB0 1 */
+ "punpckhdq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */
+ "punpckhdq %%mm4, %%mm3 \n\t" /* 0RGBRGB0 3 */
+
+ "psrlq $8, %%mm7 \n\t" /* 00RGBRGB 0 */
+ "movq %%mm0, %%mm6 \n\t" /* 0RGBRGB0 1 */
+ "psllq $40, %%mm0 \n\t" /* GB000000 1 */
+ "por %%mm0, %%mm7 \n\t" /* GBRGBRGB 0 */
+ MOVNTQ" %%mm7, (%1) \n\t"
+
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+
+ "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */
+ "movq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */
+ "psllq $24, %%mm5 \n\t" /* BRGB0000 2 */
+ "por %%mm5, %%mm6 \n\t" /* BRGBRGBR 1 */
+ MOVNTQ" %%mm6, 8(%1) \n\t"
+
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+
+ "psrlq $40, %%mm1 \n\t" /* 000000RG 2 */
+ "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */
+ "por %%mm3, %%mm1 \n\t" /* RGBRGBRG 2 */
+ MOVNTQ" %%mm1, 16(%1) \n\t"
+
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "pxor %%mm4, %%mm4 \n\t"
#endif
-
- "add $24, %1 \n\t"
- "add $4, %0 \n\t"
- " js 1b \n\t"
-
- : "+r" (index), "+r" (_image)
- : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
- );
+
+ "add $24, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
+
+ : "+r" (index), "+r" (image)
+ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index)
+ );
}
__asm__ __volatile__ (EMMS);
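
The 24 bpp path above is the awkward one for MMX: three bytes per pixel never align with 8-byte registers, so the MMX2 branch shuffles with pshufw and the ff_M24A/ff_M24B/ff_M24C masks while the plain-MMX branch builds the same layout with punpck/shift/or chains. The plain-C equivalent is just a byte interleave with no padding byte; the component order shown is illustrative:

#include <stdint.h>

static void store_rgb24_8px(uint8_t *dst, const uint8_t *b, const uint8_t *g, const uint8_t *r)
{
    for (int i = 0; i < 8; i++) {       /* 8 pixels -> 24 output bytes */
        dst[3*i + 0] = b[i];
        dst[3*i + 1] = g[i];
        dst[3*i + 2] = r[i];
    }
}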
@@ -454,85 +452,85 @@ YUV2RGB
}
static inline int RENAME(yuv420_rgb32)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
- int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
int y, h_size;
if(c->srcFormat == PIX_FMT_YUV422P){
- srcStride[1] *= 2;
- srcStride[2] *= 2;
+ srcStride[1] *= 2;
+ srcStride[2] *= 2;
}
h_size= (c->dstW+7)&~7;
if(h_size*4 > FFABS(dstStride[0])) h_size-=8;
-
+
__asm__ __volatile__ ("pxor %mm4, %mm4;" /* zero mm4 */ );
for (y= 0; y<srcSliceH; y++ ) {
- uint8_t *_image = dst[0] + (y+srcSliceY)*dstStride[0];
- uint8_t *_py = src[0] + y*srcStride[0];
- uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
- uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
- long index= -h_size/2;
-
- /* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
- pixels in each iteration */
- __asm__ __volatile__ (
- /* load data for start of next scan line */
- "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
- "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
-// ".balign 16 \n\t"
- "1: \n\t"
+ uint8_t *image = dst[0] + (y+srcSliceY)*dstStride[0];
+ uint8_t *py = src[0] + y*srcStride[0];
+ uint8_t *pu = src[1] + (y>>1)*srcStride[1];
+ uint8_t *pv = src[2] + (y>>1)*srcStride[2];
+ long index= -h_size/2;
+
+        /* this mmx assembly code deals with a SINGLE scan line at a time; it converts 8
+           pixels in each iteration */
+ __asm__ __volatile__ (
+ /* load data for start of next scan line */
+ "movd (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "movq (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ //".balign 16 \n\t"
+ "1: \n\t"
YUV2RGB
- /* convert RGB plane to RGB packed format,
- mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
- mm4 -> GB, mm5 -> AR pixel 4-7,
- mm6 -> GB, mm7 -> AR pixel 0-3 */
- "pxor %%mm3, %%mm3;" /* zero mm3 */
+ /* convert RGB plane to RGB packed format,
+ mm0 -> B, mm1 -> R, mm2 -> G, mm3 -> 0,
+ mm4 -> GB, mm5 -> AR pixel 4-7,
+ mm6 -> GB, mm7 -> AR pixel 0-3 */
+ "pxor %%mm3, %%mm3;" /* zero mm3 */
+
+ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "movq %%mm1, %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
- "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
- "movq %%mm1, %%mm7;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
+ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "movq %%mm1, %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
- "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
- "movq %%mm1, %%mm5;" /* R7 R6 R5 R4 R3 R2 R1 R0 */
+ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
+ "punpcklbw %%mm3, %%mm7;" /* 00 R3 00 R2 00 R1 00 R0 */
- "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
- "punpcklbw %%mm3, %%mm7;" /* 00 R3 00 R2 00 R1 00 R0 */
+ "punpcklwd %%mm7, %%mm6;" /* 00 R1 B1 G1 00 R0 B0 G0 */
+ MOVNTQ " %%mm6, (%1);" /* Store ARGB1 ARGB0 */
- "punpcklwd %%mm7, %%mm6;" /* 00 R1 B1 G1 00 R0 B0 G0 */
- MOVNTQ " %%mm6, (%1);" /* Store ARGB1 ARGB0 */
+ "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
- "movq %%mm0, %%mm6;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
- "punpcklbw %%mm2, %%mm6;" /* G3 B3 G2 B2 G1 B1 G0 B0 */
+ "punpckhwd %%mm7, %%mm6;" /* 00 R3 G3 B3 00 R2 B3 G2 */
+ MOVNTQ " %%mm6, 8 (%1);" /* Store ARGB3 ARGB2 */
- "punpckhwd %%mm7, %%mm6;" /* 00 R3 G3 B3 00 R2 B3 G2 */
- MOVNTQ " %%mm6, 8 (%1);" /* Store ARGB3 ARGB2 */
+ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
+ "punpckhbw %%mm3, %%mm5;" /* 00 R7 00 R6 00 R5 00 R4 */
- "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
- "punpckhbw %%mm3, %%mm5;" /* 00 R7 00 R6 00 R5 00 R4 */
+ "punpcklwd %%mm5, %%mm4;" /* 00 R5 B5 G5 00 R4 B4 G4 */
+ MOVNTQ " %%mm4, 16 (%1);" /* Store ARGB5 ARGB4 */
- "punpcklwd %%mm5, %%mm4;" /* 00 R5 B5 G5 00 R4 B4 G4 */
- MOVNTQ " %%mm4, 16 (%1);" /* Store ARGB5 ARGB4 */
+ "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
+ "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
- "movq %%mm0, %%mm4;" /* B7 B6 B5 B4 B3 B2 B1 B0 */
- "punpckhbw %%mm2, %%mm4;" /* G7 B7 G6 B6 G5 B5 G4 B4 */
+ "punpckhwd %%mm5, %%mm4;" /* 00 R7 G7 B7 00 R6 B6 G6 */
+ MOVNTQ " %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */
- "punpckhwd %%mm5, %%mm4;" /* 00 R7 G7 B7 00 R6 B6 G6 */
- MOVNTQ " %%mm4, 24 (%1);" /* Store ARGB7 ARGB6 */
+ "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
+ "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
- "movd 4 (%2, %0), %%mm0;" /* Load 4 Cb 00 00 00 00 u3 u2 u1 u0 */
- "movd 4 (%3, %0), %%mm1;" /* Load 4 Cr 00 00 00 00 v3 v2 v1 v0 */
+ "pxor %%mm4, %%mm4;" /* zero mm4 */
+ "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
- "pxor %%mm4, %%mm4;" /* zero mm4 */
- "movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
+ "add $32, %1 \n\t"
+ "add $4, %0 \n\t"
+ " js 1b \n\t"
- "add $32, %1 \n\t"
- "add $4, %0 \n\t"
- " js 1b \n\t"
-
- : "+r" (index), "+r" (_image)
- : "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
- );
+ : "+r" (index), "+r" (image)
+ : "r" (pu - index), "r" (pv - index), "r"(&c->redDither), "r" (py - 2*index)
+ );
}
__asm__ __volatile__ (EMMS);
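
The 32 bpp path above interleaves the planar B, G and R results with a zeroed register, so every pixel occupies four bytes and the fourth (alpha/padding) byte is written as 0 rather than 0xFF. Per pixel this amounts to the following, with the in-memory byte order on little-endian x86 shown for illustration:

#include <stdint.h>

static void store_rgb32_pixel(uint8_t *dst, uint8_t r, uint8_t g, uint8_t b)
{
    dst[0] = b;
    dst[1] = g;
    dst[2] = r;
    dst[3] = 0;   /* alpha/padding byte left at zero by this code path */
}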
diff --git a/contrib/ffmpeg/libswscale/yuv2rgb_vis.c b/contrib/ffmpeg/libswscale/yuv2rgb_vis.c
new file mode 100644
index 000000000..618508c47
--- /dev/null
+++ b/contrib/ffmpeg/libswscale/yuv2rgb_vis.c
@@ -0,0 +1,206 @@
+/*
+ * VIS optimized software YUV to RGB converter
+ * Copyright (c) 2007 Denes Balatoni <dbalatoni@programozo.hu>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <stdlib.h>
+
+#include "swscale.h"
+
+#define YUV2RGB_INIT \
+ "wr %%g0, 0x10, %%gsr \n\t" \
+ "ldd [%5], %%f32 \n\t" \
+ "ldd [%5+8], %%f34 \n\t" \
+ "ldd [%5+16], %%f36 \n\t" \
+ "ldd [%5+24], %%f38 \n\t" \
+ "ldd [%5+32], %%f40 \n\t" \
+ "ldd [%5+40], %%f42 \n\t" \
+ "ldd [%5+48], %%f44 \n\t" \
+ "ldd [%5+56], %%f46 \n\t" \
+ "ldd [%5+64], %%f48 \n\t" \
+ "ldd [%5+72], %%f50 \n\t"
+
+#define YUV2RGB_KERNEL \
+ /* ^^^^ f0=Y f3=u f5=v */ \
+ "fmul8x16 %%f3, %%f48, %%f6 \n\t" \
+ "fmul8x16 %%f19, %%f48, %%f22 \n\t" \
+ "fmul8x16 %%f5, %%f44, %%f8 \n\t" \
+ "fmul8x16 %%f21, %%f44, %%f24 \n\t" \
+ "fmul8x16 %%f0, %%f42, %%f0 \n\t" \
+ "fmul8x16 %%f16, %%f42, %%f16 \n\t" \
+ "fmul8x16 %%f3, %%f50, %%f2 \n\t" \
+ "fmul8x16 %%f19, %%f50, %%f18 \n\t" \
+ "fmul8x16 %%f5, %%f46, %%f4 \n\t" \
+ "fmul8x16 %%f21, %%f46, %%f20 \n\t" \
+ \
+ "fpsub16 %%f6, %%f34, %%f6 \n\t" /* 1 */ \
+ "fpsub16 %%f22, %%f34, %%f22 \n\t" /* 1 */ \
+ "fpsub16 %%f8, %%f38, %%f8 \n\t" /* 3 */ \
+ "fpsub16 %%f24, %%f38, %%f24 \n\t" /* 3 */ \
+ "fpsub16 %%f0, %%f32, %%f0 \n\t" /* 0 */ \
+ "fpsub16 %%f16, %%f32, %%f16 \n\t" /* 0 */ \
+ "fpsub16 %%f2, %%f36, %%f2 \n\t" /* 2 */ \
+ "fpsub16 %%f18, %%f36, %%f18 \n\t" /* 2 */ \
+ "fpsub16 %%f4, %%f40, %%f4 \n\t" /* 4 */ \
+ "fpsub16 %%f20, %%f40, %%f20 \n\t" /* 4 */ \
+ \
+ "fpadd16 %%f0, %%f8, %%f8 \n\t" /* Gt */ \
+ "fpadd16 %%f16, %%f24, %%f24 \n\t" /* Gt */ \
+ "fpadd16 %%f0, %%f4, %%f4 \n\t" /* R */ \
+ "fpadd16 %%f16, %%f20, %%f20 \n\t" /* R */ \
+ "fpadd16 %%f0, %%f6, %%f6 \n\t" /* B */ \
+ "fpadd16 %%f16, %%f22, %%f22 \n\t" /* B */ \
+ "fpadd16 %%f8, %%f2, %%f2 \n\t" /* G */ \
+ "fpadd16 %%f24, %%f18, %%f18 \n\t" /* G */ \
+ \
+ "fpack16 %%f4, %%f4 \n\t" \
+ "fpack16 %%f20, %%f20 \n\t" \
+ "fpack16 %%f6, %%f6 \n\t" \
+ "fpack16 %%f22, %%f22 \n\t" \
+ "fpack16 %%f2, %%f2 \n\t" \
+ "fpack16 %%f18, %%f18 \n\t"
+
+
+
+static int vis_420P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, out1, out2, out3, out4, out5, out6;
+
+ for(y=0;y < srcSliceH;++y) {
+ asm volatile (
+ YUV2RGB_INIT
+ "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */
+ "1: \n\t"
+ "ldda [%1] %%asi, %%f2 \n\t"
+ "ldda [%1+2] %%asi, %%f18 \n\t"
+ "ldda [%2] %%asi, %%f4 \n\t"
+ "ldda [%2+2] %%asi, %%f20 \n\t"
+ "ld [%0], %%f0 \n\t"
+ "ld [%0+4], %%f16 \n\t"
+ "fpmerge %%f3, %%f3, %%f2 \n\t"
+ "fpmerge %%f19, %%f19, %%f18 \n\t"
+ "fpmerge %%f5, %%f5, %%f4 \n\t"
+ "fpmerge %%f21, %%f21, %%f20 \n\t"
+ YUV2RGB_KERNEL
+ "fzero %%f0 \n\t"
+ "fpmerge %%f4, %%f6, %%f8 \n\t" // r,b,t1
+ "fpmerge %%f20, %%f22, %%f24 \n\t" // r,b,t1
+ "fpmerge %%f0, %%f2, %%f10 \n\t" // 0,g,t2
+ "fpmerge %%f0, %%f18, %%f26 \n\t" // 0,g,t2
+ "fpmerge %%f10, %%f8, %%f4 \n\t" // t2,t1,msb
+ "fpmerge %%f26, %%f24, %%f20 \n\t" // t2,t1,msb
+ "fpmerge %%f11, %%f9, %%f6 \n\t" // t2,t1,lsb
+ "fpmerge %%f27, %%f25, %%f22 \n\t" // t2,t1,lsb
+ "std %%f4, [%3] \n\t"
+ "std %%f20, [%3+16] \n\t"
+ "std %%f6, [%3+8] \n\t"
+ "std %%f22, [%3+24] \n\t"
+
+ "add %0, 8, %0 \n\t"
+ "add %1, 4, %1 \n\t"
+ "add %2, 4, %2 \n\t"
+ "subcc %4, 8, %4 \n\t"
+ "bne 1b \n\t"
+ "add %3, 32, %3 \n\t" //delay slot
+ : "=r" (out1), "=r" (out2), "=r" (out3), "=r" (out4), "=r" (out5), "=r" (out6)
+ : "0" (src[0]+(y+srcSliceY)*srcStride[0]), "1" (src[1]+((y+srcSliceY)>>1)*srcStride[1]),
+ "2" (src[2]+((y+srcSliceY)>>1)*srcStride[2]), "3" (dst[0]+(y+srcSliceY)*dstStride[0]),
+ "4" (c->dstW),
+ "5" (c->sparc_coeffs)
+ );
+ }
+
+ return srcSliceH;
+}
+
+static int vis_422P_ARGB32(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]){
+ int y, out1, out2, out3, out4, out5, out6;
+
+ for(y=0;y < srcSliceH;++y) {
+ asm volatile (
+ YUV2RGB_INIT
+ "wr %%g0, 0xd2, %%asi \n\t" /* ASI_FL16_P */
+ "1: \n\t"
+ "ldda [%1] %%asi, %%f2 \n\t"
+ "ldda [%1+2] %%asi, %%f18 \n\t"
+ "ldda [%2] %%asi, %%f4 \n\t"
+ "ldda [%2+2] %%asi, %%f20 \n\t"
+ "ld [%0], %%f0 \n\t"
+ "ld [%0+4], %%f16 \n\t"
+ "fpmerge %%f3, %%f3, %%f2 \n\t"
+ "fpmerge %%f19, %%f19, %%f18 \n\t"
+ "fpmerge %%f5, %%f5, %%f4 \n\t"
+ "fpmerge %%f21, %%f21, %%f20 \n\t"
+ YUV2RGB_KERNEL
+ "fzero %%f0 \n\t"
+ "fpmerge %%f4, %%f6, %%f8 \n\t" // r,b,t1
+ "fpmerge %%f20, %%f22, %%f24 \n\t" // r,b,t1
+ "fpmerge %%f0, %%f2, %%f10 \n\t" // 0,g,t2
+ "fpmerge %%f0, %%f18, %%f26 \n\t" // 0,g,t2
+ "fpmerge %%f10, %%f8, %%f4 \n\t" // t2,t1,msb
+ "fpmerge %%f26, %%f24, %%f20 \n\t" // t2,t1,msb
+ "fpmerge %%f11, %%f9, %%f6 \n\t" // t2,t1,lsb
+ "fpmerge %%f27, %%f25, %%f22 \n\t" // t2,t1,lsb
+ "std %%f4, [%3] \n\t"
+ "std %%f20, [%3+16] \n\t"
+ "std %%f6, [%3+8] \n\t"
+ "std %%f22, [%3+24] \n\t"
+
+ "add %0, 8, %0 \n\t"
+ "add %1, 4, %1 \n\t"
+ "add %2, 4, %2 \n\t"
+ "subcc %4, 8, %4 \n\t"
+ "bne 1b \n\t"
+ "add %3, 32, %3 \n\t" //delay slot
+ : "=r" (out1), "=r" (out2), "=r" (out3), "=r" (out4), "=r" (out5), "=r" (out6)
+ : "0" (src[0]+(y+srcSliceY)*srcStride[0]), "1" (src[1]+(y+srcSliceY)*srcStride[1]),
+ "2" (src[2]+(y+srcSliceY)*srcStride[2]), "3" (dst[0]+(y+srcSliceY)*dstStride[0]),
+ "4" (c->dstW),
+ "5" (c->sparc_coeffs)
+ );
+ }
+
+ return srcSliceH;
+}
+
+SwsFunc yuv2rgb_init_vis(SwsContext *c) {
+ c->sparc_coeffs[5]=c->yCoeff;
+ c->sparc_coeffs[6]=c->vgCoeff;
+ c->sparc_coeffs[7]=c->vrCoeff;
+ c->sparc_coeffs[8]=c->ubCoeff;
+ c->sparc_coeffs[9]=c->ugCoeff;
+
+ c->sparc_coeffs[0]=(((int16_t)c->yOffset*(int16_t)c->yCoeff >>11) & 0xffff) * 0x0001000100010001ULL;
+ c->sparc_coeffs[1]=(((int16_t)c->uOffset*(int16_t)c->ubCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+ c->sparc_coeffs[2]=(((int16_t)c->uOffset*(int16_t)c->ugCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+ c->sparc_coeffs[3]=(((int16_t)c->vOffset*(int16_t)c->vgCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+ c->sparc_coeffs[4]=(((int16_t)c->vOffset*(int16_t)c->vrCoeff>>11) & 0xffff) * 0x0001000100010001ULL;
+
+ if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV422P && (c->dstW & 7)==0) {
+ av_log(c, AV_LOG_INFO, "SPARC VIS accelerated YUV422P -> RGB32\n");
+ return vis_422P_ARGB32;
+ }
+ else if (c->dstFormat == PIX_FMT_RGB32 && c->srcFormat == PIX_FMT_YUV420P && (c->dstW & 7)==0) {
+ av_log(c, AV_LOG_INFO, "SPARC VIS accelerated YUV420P -> RGB32\n");
+ return vis_420P_ARGB32;
+ }
+ return NULL;
+}
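
The sparc_coeffs[] setup above relies on a small trick: multiplying a masked 16-bit fixed-point constant by 0x0001000100010001ULL replicates it into all four 16-bit lanes of a 64-bit word, so the per-lane fpsub16 bias subtractions in YUV2RGB_KERNEL see the same constant in every lane. In isolation:

#include <stdint.h>

/* Broadcast a 16-bit value into the four 16-bit lanes of a 64-bit word. */
static uint64_t broadcast16(uint16_t v)
{
    return (uint64_t)v * 0x0001000100010001ULL;   /* v | v<<16 | v<<32 | v<<48 */
}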